ipmi: kill off 'timespec' usage again
[linux-2.6-block.git] / drivers / char / ipmi / ipmi_si_intf.c
CommitLineData
243ac210 1// SPDX-License-Identifier: GPL-2.0+
1da177e4
LT
2/*
3 * ipmi_si.c
4 *
5 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
6 * BT).
7 *
8 * Author: MontaVista Software, Inc.
9 * Corey Minyard <minyard@mvista.com>
10 * source@mvista.com
11 *
12 * Copyright 2002 MontaVista Software Inc.
dba9b4f6 13 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
1da177e4
LT
14 */
15
16/*
17 * This file holds the "policy" for the interface to the SMI state
18 * machine. It does the configuration, handles timers and interrupts,
19 * and drives the real SMI state machine.
20 */
21
25880f7d
JP
22#define pr_fmt(fmt) "ipmi_si: " fmt
23
1da177e4
LT
24#include <linux/module.h>
25#include <linux/moduleparam.h>
1da177e4 26#include <linux/sched.h>
07412736 27#include <linux/seq_file.h>
1da177e4
LT
28#include <linux/timer.h>
29#include <linux/errno.h>
30#include <linux/spinlock.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/list.h>
ea94027b 34#include <linux/notifier.h>
b0defcdb 35#include <linux/mutex.h>
e9a705a0 36#include <linux/kthread.h>
1da177e4 37#include <asm/irq.h>
1da177e4
LT
38#include <linux/interrupt.h>
39#include <linux/rcupdate.h>
16f4232c 40#include <linux/ipmi.h>
1da177e4 41#include <linux/ipmi_smi.h>
1e89a499 42#include "ipmi_si.h"
104fb25f 43#include "ipmi_si_sm.h"
b361e27b
CM
44#include <linux/string.h>
45#include <linux/ctype.h>
dba9b4f6 46
1da177e4
LT
47/* Measure times between events in the driver. */
48#undef DEBUG_TIMING
49
50/* Call every 10 ms. */
51#define SI_TIMEOUT_TIME_USEC 10000
52#define SI_USEC_PER_JIFFY (1000000/HZ)
53#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
54#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
c305e3d3 55 short timeout */
1da177e4
LT
56
57enum si_intf_state {
58 SI_NORMAL,
59 SI_GETTING_FLAGS,
60 SI_GETTING_EVENTS,
61 SI_CLEARING_FLAGS,
1da177e4 62 SI_GETTING_MESSAGES,
d9b7e4f7
CM
63 SI_CHECKING_ENABLES,
64 SI_SETTING_ENABLES
1da177e4
LT
65 /* FIXME - add watchdog stuff. */
66};
67
9dbf68f9
CM
68/* Some BT-specific defines we need here. */
69#define IPMI_BT_INTMASK_REG 2
70#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
71#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
72
95e300c0 73static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };
1da177e4 74
dd7450ca 75static bool initialized;
bb398a4c 76
64959e2d
CM
77/*
78 * Indexes into stats[] in smi_info below.
79 */
ba8ff1c6
CM
80enum si_stat_indexes {
81 /*
82 * Number of times the driver requested a timer while an operation
83 * was in progress.
84 */
85 SI_STAT_short_timeouts = 0,
86
87 /*
88 * Number of times the driver requested a timer while nothing was in
89 * progress.
90 */
91 SI_STAT_long_timeouts,
92
93 /* Number of times the interface was idle while being polled. */
94 SI_STAT_idles,
95
96 /* Number of interrupts the driver handled. */
97 SI_STAT_interrupts,
98
99 /* Number of time the driver got an ATTN from the hardware. */
100 SI_STAT_attentions,
64959e2d 101
ba8ff1c6
CM
102 /* Number of times the driver requested flags from the hardware. */
103 SI_STAT_flag_fetches,
104
105 /* Number of times the hardware didn't follow the state machine. */
106 SI_STAT_hosed_count,
107
108 /* Number of completed messages. */
109 SI_STAT_complete_transactions,
110
111 /* Number of IPMI events received from the hardware. */
112 SI_STAT_events,
113
114 /* Number of watchdog pretimeouts. */
115 SI_STAT_watchdog_pretimeouts,
116
b3834be5 117 /* Number of asynchronous messages received. */
ba8ff1c6
CM
118 SI_STAT_incoming_messages,
119
120
121 /* This *must* remain last, add new values above this. */
122 SI_NUM_STATS
123};
64959e2d 124
c305e3d3 125struct smi_info {
57bccb4e 126 int si_num;
a567b623 127 struct ipmi_smi *intf;
1da177e4 128 struct si_sm_data *si_sm;
81d02b7f 129 const struct si_sm_handlers *handlers;
1da177e4 130 spinlock_t si_lock;
b874b985 131 struct ipmi_smi_msg *waiting_msg;
1da177e4
LT
132 struct ipmi_smi_msg *curr_msg;
133 enum si_intf_state si_state;
134
c305e3d3
CM
135 /*
136 * Used to handle the various types of I/O that can occur with
137 * IPMI
138 */
1da177e4 139 struct si_sm_io io;
1da177e4 140
c305e3d3
CM
141 /*
142 * Per-OEM handler, called from handle_flags(). Returns 1
143 * when handle_flags() needs to be re-run or 0 indicating it
144 * set si_state itself.
145 */
3ae0e0f9
CM
146 int (*oem_data_avail_handler)(struct smi_info *smi_info);
147
c305e3d3
CM
148 /*
149 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
150 * is set to hold the flags until we are done handling everything
151 * from the flags.
152 */
1da177e4
LT
153#define RECEIVE_MSG_AVAIL 0x01
154#define EVENT_MSG_BUFFER_FULL 0x02
155#define WDT_PRE_TIMEOUT_INT 0x08
3ae0e0f9
CM
156#define OEM0_DATA_AVAIL 0x20
157#define OEM1_DATA_AVAIL 0x40
158#define OEM2_DATA_AVAIL 0x80
159#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
c305e3d3
CM
160 OEM1_DATA_AVAIL | \
161 OEM2_DATA_AVAIL)
1da177e4
LT
162 unsigned char msg_flags;
163
40112ae7 164 /* Does the BMC have an event buffer? */
7aefac26 165 bool has_event_buffer;
40112ae7 166
c305e3d3
CM
167 /*
168 * If set to true, this will request events the next time the
169 * state machine is idle.
170 */
1da177e4
LT
171 atomic_t req_events;
172
c305e3d3
CM
173 /*
174 * If true, run the state machine to completion on every send
175 * call. Generally used after a panic to make sure stuff goes
176 * out.
177 */
7aefac26 178 bool run_to_completion;
1da177e4 179
1da177e4
LT
180 /* The timer for this si. */
181 struct timer_list si_timer;
182
4f7f5551
MY
183 /* This flag is set, if the timer can be set */
184 bool timer_can_start;
185
48e8ac29
BS
186 /* This flag is set, if the timer is running (timer_pending() isn't enough) */
187 bool timer_running;
188
1da177e4
LT
189 /* The time (in jiffies) the last timeout occurred at. */
190 unsigned long last_timeout_jiffies;
191
89986496
CM
192 /* Are we waiting for the events, pretimeouts, received msgs? */
193 atomic_t need_watch;
194
c305e3d3
CM
195 /*
196 * The driver will disable interrupts when it gets into a
197 * situation where it cannot handle messages due to lack of
198 * memory. Once that situation clears up, it will re-enable
199 * interrupts.
200 */
7aefac26 201 bool interrupt_disabled;
1da177e4 202
d9b7e4f7
CM
203 /*
204 * Does the BMC support events?
205 */
206 bool supports_event_msg_buff;
207
1e7d6a45 208 /*
d0882897
CM
209 * Can we disable interrupts the global enables receive irq
210 * bit? There are currently two forms of brokenness, some
211 * systems cannot disable the bit (which is technically within
212 * the spec but a bad idea) and some systems have the bit
213 * forced to zero even though interrupts work (which is
214 * clearly outside the spec). The next bool tells which form
215 * of brokenness is present.
1e7d6a45 216 */
d0882897
CM
217 bool cannot_disable_irq;
218
219 /*
220 * Some systems are broken and cannot set the irq enable
221 * bit, even if they support interrupts.
222 */
223 bool irq_enable_broken;
1e7d6a45 224
340ff31a
CM
225 /* Is the driver in maintenance mode? */
226 bool in_maintenance_mode;
227
a8df150c
CM
228 /*
229 * Did we get an attention that we did not handle?
230 */
231 bool got_attn;
232
50c812b2 233 /* From the get device id response... */
3ae0e0f9 234 struct ipmi_device_id device_id;
1da177e4 235
cc095f0a
CM
236 /* Have we added the device group to the device? */
237 bool dev_group_added;
238
1da177e4 239 /* Counters and things for the proc filesystem. */
64959e2d 240 atomic_t stats[SI_NUM_STATS];
a9a2c44f 241
c305e3d3 242 struct task_struct *thread;
b0defcdb
CM
243
244 struct list_head link;
1da177e4
LT
245};
246
64959e2d
CM
247#define smi_inc_stat(smi, stat) \
248 atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
249#define smi_get_stat(smi, stat) \
250 ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
251
7a453308
CM
252#define IPMI_MAX_INTFS 4
253static int force_kipmid[IPMI_MAX_INTFS];
a51f4a81
CM
254static int num_force_kipmid;
255
7a453308 256static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
ae74e823
MW
257static int num_max_busy_us;
258
7aefac26 259static bool unload_when_empty = true;
b361e27b 260
b0defcdb 261static int try_smi_init(struct smi_info *smi);
71404a2f 262static void cleanup_one_si(struct smi_info *smi_info);
d2478521 263static void cleanup_ipmi_si(void);
b0defcdb 264
f93aae9f
JS
#ifdef DEBUG_TIMING
/*
 * Print a timestamped debug message; used to measure times between
 * events in the driver.
 *
 * Made static (it is file-local) and const-correct: the message text
 * is never modified.  tv_sec is time64_t (long long) so %lld matches.
 */
static void debug_timestamp(const char *msg)
{
	struct timespec64 t;

	ktime_get_ts64(&t);
	pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
#endif
276
/* Notifier chain invoked at the start of every SMI transaction. */
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);

/*
 * Register @nb to be called at the start of each SMI transaction
 * (the chain is fired from start_next_msg()).  Returns the result of
 * atomic_notifier_chain_register() (0 on success).
 */
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
282
1da177e4
LT
/*
 * Hand a completed response or event message up to the IPMI message
 * handler layer.  Ownership of @msg passes to the upper layer.
 */
static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer. */
	ipmi_smi_msg_received(smi_info->intf, msg);
}
289
4d7cbac7 290static void return_hosed_msg(struct smi_info *smi_info, int cCode)
1da177e4
LT
291{
292 struct ipmi_smi_msg *msg = smi_info->curr_msg;
293
4d7cbac7
CM
294 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
295 cCode = IPMI_ERR_UNSPECIFIED;
296 /* else use it as is */
297
25985edc 298 /* Make it a response */
1da177e4
LT
299 msg->rsp[0] = msg->data[0] | 4;
300 msg->rsp[1] = msg->data[1];
4d7cbac7 301 msg->rsp[2] = cCode;
1da177e4
LT
302 msg->rsp_size = 3;
303
304 smi_info->curr_msg = NULL;
305 deliver_recv_msg(smi_info, msg);
306}
307
308static enum si_sm_result start_next_msg(struct smi_info *smi_info)
309{
310 int rv;
1da177e4 311
b874b985 312 if (!smi_info->waiting_msg) {
1da177e4
LT
313 smi_info->curr_msg = NULL;
314 rv = SI_SM_IDLE;
315 } else {
316 int err;
317
b874b985
CM
318 smi_info->curr_msg = smi_info->waiting_msg;
319 smi_info->waiting_msg = NULL;
f93aae9f 320 debug_timestamp("Start2");
e041c683
AS
321 err = atomic_notifier_call_chain(&xaction_notifier_list,
322 0, smi_info);
ea94027b
CM
323 if (err & NOTIFY_STOP_MASK) {
324 rv = SI_SM_CALL_WITHOUT_DELAY;
325 goto out;
326 }
1da177e4
LT
327 err = smi_info->handlers->start_transaction(
328 smi_info->si_sm,
329 smi_info->curr_msg->data,
330 smi_info->curr_msg->data_size);
c305e3d3 331 if (err)
4d7cbac7 332 return_hosed_msg(smi_info, err);
1da177e4
LT
333
334 rv = SI_SM_CALL_WITHOUT_DELAY;
335 }
76824852 336out:
1da177e4
LT
337 return rv;
338}
339
0cfec916
CM
340static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
341{
4f7f5551
MY
342 if (!smi_info->timer_can_start)
343 return;
0cfec916
CM
344 smi_info->last_timeout_jiffies = jiffies;
345 mod_timer(&smi_info->si_timer, new_val);
346 smi_info->timer_running = true;
347}
348
/*
 * Start a new message and (re)start the timer and thread.
 *
 * The timer is armed and the kthread woken *before* the transaction is
 * started so a timeout/poll is always pending while the hardware works.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
{
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}
362
4f7f5551 363static void start_check_enables(struct smi_info *smi_info)
ee6cd5f8
CM
364{
365 unsigned char msg[2];
366
367 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
368 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
369
4f7f5551 370 start_new_msg(smi_info, msg, 2);
d9b7e4f7 371 smi_info->si_state = SI_CHECKING_ENABLES;
ee6cd5f8
CM
372}
373
4f7f5551 374static void start_clear_flags(struct smi_info *smi_info)
1da177e4
LT
375{
376 unsigned char msg[3];
377
378 /* Make sure the watchdog pre-timeout flag is not set at startup. */
379 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
380 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
381 msg[2] = WDT_PRE_TIMEOUT_INT;
382
4f7f5551 383 start_new_msg(smi_info, msg, 3);
1da177e4
LT
384 smi_info->si_state = SI_CLEARING_FLAGS;
385}
386
968bf7cc
CM
387static void start_getting_msg_queue(struct smi_info *smi_info)
388{
389 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
390 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
391 smi_info->curr_msg->data_size = 2;
392
0cfec916
CM
393 start_new_msg(smi_info, smi_info->curr_msg->data,
394 smi_info->curr_msg->data_size);
968bf7cc
CM
395 smi_info->si_state = SI_GETTING_MESSAGES;
396}
397
398static void start_getting_events(struct smi_info *smi_info)
399{
400 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
401 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
402 smi_info->curr_msg->data_size = 2;
403
0cfec916
CM
404 start_new_msg(smi_info, smi_info->curr_msg->data,
405 smi_info->curr_msg->data_size);
968bf7cc
CM
406 smi_info->si_state = SI_GETTING_EVENTS;
407}
408
c305e3d3
CM
/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
4f7f5551 418static inline bool disable_si_irq(struct smi_info *smi_info)
1da177e4 419{
910840f2 420 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
7aefac26 421 smi_info->interrupt_disabled = true;
4f7f5551 422 start_check_enables(smi_info);
968bf7cc 423 return true;
1da177e4 424 }
968bf7cc 425 return false;
1da177e4
LT
426}
427
968bf7cc 428static inline bool enable_si_irq(struct smi_info *smi_info)
1da177e4 429{
910840f2 430 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
7aefac26 431 smi_info->interrupt_disabled = false;
4f7f5551 432 start_check_enables(smi_info);
968bf7cc
CM
433 return true;
434 }
435 return false;
436}
437
438/*
439 * Allocate a message. If unable to allocate, start the interrupt
440 * disable process and return NULL. If able to allocate but
441 * interrupts are disabled, free the message and return NULL after
442 * starting the interrupt enable process.
443 */
444static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
445{
446 struct ipmi_smi_msg *msg;
447
448 msg = ipmi_alloc_smi_msg();
449 if (!msg) {
4f7f5551 450 if (!disable_si_irq(smi_info))
968bf7cc
CM
451 smi_info->si_state = SI_NORMAL;
452 } else if (enable_si_irq(smi_info)) {
453 ipmi_free_smi_msg(msg);
454 msg = NULL;
1da177e4 455 }
968bf7cc 456 return msg;
1da177e4
LT
457}
458
/*
 * Act on the message flags previously fetched from the BMC, in
 * priority order: watchdog pre-timeout first, then queued messages,
 * then the event buffer, then OEM-specific bits.  Re-runs (via
 * "retry") when the OEM handler says flags need re-examination.
 */
static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		/* Per-OEM hook; non-zero return means re-check the flags. */
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}
490
d9b7e4f7
CM
491/*
492 * Global enables we care about.
493 */
494#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
495 IPMI_BMC_EVT_MSG_INTR)
496
95c97b59
CM
/*
 * Compute the global-enable bits the BMC *should* have, given the
 * interface's capabilities and the various brokenness flags.  Sets
 * *irq_on to whether any interrupt-generating enable is requested.
 *
 * NOTE(review): the 'base' parameter is unused in this body and the
 * only visible caller passes 0 — confirm against the rest of the file
 * before relying on it.
 */
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	/*
	 * Request the receive-message interrupt when we have a working,
	 * currently-enabled irq, or when the hardware cannot have the
	 * bit cleared anyway — unless irq enabling is known broken.
	 */
	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}
519
95c97b59
CM
520static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
521{
522 u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
523
524 irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
525
526 if ((bool)irqstate == irq_on)
527 return;
528
529 if (irq_on)
530 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
531 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
532 else
533 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
534}
535
1da177e4
LT
/*
 * Called when the state machine reports SI_SM_TRANSACTION_COMPLETE.
 * Collects the result of the just-finished transaction and advances
 * si_state accordingly.  Must be called with si_lock held; several
 * paths (deliver_recv_msg) release and re-take the lock, which is why
 * curr_msg is snapshotted and cleared before delivery.
 */
static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp("Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn(smi_info->io.dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn(smi_info->io.dev,
				 "Couldn't get irq info: %x.\n", msg[2]);
			dev_warn(smi_info->io.dev,
				 "Maybe ok, but ipmi might run very slowly.\n");
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	}
}
741
c305e3d3
CM
/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 *
 * Drives the low-level state machine until it asks for a delay, then
 * handles completion/error, pending ATTNs, and queued work, restarting
 * from the top after anything that changes the state.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg, because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		goto restart;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got a attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			start_new_msg(smi_info, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disable and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* Ok it if fails, the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}
867
89986496
CM
868static void check_start_timer_thread(struct smi_info *smi_info)
869{
870 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
871 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
872
873 if (smi_info->thread)
874 wake_up_process(smi_info->thread);
875
876 start_next_msg(smi_info);
877 smi_event_handler(smi_info, 0);
878 }
879}
880
82802f96 881static void flush_messages(void *send_info)
e45361d7 882{
82802f96 883 struct smi_info *smi_info = send_info;
e45361d7
HK
884 enum si_sm_result result;
885
886 /*
887 * Currently, this function is called only in run-to-completion
888 * mode. This means we are single-threaded, no need for locks.
889 */
890 result = smi_event_handler(smi_info, 0);
891 while (result != SI_SM_IDLE) {
892 udelay(SI_SHORT_TIMEOUT_USEC);
893 result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
894 }
895}
896
1da177e4 897static void sender(void *send_info,
99ab32f3 898 struct ipmi_smi_msg *msg)
1da177e4
LT
899{
900 struct smi_info *smi_info = send_info;
1da177e4 901 unsigned long flags;
1da177e4 902
f93aae9f 903 debug_timestamp("Enqueue");
1da177e4
LT
904
905 if (smi_info->run_to_completion) {
bda4c30a 906 /*
82802f96
HK
907 * If we are running to completion, start it. Upper
908 * layer will call flush_messages to clear it out.
bda4c30a 909 */
9f812704 910 smi_info->waiting_msg = msg;
1da177e4 911 return;
1da177e4 912 }
1da177e4 913
f60adf42 914 spin_lock_irqsave(&smi_info->si_lock, flags);
1d86e29b
CM
915 /*
916 * The following two lines don't need to be under the lock for
917 * the lock's sake, but they do need SMP memory barriers to
918 * avoid getting things out of order. We are already claiming
919 * the lock, anyway, so just do it under the lock to avoid the
920 * ordering problem.
921 */
922 BUG_ON(smi_info->waiting_msg);
923 smi_info->waiting_msg = msg;
89986496 924 check_start_timer_thread(smi_info);
bda4c30a 925 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1da177e4
LT
926}
927
7aefac26 928static void set_run_to_completion(void *send_info, bool i_run_to_completion)
1da177e4
LT
929{
930 struct smi_info *smi_info = send_info;
1da177e4
LT
931
932 smi_info->run_to_completion = i_run_to_completion;
e45361d7
HK
933 if (i_run_to_completion)
934 flush_messages(smi_info);
1da177e4
LT
935}
936
ae74e823 937/*
8d73b2ae
AB
938 * Use -1 as a special constant to tell that we are spinning in kipmid
939 * looking for something and not delaying between checks
ae74e823 940 */
8d73b2ae 941#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
cbb19cb1
CM
942static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
943 const struct smi_info *smi_info,
8d73b2ae 944 ktime_t *busy_until)
ae74e823
MW
945{
946 unsigned int max_busy_us = 0;
947
57bccb4e
CM
948 if (smi_info->si_num < num_max_busy_us)
949 max_busy_us = kipmid_max_busy_us[smi_info->si_num];
ae74e823 950 if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
8d73b2ae
AB
951 *busy_until = IPMI_TIME_NOT_BUSY;
952 else if (*busy_until == IPMI_TIME_NOT_BUSY) {
953 *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
ae74e823 954 } else {
8d73b2ae
AB
955 if (unlikely(ktime_get() > *busy_until)) {
956 *busy_until = IPMI_TIME_NOT_BUSY;
cbb19cb1 957 return false;
ae74e823
MW
958 }
959 }
cbb19cb1 960 return true;
ae74e823
MW
961}
962
963
964/*
965 * A busy-waiting loop for speeding up IPMI operation.
966 *
967 * Lousy hardware makes this hard. This is only enabled for systems
968 * that are not BT and do not have interrupts. It starts spinning
969 * when an operation is complete or until max_busy tells it to stop
970 * (if that is enabled). See the paragraph on kimid_max_busy_us in
971 * Documentation/IPMI.txt for details.
972 */
a9a2c44f
CM
/*
 * Kernel thread that drives the SMI state machine.  Depending on the
 * state machine's answer it either spins (busy-wait, see
 * ipmi_thread_busy_wait), yields, sleeps briefly, or parks until
 * woken.  Runs until kthread_stop().
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	ktime_t busy_until = IPMI_TIME_NOT_BUSY;

	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler see idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			; /* do nothing */
		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
			/*
			 * In maintenance mode we run as fast as
			 * possible to allow firmware updates to
			 * complete as fast as possible, but normally
			 * don't bang on the scheduler.
			 */
			if (smi_info->in_maintenance_mode)
				schedule();
			else
				usleep_range(100, 200);
		} else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
		} else {
			schedule_timeout_interruptible(1);
		}
	}
	return 0;
}
1027
1028
1da177e4
LT
/*
 * ipmi_smi_handlers .poll op: run the state machine once, taking the
 * lock unless we are in run-to-completion (panic/poweroff) mode.
 */
static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool run_to_completion = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	if (!run_to_completion)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!run_to_completion)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
1046
1047static void request_events(void *send_info)
1048{
1049 struct smi_info *smi_info = send_info;
1050
b874b985 1051 if (!smi_info->has_event_buffer)
b361e27b
CM
1052 return;
1053
1da177e4
LT
1054 atomic_set(&smi_info->req_events, 1);
1055}
1056
c65ea996 1057static void set_need_watch(void *send_info, unsigned int watch_mask)
89986496
CM
1058{
1059 struct smi_info *smi_info = send_info;
1060 unsigned long flags;
c65ea996
CM
1061 int enable;
1062
e1891cff 1063 enable = !!watch_mask;
89986496
CM
1064
1065 atomic_set(&smi_info->need_watch, enable);
1066 spin_lock_irqsave(&smi_info->si_lock, flags);
1067 check_start_timer_thread(smi_info);
1068 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1069}
1070
/*
 * Periodic timer that drives the state machine when interrupts are
 * not doing the job.  Re-arms itself with a short or long timeout
 * depending on what the state machine asked for, and stops entirely
 * once the machine goes idle (the thread/sender restarts it).
 */
static void smi_timeout(struct timer_list *t)
{
	struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;
	long time_diff;
	long timeout;

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp("Timer");

	/* Feed the elapsed time (in usecs) into the state machine. */
	jiffies_now = jiffies;
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
		goto do_mod_timer;
	}

	/*
	 * If the state machine asks for a short delay, then shorten
	 * the timer timeout.
	 */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}

do_mod_timer:
	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
	else
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
1114
/*
 * Shared interrupt handler for all SI types.  For BT the interrupt
 * flag must be acknowledged in the interface's interrupt mask
 * register before the state machine is run.
 */
irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;

	if (smi_info->io.si_type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

	debug_timestamp("Interrupt");

	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}
1136
a567b623
CM
1137static int smi_start_processing(void *send_info,
1138 struct ipmi_smi *intf)
453823ba
CM
1139{
1140 struct smi_info *new_smi = send_info;
a51f4a81 1141 int enable = 0;
453823ba
CM
1142
1143 new_smi->intf = intf;
1144
1145 /* Set up the timer that drives the interface. */
e99e88a9 1146 timer_setup(&new_smi->si_timer, smi_timeout, 0);
4f7f5551 1147 new_smi->timer_can_start = true;
48e8ac29 1148 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
453823ba 1149
27f972d3 1150 /* Try to claim any interrupts. */
4f3e8199
CM
1151 if (new_smi->io.irq_setup) {
1152 new_smi->io.irq_handler_data = new_smi;
1153 new_smi->io.irq_setup(&new_smi->io);
1154 }
27f972d3 1155
a51f4a81
CM
1156 /*
1157 * Check if the user forcefully enabled the daemon.
1158 */
57bccb4e
CM
1159 if (new_smi->si_num < num_force_kipmid)
1160 enable = force_kipmid[new_smi->si_num];
df3fe8de
CM
1161 /*
1162 * The BT interface is efficient enough to not need a thread,
1163 * and there is no need for a thread if we have interrupts.
1164 */
910840f2 1165 else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
a51f4a81
CM
1166 enable = 1;
1167
1168 if (enable) {
453823ba 1169 new_smi->thread = kthread_run(ipmi_thread, new_smi,
57bccb4e 1170 "kipmi%d", new_smi->si_num);
453823ba 1171 if (IS_ERR(new_smi->thread)) {
910840f2 1172 dev_notice(new_smi->io.dev, "Could not start"
279fbd0c
MS
1173 " kernel thread due to error %ld, only using"
1174 " timers to drive the interface\n",
1175 PTR_ERR(new_smi->thread));
453823ba
CM
1176 new_smi->thread = NULL;
1177 }
1178 }
1179
1180 return 0;
1181}
9dbf68f9 1182
16f4232c
ZY
1183static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1184{
1185 struct smi_info *smi = send_info;
1186
910840f2
CM
1187 data->addr_src = smi->io.addr_source;
1188 data->dev = smi->io.dev;
bb398a4c 1189 data->addr_info = smi->io.addr_info;
910840f2 1190 get_device(smi->io.dev);
16f4232c
ZY
1191
1192 return 0;
1193}
1194
/*
 * ipmi_smi_handlers .set_maintenance_mode op.  Leaving maintenance
 * mode drops any pending event-fetch request; entering it makes
 * ipmi_thread spin harder (see ipmi_thread).
 */
static void set_maintenance_mode(void *send_info, bool enable)
{
	struct smi_info *smi_info = send_info;

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
	smi_info->in_maintenance_mode = enable;
}
1203
static void shutdown_smi(void *send_info);

/* Ops vector handed to the IPMI message-handling layer. */
static const struct ipmi_smi_handlers handlers = {
	.owner                  = THIS_MODULE,
	.start_processing       = smi_start_processing,
	.shutdown               = shutdown_smi,
	.get_smi_info		= get_smi_info,
	.sender			= sender,
	.request_events		= request_events,
	.set_need_watch		= set_need_watch,
	.set_maintenance_mode   = set_maintenance_mode,
	.set_run_to_completion  = set_run_to_completion,
	.flush_messages		= flush_messages,
	.poll			= poll,
};
1218
/* All known interfaces; additions/removals guarded by smi_infos_lock. */
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

/* Indexed by an interface's addr_space (port I/O vs. memory-mapped). */
static const char * const addr_space_to_str[] = { "i/o", "mem" };

module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
		 " disabled(0). Normally the IPMI driver auto-detects"
		 " this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
		 " specified or found, default is 1. Setting to 0"
		 " is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before"
		 " sleeping. 0 (default) means to wait forever. Set to 100-500"
		 " if kipmid is using up a lot of CPU time.");
1da177e4 1238
4f3e8199
CM
/* Post-request_irq setup: BT needs its irq enabled in the intmask reg. */
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Enable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG,
			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}
1da177e4 1246
/* First step of irq teardown: mask the interrupt at the BT interface. */
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
1253
/* io->irq_cleanup hook for irqs claimed by ipmi_std_irq_setup(). */
static void std_irq_cleanup(struct si_sm_io *io)
{
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);
}
1da177e4 1259
4f3e8199 1260int ipmi_std_irq_setup(struct si_sm_io *io)
1da177e4
LT
1261{
1262 int rv;
1263
4f3e8199 1264 if (!io->irq)
1da177e4
LT
1265 return 0;
1266
4f3e8199
CM
1267 rv = request_irq(io->irq,
1268 ipmi_si_irq_handler,
1269 IRQF_SHARED,
104fb25f 1270 SI_DEVICE_NAME,
4f3e8199 1271 io->irq_handler_data);
1da177e4 1272 if (rv) {
4f3e8199 1273 dev_warn(io->dev, "%s unable to claim interrupt %d,"
279fbd0c 1274 " running polled\n",
104fb25f 1275 SI_DEVICE_NAME, io->irq);
4f3e8199 1276 io->irq = 0;
1da177e4 1277 } else {
4f3e8199
CM
1278 io->irq_cleanup = std_irq_cleanup;
1279 ipmi_irq_finish_setup(io);
1280 dev_info(io->dev, "Using irq %d\n", io->irq);
1da177e4
LT
1281 }
1282
1283 return rv;
1284}
1285
/*
 * Poll the state machine until the transaction in flight completes.
 * Sleeps between polls (schedule_timeout_uninterruptible), so this
 * must not be called from atomic context.  Returns 0 on completion,
 * -ENODEV if the state machine is hosed.
 */
static int wait_for_msg_done(struct smi_info *smi_info)
{
	enum si_sm_result smi_result;

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			/* Machine wants time to pass; sleep a tick. */
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		} else
			break;
	}
	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
		 */
		return -ENODEV;

	return 0;
}
1312
/*
 * Issue a Get Device ID command and record the parsed result in
 * smi_info->device_id.  Used at init both to verify a BMC is really
 * present and to capture identity info for later quirk handling.
 * Returns 0 on success or a negative errno.
 */
static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char msg[2];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv)
		goto out;

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Check and record info from the get device id, in case we need it. */
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
				     resp + 2, resp_len - 2, &smi_info->device_id);

out:
	kfree(resp);
	return rv;
}
1347
/*
 * Read the BMC global enables byte into *enables via a Get BMC Global
 * Enables command.  Returns 0 on success or a negative errno.
 */
static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Expect netfn|1, echoed command, zero completion code, data byte. */
	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);
		rv = -EINVAL;
		goto out;
	} else {
		*enables = resp[3];
	}

out:
	kfree(resp);
	return rv;
}
1391
/*
 * Write the BMC global enables byte via a Set BMC Global Enables
 * command.  Returns 1 if the BMC answered with a non-zero completion
 * code (callers use this to detect unsupported bits), a negative
 * errno on transport/format failure, 0 on success.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = enables;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);
		rv = -EINVAL;
		goto out;
	}

	/* Non-zero completion code from the BMC. */
	if (resp[2] != 0)
		rv = 1;

out:
	kfree(resp);
	return rv;
}
1439
/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */
			return;

		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * A completion error from the set command means
		 * clearing the receive irq bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}
1476
/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	/* Only relevant when an interrupt is actually configured. */
	if (!smi_info->io.irq)
		return;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * A completion error from the set command means
		 * setting the receive irq bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}
1513
40112ae7
CM
1514static int try_enable_event_buffer(struct smi_info *smi_info)
1515{
1516 unsigned char msg[3];
1517 unsigned char *resp;
1518 unsigned long resp_len;
1519 int rv = 0;
1520
1521 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1522 if (!resp)
1523 return -ENOMEM;
1524
1525 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1526 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1527 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1528
1529 rv = wait_for_msg_done(smi_info);
1530 if (rv) {
25880f7d 1531 pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
40112ae7
CM
1532 goto out;
1533 }
1534
1535 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1536 resp, IPMI_MAX_MSG_LENGTH);
1537
1538 if (resp_len < 4 ||
1539 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1540 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
1541 resp[2] != 0) {
25880f7d 1542 pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
40112ae7
CM
1543 rv = -EINVAL;
1544 goto out;
1545 }
1546
d9b7e4f7 1547 if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
40112ae7 1548 /* buffer is already enabled, nothing to do. */
d9b7e4f7 1549 smi_info->supports_event_msg_buff = true;
40112ae7 1550 goto out;
d9b7e4f7 1551 }
40112ae7
CM
1552
1553 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1554 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1555 msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
1556 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1557
1558 rv = wait_for_msg_done(smi_info);
1559 if (rv) {
25880f7d 1560 pr_warn("Error getting response from set global, enables command, the event buffer is not enabled\n");
40112ae7
CM
1561 goto out;
1562 }
1563
1564 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1565 resp, IPMI_MAX_MSG_LENGTH);
1566
1567 if (resp_len < 3 ||
1568 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1569 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
25880f7d 1570 pr_warn("Invalid return from get global, enables command, not enable the event buffer\n");
40112ae7
CM
1571 rv = -EINVAL;
1572 goto out;
1573 }
1574
1575 if (resp[2] != 0)
1576 /*
1577 * An error when setting the event buffer bit means
1578 * that the event buffer is not supported.
1579 */
1580 rv = -ENOENT;
d9b7e4f7
CM
1581 else
1582 smi_info->supports_event_msg_buff = true;
1583
76824852 1584out:
40112ae7
CM
1585 kfree(resp);
1586 return rv;
1587}
1588
/*
 * Per-interface sysfs attributes.  IPMI_SI_ATTR(name) expands to a
 * name_show() that prints the matching statistic via smi_get_stat()
 * plus a read-only DEVICE_ATTR for it.
 */
#define IPMI_SI_ATTR(name) \
static ssize_t name##_show(struct device *dev, \
			   struct device_attribute *attr, \
			   char *buf) \
{ \
	struct smi_info *smi_info = dev_get_drvdata(dev); \
	\
	return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
} \
static DEVICE_ATTR(name, 0444, name##_show, NULL)

/* Interface type name from si_to_str[]. */
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR(type, 0444, type_show, NULL);

/* 1 if an irq is assigned and interrupts have not been disabled. */
static ssize_t interrupts_enabled_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);
	int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;

	return snprintf(buf, 10, "%d\n", enabled);
}
static DEVICE_ATTR(interrupts_enabled, 0444,
		   interrupts_enabled_show, NULL);

IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts);
IPMI_SI_ATTR(incoming_messages);

/* One-line summary of the interface's I/O configuration. */
static ssize_t params_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return snprintf(buf, 200,
			"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
			si_to_str[smi_info->io.si_type],
			addr_space_to_str[smi_info->io.addr_space],
			smi_info->io.addr_data,
			smi_info->io.regspacing,
			smi_info->io.regsize,
			smi_info->io.regshift,
			smi_info->io.irq,
			smi_info->io.slave_addr);
}
static DEVICE_ATTR(params, 0444, params_show, NULL);

static struct attribute *ipmi_si_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_interrupts_enabled.attr,
	&dev_attr_short_timeouts.attr,
	&dev_attr_long_timeouts.attr,
	&dev_attr_idles.attr,
	&dev_attr_interrupts.attr,
	&dev_attr_attentions.attr,
	&dev_attr_flag_fetches.attr,
	&dev_attr_hosed_count.attr,
	&dev_attr_complete_transactions.attr,
	&dev_attr_events.attr,
	&dev_attr_watchdog_pretimeouts.attr,
	&dev_attr_incoming_messages.attr,
	&dev_attr_params.attr,
	NULL
};

static const struct attribute_group ipmi_si_dev_attr_group = {
	.attrs = ipmi_si_dev_attrs,
};
1674
3ae0e0f9
CM
1675/*
1676 * oem_data_avail_to_receive_msg_avail
1677 * @info - smi_info structure with msg_flags set
1678 *
1679 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
1680 * Returns 1 indicating need to re-run handle_flags().
1681 */
1682static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
1683{
e8b33617 1684 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
c305e3d3 1685 RECEIVE_MSG_AVAIL);
3ae0e0f9
CM
1686 return 1;
1687}
1688
/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40 may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * assert the OEM[012] bits, and if it did, the driver would have to
 * change to handle that properly, we don't actually check for the
 * firmware version.
 * Device ID = 0x20                BMC on PowerEdge 8G servers
 * Device Revision = 0x80
 * Firmware Revision1 = 0x01      BMC version 1.40
 * Firmware Revision2 = 0x40      BCD encoded
 * IPMI Version = 0x51            IPMI 1.5
 * Manufacturer ID = A2 02 00     Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;

	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
		/* PowerEdge 8G BMC at exactly the known revision. */
		if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		} else if (ipmi_version_major(id) < 1 ||
			   (ipmi_version_major(id) == 1 &&
			    ipmi_version_minor(id) < 5)) {
			/* Any Dell BMC reporting IPMI < 1.5. */
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		}
	}
}
1735
ea94027b
CM
/* IPMI completion code: cannot return the requested number of bytes. */
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
/*
 * Synthesize a 0xCA error response for the current message and
 * deliver it to the upper layer, ending the transaction locally.
 */
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
	msg->rsp_size = 3;
	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}
1749
/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */

#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
					     void *in)
{
	struct smi_info *smi_info = in;
	unsigned char *data = smi_info->curr_msg->data;
	unsigned int size = smi_info->curr_msg->data_size;

	/* Get SDR asking for exactly 0x3A bytes? Fail it locally. */
	if (size >= 8 &&
	    (data[0]>>2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
	    data[7] == 0x3A) {
		return_hosed_msg_badsize(smi_info);
		return NOTIFY_STOP;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call	= dell_poweredge_bt_xaction_handler,
};
1783
/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Registers the Dell BT transaction-quirk notifier when this is a
 * Dell BMC using the BT interface.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;

	if (id->manufacturer_id == DELL_IANA_MFR_ID &&
	    smi_info->io.si_type == SI_BT)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
1799
3ae0e0f9
CM
/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.oem_data_available_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	/* Currently only Dell PowerEdge quirks exist. */
	setup_dell_poweredge_oem_data_handler(smi_info);
}
1812
ea94027b
CM
/* Install any vendor-specific transaction hooks for this interface. */
static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}
1817
d0882897
CM
/* Probe for BMCs that mishandle the receive-irq global enable bit. */
static void check_for_broken_irqs(struct smi_info *smi_info)
{
	check_clr_rcv_irq(smi_info);
	check_set_rcv_irq(smi_info);
}
1823
/* Stop the kipmid thread (if any) and shut down the driving timer. */
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
	if (smi_info->thread != NULL) {
		kthread_stop(smi_info->thread);
		smi_info->thread = NULL;
	}

	/* Keep the timer from being re-armed, then wait it out. */
	smi_info->timer_can_start = false;
	del_timer_sync(&smi_info->si_timer);
}
1834
/*
 * Look for an already-listed interface with the same address space
 * and address as *info.  Returns the duplicate or NULL.  Walks
 * smi_infos, so the caller holds smi_infos_lock.
 */
static struct smi_info *find_dup_si(struct smi_info *info)
{
	struct smi_info *e;

	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.addr_space != info->io.addr_space)
			continue;
		if (e->io.addr_data == info->io.addr_data) {
			/*
			 * This is a cheap hack, ACPI doesn't have a defined
			 * slave address but SMBIOS does.  Pick it up from
			 * any source that has it available.
			 */
			if (info->io.slave_addr && !e->io.slave_addr)
				e->io.slave_addr = info->io.slave_addr;
			return e;
		}
	}

	return NULL;
}
b0defcdb 1855}
1da177e4 1856
/*
 * Register a new system interface described by *io.  Takes a private
 * copy of *io, rejects duplicates (preferring ACPI-discovered entries
 * over SMBIOS ones), links the interface into smi_infos and, if the
 * driver is already initialized, tries to start it immediately.
 * Returns 0 on success or a negative errno.
 */
int ipmi_si_add_smi(struct si_sm_io *io)
{
	int rv = 0;
	struct smi_info *new_smi, *dup;

	/*
	 * If the user gave us a hard-coded device at the same
	 * address, they presumably want us to use it and not what is
	 * in the firmware.
	 */
	if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
	    ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
		dev_info(io->dev,
			 "Hard-coded device at this address already exists");
		return -ENODEV;
	}

	/* Pick a default io_setup based on the address space. */
	if (!io->io_setup) {
		if (io->addr_space == IPMI_IO_ADDR_SPACE) {
			io->io_setup = ipmi_si_port_setup;
		} else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
			io->io_setup = ipmi_si_mem_setup;
		} else {
			return -EINVAL;
		}
	}

	new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
	if (!new_smi)
		return -ENOMEM;
	spin_lock_init(&new_smi->si_lock);

	new_smi->io = *io;

	mutex_lock(&smi_infos_lock);
	dup = find_dup_si(new_smi);
	if (dup) {
		if (new_smi->io.addr_source == SI_ACPI &&
		    dup->io.addr_source == SI_SMBIOS) {
			/* We prefer ACPI over SMBIOS. */
			dev_info(dup->io.dev,
				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
				 si_to_str[new_smi->io.si_type]);
			cleanup_one_si(dup);
		} else {
			dev_info(new_smi->io.dev,
				 "%s-specified %s state machine: duplicate\n",
				 ipmi_addr_src_to_str(new_smi->io.addr_source),
				 si_to_str[new_smi->io.si_type]);
			rv = -EBUSY;
			kfree(new_smi);
			goto out_err;
		}
	}

	pr_info("Adding %s-specified %s state machine\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type]);

	list_add_tail(&new_smi->link, &smi_infos);

	/* Start it now only if module init has already run. */
	if (initialized)
		rv = try_smi_init(new_smi);
out_err:
	mutex_unlock(&smi_infos_lock);
	return rv;
}
1924
3f724c40
TC
1925/*
1926 * Try to start up an interface. Must be called with smi_infos_lock
1927 * held, primarily to keep smi_num consistent, we only one to do these
1928 * one at a time.
1929 */
2407d77a
MG
1930static int try_smi_init(struct smi_info *new_smi)
1931{
1932 int rv = 0;
1933 int i;
1934
25880f7d 1935 pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
910840f2
CM
1936 ipmi_addr_src_to_str(new_smi->io.addr_source),
1937 si_to_str[new_smi->io.si_type],
f6296bdc 1938 addr_space_to_str[new_smi->io.addr_space],
bb2a08c0 1939 new_smi->io.addr_data,
910840f2 1940 new_smi->io.slave_addr, new_smi->io.irq);
2407d77a 1941
910840f2 1942 switch (new_smi->io.si_type) {
b0defcdb 1943 case SI_KCS:
1da177e4 1944 new_smi->handlers = &kcs_smi_handlers;
b0defcdb
CM
1945 break;
1946
1947 case SI_SMIC:
1da177e4 1948 new_smi->handlers = &smic_smi_handlers;
b0defcdb
CM
1949 break;
1950
1951 case SI_BT:
1da177e4 1952 new_smi->handlers = &bt_smi_handlers;
b0defcdb
CM
1953 break;
1954
1955 default:
1da177e4
LT
1956 /* No support for anything else yet. */
1957 rv = -EIO;
1958 goto out_err;
1959 }
1960
57bccb4e 1961 new_smi->si_num = smi_num;
3f724c40 1962
1abf71ee 1963 /* Do this early so it's available for logs. */
910840f2 1964 if (!new_smi->io.dev) {
90b2d4f1
CM
1965 pr_err("IPMI interface added with no device\n");
1966 rv = EIO;
1967 goto out_err;
1abf71ee
CM
1968 }
1969
1da177e4
LT
1970 /* Allocate the state machine's data and initialize it. */
1971 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
b0defcdb 1972 if (!new_smi->si_sm) {
1da177e4
LT
1973 rv = -ENOMEM;
1974 goto out_err;
1975 }
e1eeb7f8
CM
1976 new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
1977 &new_smi->io);
1da177e4
LT
1978
1979 /* Now that we know the I/O size, we can set up the I/O. */
e1eeb7f8 1980 rv = new_smi->io.io_setup(&new_smi->io);
1da177e4 1981 if (rv) {
910840f2 1982 dev_err(new_smi->io.dev, "Could not set up I/O space\n");
1da177e4
LT
1983 goto out_err;
1984 }
1985
1da177e4
LT
1986 /* Do low-level detection first. */
1987 if (new_smi->handlers->detect(new_smi->si_sm)) {
910840f2
CM
1988 if (new_smi->io.addr_source)
1989 dev_err(new_smi->io.dev,
1990 "Interface detection failed\n");
1da177e4
LT
1991 rv = -ENODEV;
1992 goto out_err;
1993 }
1994
c305e3d3
CM
1995 /*
1996 * Attempt a get device id command. If it fails, we probably
1997 * don't have a BMC here.
1998 */
1da177e4 1999 rv = try_get_dev_id(new_smi);
b0defcdb 2000 if (rv) {
910840f2
CM
2001 if (new_smi->io.addr_source)
2002 dev_err(new_smi->io.dev,
2003 "There appears to be no BMC at this location\n");
1da177e4 2004 goto out_err;
b0defcdb 2005 }
1da177e4 2006
3ae0e0f9 2007 setup_oem_data_handler(new_smi);
ea94027b 2008 setup_xaction_handlers(new_smi);
d0882897 2009 check_for_broken_irqs(new_smi);
3ae0e0f9 2010
b874b985 2011 new_smi->waiting_msg = NULL;
1da177e4
LT
2012 new_smi->curr_msg = NULL;
2013 atomic_set(&new_smi->req_events, 0);
7aefac26 2014 new_smi->run_to_completion = false;
64959e2d
CM
2015 for (i = 0; i < SI_NUM_STATS; i++)
2016 atomic_set(&new_smi->stats[i], 0);
1da177e4 2017
7aefac26 2018 new_smi->interrupt_disabled = true;
89986496 2019 atomic_set(&new_smi->need_watch, 0);
1da177e4 2020
40112ae7
CM
2021 rv = try_enable_event_buffer(new_smi);
2022 if (rv == 0)
7aefac26 2023 new_smi->has_event_buffer = true;
40112ae7 2024
c305e3d3
CM
2025 /*
2026 * Start clearing the flags before we enable interrupts or the
2027 * timer to avoid racing with the timer.
2028 */
4f7f5551 2029 start_clear_flags(new_smi);
d9b7e4f7
CM
2030
2031 /*
2032 * IRQ is defined to be set when non-zero. req_events will
2033 * cause a global flags check that will enable interrupts.
2034 */
910840f2 2035 if (new_smi->io.irq) {
d9b7e4f7
CM
2036 new_smi->interrupt_disabled = false;
2037 atomic_set(&new_smi->req_events, 1);
2038 }
1da177e4 2039
3dd377b5
CM
2040 dev_set_drvdata(new_smi->io.dev, new_smi);
2041 rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
2042 if (rv) {
2043 dev_err(new_smi->io.dev,
2044 "Unable to add device attributes: error %d\n",
2045 rv);
71404a2f 2046 goto out_err;
3dd377b5 2047 }
cc095f0a 2048 new_smi->dev_group_added = true;
3dd377b5 2049
1da177e4
LT
2050 rv = ipmi_register_smi(&handlers,
2051 new_smi,
910840f2
CM
2052 new_smi->io.dev,
2053 new_smi->io.slave_addr);
1da177e4 2054 if (rv) {
910840f2
CM
2055 dev_err(new_smi->io.dev,
2056 "Unable to register device: error %d\n",
279fbd0c 2057 rv);
71404a2f 2058 goto out_err;
1da177e4
LT
2059 }
2060
3f724c40
TC
2061 /* Don't increment till we know we have succeeded. */
2062 smi_num++;
2063
910840f2
CM
2064 dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
2065 si_to_str[new_smi->io.si_type]);
1da177e4 2066
910840f2 2067 WARN_ON(new_smi->io.dev->init_name != NULL);
b0defcdb 2068
2512e40e 2069 out_err:
401e7e88
YY
2070 if (rv && new_smi->io.io_cleanup) {
2071 new_smi->io.io_cleanup(&new_smi->io);
2072 new_smi->io.io_cleanup = NULL;
2073 }
2074
1da177e4
LT
2075 return rv;
2076}
2077
41b766d6 2078static int __init init_ipmi_si(void)
1da177e4 2079{
2407d77a 2080 struct smi_info *e;
06ee4594 2081 enum ipmi_addr_src type = SI_INVALID;
1da177e4
LT
2082
2083 if (initialized)
2084 return 0;
1da177e4 2085
41b766d6 2086 ipmi_hardcode_init();
1da177e4 2087
41b766d6 2088 pr_info("IPMI System Interface driver\n");
d8cc5267 2089
9d70029e
CM
2090 ipmi_si_platform_init();
2091
13d0b35c 2092 ipmi_si_pci_init();
b0defcdb 2093
c6f85a75 2094 ipmi_si_parisc_init();
fdbeb7de 2095
06ee4594
MG
2096 /* We prefer devices with interrupts, but in the case of a machine
2097 with multiple BMCs we assume that there will be several instances
2098 of a given type so if we succeed in registering a type then also
2099 try to register everything else of the same type */
2407d77a
MG
2100 mutex_lock(&smi_infos_lock);
2101 list_for_each_entry(e, &smi_infos, link) {
06ee4594
MG
2102 /* Try to register a device if it has an IRQ and we either
2103 haven't successfully registered a device yet or this
2104 device has the same type as one we successfully registered */
910840f2 2105 if (e->io.irq && (!type || e->io.addr_source == type)) {
d8cc5267 2106 if (!try_smi_init(e)) {
910840f2 2107 type = e->io.addr_source;
d8cc5267
MG
2108 }
2109 }
2110 }
2111
06ee4594 2112 /* type will only have been set if we successfully registered an si */
bb398a4c
CM
2113 if (type)
2114 goto skip_fallback_noirq;
06ee4594 2115
d8cc5267
MG
2116 /* Fall back to the preferred device */
2117
2118 list_for_each_entry(e, &smi_infos, link) {
910840f2 2119 if (!e->io.irq && (!type || e->io.addr_source == type)) {
d8cc5267 2120 if (!try_smi_init(e)) {
910840f2 2121 type = e->io.addr_source;
d8cc5267
MG
2122 }
2123 }
2407d77a 2124 }
bb398a4c
CM
2125
2126skip_fallback_noirq:
dd7450ca 2127 initialized = true;
2407d77a
MG
2128 mutex_unlock(&smi_infos_lock);
2129
06ee4594
MG
2130 if (type)
2131 return 0;
2132
d6dfd131 2133 mutex_lock(&smi_infos_lock);
b361e27b 2134 if (unload_when_empty && list_empty(&smi_infos)) {
d6dfd131 2135 mutex_unlock(&smi_infos_lock);
d2478521 2136 cleanup_ipmi_si();
25880f7d 2137 pr_warn("Unable to find any System Interface(s)\n");
1da177e4 2138 return -ENODEV;
b0defcdb 2139 } else {
d6dfd131 2140 mutex_unlock(&smi_infos_lock);
b0defcdb 2141 return 0;
1da177e4 2142 }
1da177e4
LT
2143}
2144module_init(init_ipmi_si);
2145
/*
 * Tear down a running interface: detach it from the driver model,
 * silence its interrupt/timer/thread sources, drain any in-flight
 * transaction, and release its resources.  The teardown order below
 * is deliberate; do not reorder these steps.
 *
 * send_info is the struct smi_info * registered with ipmi_register_smi().
 */
static void shutdown_smi(void *send_info)
{
	struct smi_info *smi_info = send_info;

	/* Remove the sysfs attribute group only if we actually added it. */
	if (smi_info->dev_group_added) {
		device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
		smi_info->dev_group_added = false;
	}
	if (smi_info->io.dev)
		dev_set_drvdata(smi_info->io.dev, NULL);

	/*
	 * Make sure that interrupts, the timer and the thread are
	 * stopped and will not run again.
	 */
	smi_info->interrupt_disabled = true;
	if (smi_info->io.irq_cleanup) {
		smi_info->io.irq_cleanup(&smi_info->io);
		smi_info->io.irq_cleanup = NULL;
	}
	stop_timer_and_thread(smi_info);

	/*
	 * Wait until we know that we are out of any interrupt
	 * handlers might have been running before we freed the
	 * interrupt.
	 */
	synchronize_rcu();

	/*
	 * Timeouts are stopped, now make sure the interrupts are off
	 * in the BMC.  Note that timers and CPU interrupts are off,
	 * so no need for locks.
	 */
	/* First drain: poll until any in-flight transaction completes. */
	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		poll(smi_info);
		schedule_timeout_uninterruptible(1);
	}
	if (smi_info->handlers)
		disable_si_irq(smi_info);
	/*
	 * Second drain: disable_si_irq() may itself have started a
	 * message to the BMC, so wait for that one to finish too.
	 */
	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		poll(smi_info);
		schedule_timeout_uninterruptible(1);
	}
	if (smi_info->handlers)
		smi_info->handlers->cleanup(smi_info->si_sm);

	/* Release source-specific resources (e.g. ACPI/platform hooks). */
	if (smi_info->io.addr_source_cleanup) {
		smi_info->io.addr_source_cleanup(&smi_info->io);
		smi_info->io.addr_source_cleanup = NULL;
	}
	/* Release the I/O or memory-mapped region. */
	if (smi_info->io.io_cleanup) {
		smi_info->io.io_cleanup(&smi_info->io);
		smi_info->io.io_cleanup = NULL;
	}

	kfree(smi_info->si_sm);
	smi_info->si_sm = NULL;

	/* Mark unregistered; cleanup_one_si() checks this under the lock. */
	smi_info->intf = NULL;
}
2207
93c303d2
CM
2208/*
2209 * Must be called with smi_infos_lock held, to serialize the
2210 * smi_info->intf check.
2211 */
71404a2f
CM
2212static void cleanup_one_si(struct smi_info *smi_info)
2213{
2214 if (!smi_info)
2215 return;
1da177e4 2216
71404a2f 2217 list_del(&smi_info->link);
1da177e4 2218
2512e40e 2219 if (smi_info->intf)
93c303d2 2220 ipmi_unregister_smi(smi_info->intf);
50c812b2 2221
71404a2f 2222 kfree(smi_info);
1da177e4
LT
2223}
2224
bb398a4c
CM
2225int ipmi_si_remove_by_dev(struct device *dev)
2226{
2227 struct smi_info *e;
2228 int rv = -ENOENT;
2229
2230 mutex_lock(&smi_infos_lock);
2231 list_for_each_entry(e, &smi_infos, link) {
2232 if (e->io.dev == dev) {
2233 cleanup_one_si(e);
2234 rv = 0;
2235 break;
2236 }
2237 }
2238 mutex_unlock(&smi_infos_lock);
2239
2240 return rv;
2241}
2242
bdb57b7b
CM
2243struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
2244 unsigned long addr)
44814ec9
CM
2245{
2246 /* remove */
2247 struct smi_info *e, *tmp_e;
bdb57b7b 2248 struct device *dev = NULL;
44814ec9
CM
2249
2250 mutex_lock(&smi_infos_lock);
2251 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
f6296bdc 2252 if (e->io.addr_space != addr_space)
44814ec9
CM
2253 continue;
2254 if (e->io.si_type != si_type)
2255 continue;
bdb57b7b
CM
2256 if (e->io.addr_data == addr) {
2257 dev = get_device(e->io.dev);
44814ec9 2258 cleanup_one_si(e);
bdb57b7b 2259 }
44814ec9
CM
2260 }
2261 mutex_unlock(&smi_infos_lock);
bdb57b7b
CM
2262
2263 return dev;
44814ec9
CM
2264}
2265
0dcf334c 2266static void cleanup_ipmi_si(void)
1da177e4 2267{
b0defcdb 2268 struct smi_info *e, *tmp_e;
1da177e4 2269
b0defcdb 2270 if (!initialized)
1da177e4
LT
2271 return;
2272
13d0b35c 2273 ipmi_si_pci_shutdown();
c6f85a75
CM
2274
2275 ipmi_si_parisc_shutdown();
b0defcdb 2276
9d70029e 2277 ipmi_si_platform_shutdown();
dba9b4f6 2278
d6dfd131 2279 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2280 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2281 cleanup_one_si(e);
d6dfd131 2282 mutex_unlock(&smi_infos_lock);
41b766d6
CM
2283
2284 ipmi_si_hardcode_exit();
bdb57b7b 2285 ipmi_si_hotmod_exit();
1da177e4
LT
2286}
2287module_exit(cleanup_ipmi_si);
2288
0944d889 2289MODULE_ALIAS("platform:dmi-ipmi-si");
1da177e4 2290MODULE_LICENSE("GPL");
1fdd75bd 2291MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
c305e3d3
CM
2292MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
2293 " system interfaces.");