[PATCH] irq-flags: drivers/char: Use the new IRQF_ constants
[linux-block.git] / drivers / char / ipmi / ipmi_si_intf.c
1da177e4
LT
1/*
2 * ipmi_si.c
3 *
4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5 * BT).
6 *
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com
10 *
11 * Copyright 2002 MontaVista Software Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 *
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35/*
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
39 */
40
1da177e4
LT
41#include <linux/module.h>
42#include <linux/moduleparam.h>
43#include <asm/system.h>
44#include <linux/sched.h>
45#include <linux/timer.h>
46#include <linux/errno.h>
47#include <linux/spinlock.h>
48#include <linux/slab.h>
49#include <linux/delay.h>
50#include <linux/list.h>
51#include <linux/pci.h>
52#include <linux/ioport.h>
ea94027b 53#include <linux/notifier.h>
b0defcdb 54#include <linux/mutex.h>
e9a705a0 55#include <linux/kthread.h>
1da177e4 56#include <asm/irq.h>
1da177e4
LT
57#include <linux/interrupt.h>
58#include <linux/rcupdate.h>
59#include <linux/ipmi_smi.h>
60#include <asm/io.h>
61#include "ipmi_si_sm.h"
62#include <linux/init.h>
b224cd3a 63#include <linux/dmi.h>
1da177e4
LT
64
65/* Measure times between events in the driver. */
66#undef DEBUG_TIMING
67
68/* Call every 10 ms. */
69#define SI_TIMEOUT_TIME_USEC 10000
70#define SI_USEC_PER_JIFFY (1000000/HZ)
71#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
72#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
73 short timeout */
74
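/*
 * Worked example of the timeout arithmetic above (HZ values are
 * illustrative): with HZ == 1000, SI_USEC_PER_JIFFY is 1000000/1000 =
 * 1000, so SI_TIMEOUT_JIFFIES is 10000/1000 = 10 jiffies and the timer
 * fires every 10 ms as intended.  With HZ == 250 the division gives
 * 4000 usec per jiffy and SI_TIMEOUT_JIFFIES truncates to
 * 10000/4000 = 2 jiffies, i.e. about 8 ms between timer runs.
 */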
75enum si_intf_state {
76 SI_NORMAL,
77 SI_GETTING_FLAGS,
78 SI_GETTING_EVENTS,
79 SI_CLEARING_FLAGS,
80 SI_CLEARING_FLAGS_THEN_SET_IRQ,
81 SI_GETTING_MESSAGES,
82 SI_ENABLE_INTERRUPTS1,
83 SI_ENABLE_INTERRUPTS2
84 /* FIXME - add watchdog stuff. */
85};
86
9dbf68f9
CM
87/* Some BT-specific defines we need here. */
88#define IPMI_BT_INTMASK_REG 2
89#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
90#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
91
1da177e4
LT
92enum si_type {
93 SI_KCS, SI_SMIC, SI_BT
94};
b0defcdb 95static char *si_to_str[] = { "KCS", "SMIC", "BT" };
1da177e4 96
50c812b2
CM
97#define DEVICE_NAME "ipmi_si"
98
99static struct device_driver ipmi_driver =
100{
101 .name = DEVICE_NAME,
102 .bus = &platform_bus_type
103};
3ae0e0f9 104
1da177e4
LT
105struct smi_info
106{
a9a2c44f 107 int intf_num;
1da177e4
LT
108 ipmi_smi_t intf;
109 struct si_sm_data *si_sm;
110 struct si_sm_handlers *handlers;
111 enum si_type si_type;
112 spinlock_t si_lock;
113 spinlock_t msg_lock;
114 struct list_head xmit_msgs;
115 struct list_head hp_xmit_msgs;
116 struct ipmi_smi_msg *curr_msg;
117 enum si_intf_state si_state;
118
119 /* Used to handle the various types of I/O that can occur with
120 IPMI */
121 struct si_sm_io io;
122 int (*io_setup)(struct smi_info *info);
123 void (*io_cleanup)(struct smi_info *info);
124 int (*irq_setup)(struct smi_info *info);
125 void (*irq_cleanup)(struct smi_info *info);
126 unsigned int io_size;
b0defcdb
CM
127 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
128 void (*addr_source_cleanup)(struct smi_info *info);
129 void *addr_source_data;
1da177e4 130
3ae0e0f9
CM
131 /* Per-OEM handler, called from handle_flags().
132 Returns 1 when handle_flags() needs to be re-run
133 or 0 indicating it set si_state itself.
134 */
135 int (*oem_data_avail_handler)(struct smi_info *smi_info);
136
1da177e4
LT
137 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
138 is set to hold the flags until we are done handling everything
139 from the flags. */
140#define RECEIVE_MSG_AVAIL 0x01
141#define EVENT_MSG_BUFFER_FULL 0x02
142#define WDT_PRE_TIMEOUT_INT 0x08
3ae0e0f9
CM
143#define OEM0_DATA_AVAIL 0x20
144#define OEM1_DATA_AVAIL 0x40
145#define OEM2_DATA_AVAIL 0x80
146#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
147 OEM1_DATA_AVAIL | \
148 OEM2_DATA_AVAIL)
1da177e4
LT
149 unsigned char msg_flags;
150
151 /* If set to true, this will request events the next time the
152 state machine is idle. */
153 atomic_t req_events;
154
155 /* If true, run the state machine to completion on every send
156 call. Generally used after a panic to make sure stuff goes
157 out. */
158 int run_to_completion;
159
160 /* The I/O port of an SI interface. */
161 int port;
162
163 /* The space between start addresses of the two ports. For
164 instance, if the first port is 0xca2 and the spacing is 4, then
165 the second port is 0xca6. */
166 unsigned int spacing;
167
168 /* zero if no irq; */
169 int irq;
170
171 /* The timer for this si. */
172 struct timer_list si_timer;
173
174 /* The time (in jiffies) the last timeout occurred at. */
175 unsigned long last_timeout_jiffies;
176
177 /* Used to gracefully stop the timer without race conditions. */
a9a2c44f 178 atomic_t stop_operation;
1da177e4
LT
179
180 /* The driver will disable interrupts when it gets into a
181 situation where it cannot handle messages due to lack of
182 memory. Once that situation clears up, it will re-enable
183 interrupts. */
184 int interrupt_disabled;
185
50c812b2 186 /* From the get device id response... */
3ae0e0f9 187 struct ipmi_device_id device_id;
1da177e4 188
50c812b2
CM
189 /* Driver model stuff. */
190 struct device *dev;
191 struct platform_device *pdev;
192
193 /* True if we allocated the device, false if it came from
194 * someplace else (like PCI). */
195 int dev_registered;
196
1da177e4
LT
197 /* Slave address, could be reported from DMI. */
198 unsigned char slave_addr;
199
200 /* Counters and things for the proc filesystem. */
201 spinlock_t count_lock;
202 unsigned long short_timeouts;
203 unsigned long long_timeouts;
204 unsigned long timeout_restarts;
205 unsigned long idles;
206 unsigned long interrupts;
207 unsigned long attentions;
208 unsigned long flag_fetches;
209 unsigned long hosed_count;
210 unsigned long complete_transactions;
211 unsigned long events;
212 unsigned long watchdog_pretimeouts;
213 unsigned long incoming_messages;
a9a2c44f 214
e9a705a0 215 struct task_struct *thread;
b0defcdb
CM
216
217 struct list_head link;
1da177e4
LT
218};
219
b0defcdb
CM
220static int try_smi_init(struct smi_info *smi);
221
e041c683 222static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
ea94027b
CM
223static int register_xaction_notifier(struct notifier_block * nb)
224{
e041c683 225 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
ea94027b
CM
226}
227
1da177e4
LT
228static void deliver_recv_msg(struct smi_info *smi_info,
229 struct ipmi_smi_msg *msg)
230{
231 /* Deliver the message to the upper layer with the lock
232 released. */
233 spin_unlock(&(smi_info->si_lock));
234 ipmi_smi_msg_received(smi_info->intf, msg);
235 spin_lock(&(smi_info->si_lock));
236}
237
238static void return_hosed_msg(struct smi_info *smi_info)
239{
240 struct ipmi_smi_msg *msg = smi_info->curr_msg;
241
242 /* Make it a response */
243 msg->rsp[0] = msg->data[0] | 4;
244 msg->rsp[1] = msg->data[1];
245 msg->rsp[2] = 0xFF; /* Unknown error. */
246 msg->rsp_size = 3;
247
248 smi_info->curr_msg = NULL;
249 deliver_recv_msg(smi_info, msg);
250}
251
252static enum si_sm_result start_next_msg(struct smi_info *smi_info)
253{
254 int rv;
255 struct list_head *entry = NULL;
256#ifdef DEBUG_TIMING
257 struct timeval t;
258#endif
259
260 /* No need to save flags, we already have interrupts off and we
261 already hold the SMI lock. */
262 spin_lock(&(smi_info->msg_lock));
263
264 /* Pick the high priority queue first. */
b0defcdb 265 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
1da177e4 266 entry = smi_info->hp_xmit_msgs.next;
b0defcdb 267 } else if (!list_empty(&(smi_info->xmit_msgs))) {
1da177e4
LT
268 entry = smi_info->xmit_msgs.next;
269 }
270
b0defcdb 271 if (!entry) {
1da177e4
LT
272 smi_info->curr_msg = NULL;
273 rv = SI_SM_IDLE;
274 } else {
275 int err;
276
277 list_del(entry);
278 smi_info->curr_msg = list_entry(entry,
279 struct ipmi_smi_msg,
280 link);
281#ifdef DEBUG_TIMING
282 do_gettimeofday(&t);
283 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
284#endif
e041c683
AS
285 err = atomic_notifier_call_chain(&xaction_notifier_list,
286 0, smi_info);
ea94027b
CM
287 if (err & NOTIFY_STOP_MASK) {
288 rv = SI_SM_CALL_WITHOUT_DELAY;
289 goto out;
290 }
1da177e4
LT
291 err = smi_info->handlers->start_transaction(
292 smi_info->si_sm,
293 smi_info->curr_msg->data,
294 smi_info->curr_msg->data_size);
295 if (err) {
296 return_hosed_msg(smi_info);
297 }
298
299 rv = SI_SM_CALL_WITHOUT_DELAY;
300 }
ea94027b 301 out:
1da177e4
LT
302 spin_unlock(&(smi_info->msg_lock));
303
304 return rv;
305}
306
307static void start_enable_irq(struct smi_info *smi_info)
308{
309 unsigned char msg[2];
310
311 /* If we are enabling interrupts, we have to tell the
312 BMC to use them. */
313 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
314 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
315
316 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
317 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
318}
319
320static void start_clear_flags(struct smi_info *smi_info)
321{
322 unsigned char msg[3];
323
324 /* Make sure the watchdog pre-timeout flag is not set at startup. */
325 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
326 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
327 msg[2] = WDT_PRE_TIMEOUT_INT;
328
329 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
330 smi_info->si_state = SI_CLEARING_FLAGS;
331}
332
333/* When we have a situation where we run out of memory and cannot
334 allocate messages, we just leave them in the BMC and run the system
335 polled until we can allocate some memory. Once we have some
336 memory, we will re-enable the interrupt. */
337static inline void disable_si_irq(struct smi_info *smi_info)
338{
b0defcdb 339 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1da177e4
LT
340 disable_irq_nosync(smi_info->irq);
341 smi_info->interrupt_disabled = 1;
342 }
343}
344
345static inline void enable_si_irq(struct smi_info *smi_info)
346{
347 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
348 enable_irq(smi_info->irq);
349 smi_info->interrupt_disabled = 0;
350 }
351}
352
353static void handle_flags(struct smi_info *smi_info)
354{
3ae0e0f9 355 retry:
1da177e4
LT
356 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
357 /* Watchdog pre-timeout */
358 spin_lock(&smi_info->count_lock);
359 smi_info->watchdog_pretimeouts++;
360 spin_unlock(&smi_info->count_lock);
361
362 start_clear_flags(smi_info);
363 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
364 spin_unlock(&(smi_info->si_lock));
365 ipmi_smi_watchdog_pretimeout(smi_info->intf);
366 spin_lock(&(smi_info->si_lock));
367 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
368 /* Messages available. */
369 smi_info->curr_msg = ipmi_alloc_smi_msg();
b0defcdb 370 if (!smi_info->curr_msg) {
1da177e4
LT
371 disable_si_irq(smi_info);
372 smi_info->si_state = SI_NORMAL;
373 return;
374 }
375 enable_si_irq(smi_info);
376
377 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
378 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
379 smi_info->curr_msg->data_size = 2;
380
381 smi_info->handlers->start_transaction(
382 smi_info->si_sm,
383 smi_info->curr_msg->data,
384 smi_info->curr_msg->data_size);
385 smi_info->si_state = SI_GETTING_MESSAGES;
386 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
387 /* Events available. */
388 smi_info->curr_msg = ipmi_alloc_smi_msg();
b0defcdb 389 if (!smi_info->curr_msg) {
1da177e4
LT
390 disable_si_irq(smi_info);
391 smi_info->si_state = SI_NORMAL;
392 return;
393 }
394 enable_si_irq(smi_info);
395
396 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
397 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
398 smi_info->curr_msg->data_size = 2;
399
400 smi_info->handlers->start_transaction(
401 smi_info->si_sm,
402 smi_info->curr_msg->data,
403 smi_info->curr_msg->data_size);
404 smi_info->si_state = SI_GETTING_EVENTS;
3ae0e0f9
CM
405 } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
406 if (smi_info->oem_data_avail_handler)
407 if (smi_info->oem_data_avail_handler(smi_info))
408 goto retry;
1da177e4
LT
409 } else {
410 smi_info->si_state = SI_NORMAL;
411 }
412}
413
414static void handle_transaction_done(struct smi_info *smi_info)
415{
416 struct ipmi_smi_msg *msg;
417#ifdef DEBUG_TIMING
418 struct timeval t;
419
420 do_gettimeofday(&t);
421 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
422#endif
423 switch (smi_info->si_state) {
424 case SI_NORMAL:
b0defcdb 425 if (!smi_info->curr_msg)
1da177e4
LT
426 break;
427
428 smi_info->curr_msg->rsp_size
429 = smi_info->handlers->get_result(
430 smi_info->si_sm,
431 smi_info->curr_msg->rsp,
432 IPMI_MAX_MSG_LENGTH);
433
434 /* Do this here because deliver_recv_msg() releases the
435 lock, and a new message can be put in during the
436 time the lock is released. */
437 msg = smi_info->curr_msg;
438 smi_info->curr_msg = NULL;
439 deliver_recv_msg(smi_info, msg);
440 break;
441
442 case SI_GETTING_FLAGS:
443 {
444 unsigned char msg[4];
445 unsigned int len;
446
447 /* We got the flags from the SMI, now handle them. */
448 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
449 if (msg[2] != 0) {
450 /* Error fetching flags, just give up for
451 now. */
452 smi_info->si_state = SI_NORMAL;
453 } else if (len < 4) {
454 /* Hmm, no flags. That's technically illegal, but
455 don't use uninitialized data. */
456 smi_info->si_state = SI_NORMAL;
457 } else {
458 smi_info->msg_flags = msg[3];
459 handle_flags(smi_info);
460 }
461 break;
462 }
463
464 case SI_CLEARING_FLAGS:
465 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
466 {
467 unsigned char msg[3];
468
469 /* We cleared the flags. */
470 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
471 if (msg[2] != 0) {
472 /* Error clearing flags */
473 printk(KERN_WARNING
474 "ipmi_si: Error clearing flags: %2.2x\n",
475 msg[2]);
476 }
477 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
478 start_enable_irq(smi_info);
479 else
480 smi_info->si_state = SI_NORMAL;
481 break;
482 }
483
484 case SI_GETTING_EVENTS:
485 {
486 smi_info->curr_msg->rsp_size
487 = smi_info->handlers->get_result(
488 smi_info->si_sm,
489 smi_info->curr_msg->rsp,
490 IPMI_MAX_MSG_LENGTH);
491
492 /* Do this here because deliver_recv_msg() releases the
493 lock, and a new message can be put in during the
494 time the lock is released. */
495 msg = smi_info->curr_msg;
496 smi_info->curr_msg = NULL;
497 if (msg->rsp[2] != 0) {
498 /* Error getting event, probably done. */
499 msg->done(msg);
500
501 /* Take off the event flag. */
502 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
503 handle_flags(smi_info);
504 } else {
505 spin_lock(&smi_info->count_lock);
506 smi_info->events++;
507 spin_unlock(&smi_info->count_lock);
508
509 /* Do this before we deliver the message
510 because delivering the message releases the
511 lock and something else can mess with the
512 state. */
513 handle_flags(smi_info);
514
515 deliver_recv_msg(smi_info, msg);
516 }
517 break;
518 }
519
520 case SI_GETTING_MESSAGES:
521 {
522 smi_info->curr_msg->rsp_size
523 = smi_info->handlers->get_result(
524 smi_info->si_sm,
525 smi_info->curr_msg->rsp,
526 IPMI_MAX_MSG_LENGTH);
527
528 /* Do this here because deliver_recv_msg() releases the
529 lock, and a new message can be put in during the
530 time the lock is released. */
531 msg = smi_info->curr_msg;
532 smi_info->curr_msg = NULL;
533 if (msg->rsp[2] != 0) {
534 /* Error getting event, probably done. */
535 msg->done(msg);
536
537 /* Take off the msg flag. */
538 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
539 handle_flags(smi_info);
540 } else {
541 spin_lock(&smi_info->count_lock);
542 smi_info->incoming_messages++;
543 spin_unlock(&smi_info->count_lock);
544
545 /* Do this before we deliver the message
546 because delivering the message releases the
547 lock and something else can mess with the
548 state. */
549 handle_flags(smi_info);
550
551 deliver_recv_msg(smi_info, msg);
552 }
553 break;
554 }
555
556 case SI_ENABLE_INTERRUPTS1:
557 {
558 unsigned char msg[4];
559
560 /* We got the flags from the SMI, now handle them. */
561 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
562 if (msg[2] != 0) {
563 printk(KERN_WARNING
564 "ipmi_si: Could not enable interrupts"
565 ", failed get, using polled mode.\n");
566 smi_info->si_state = SI_NORMAL;
567 } else {
568 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
569 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
570 msg[2] = msg[3] | 1; /* enable msg queue int */
571 smi_info->handlers->start_transaction(
572 smi_info->si_sm, msg, 3);
573 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
574 }
575 break;
576 }
577
578 case SI_ENABLE_INTERRUPTS2:
579 {
580 unsigned char msg[4];
581
582 /* We got the flags from the SMI, now handle them. */
583 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
584 if (msg[2] != 0) {
585 printk(KERN_WARNING
586 "ipmi_si: Could not enable interrupts"
587 ", failed set, using polled mode.\n");
588 }
589 smi_info->si_state = SI_NORMAL;
590 break;
591 }
592 }
593}
594
595/* Called on timeouts and events. Timeouts should pass the elapsed
596 time, interrupts should pass in zero. */
597static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
598 int time)
599{
600 enum si_sm_result si_sm_result;
601
602 restart:
603 /* There used to be a loop here that waited a little while
604 (around 25us) before giving up. That turned out to be
605 pointless, the minimum delays I was seeing were in the 300us
606 range, which is far too long to wait in an interrupt. So
607 we just run until the state machine tells us something
608 happened or it needs a delay. */
609 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
610 time = 0;
611 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
612 {
613 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
614 }
615
616 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
617 {
618 spin_lock(&smi_info->count_lock);
619 smi_info->complete_transactions++;
620 spin_unlock(&smi_info->count_lock);
621
622 handle_transaction_done(smi_info);
623 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
624 }
625 else if (si_sm_result == SI_SM_HOSED)
626 {
627 spin_lock(&smi_info->count_lock);
628 smi_info->hosed_count++;
629 spin_unlock(&smi_info->count_lock);
630
631 /* Do this before return_hosed_msg, because that
632 releases the lock. */
633 smi_info->si_state = SI_NORMAL;
634 if (smi_info->curr_msg != NULL) {
635 /* If we were handling a user message, format
636 a response to send to the upper layer to
637 tell it about the error. */
638 return_hosed_msg(smi_info);
639 }
640 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
641 }
642
643 /* We prefer handling attn over new messages. */
644 if (si_sm_result == SI_SM_ATTN)
645 {
646 unsigned char msg[2];
647
648 spin_lock(&smi_info->count_lock);
649 smi_info->attentions++;
650 spin_unlock(&smi_info->count_lock);
651
652 /* Got an attn, send down a get message flags to see
653 what's causing it. It would be better to handle
654 this in the upper layer, but due to the way
655 interrupts work with the SMI, that's not really
656 possible. */
657 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
658 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
659
660 smi_info->handlers->start_transaction(
661 smi_info->si_sm, msg, 2);
662 smi_info->si_state = SI_GETTING_FLAGS;
663 goto restart;
664 }
665
666 /* If we are currently idle, try to start the next message. */
667 if (si_sm_result == SI_SM_IDLE) {
668 spin_lock(&smi_info->count_lock);
669 smi_info->idles++;
670 spin_unlock(&smi_info->count_lock);
671
672 si_sm_result = start_next_msg(smi_info);
673 if (si_sm_result != SI_SM_IDLE)
674 goto restart;
675 }
676
677 if ((si_sm_result == SI_SM_IDLE)
678 && (atomic_read(&smi_info->req_events)))
679 {
680 /* We are idle and the upper layer requested that I fetch
681 events, so do so. */
682 unsigned char msg[2];
683
684 spin_lock(&smi_info->count_lock);
685 smi_info->flag_fetches++;
686 spin_unlock(&smi_info->count_lock);
687
688 atomic_set(&smi_info->req_events, 0);
689 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
690 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
691
692 smi_info->handlers->start_transaction(
693 smi_info->si_sm, msg, 2);
694 smi_info->si_state = SI_GETTING_FLAGS;
695 goto restart;
696 }
697
698 return si_sm_result;
699}
700
701static void sender(void *send_info,
702 struct ipmi_smi_msg *msg,
703 int priority)
704{
705 struct smi_info *smi_info = send_info;
706 enum si_sm_result result;
707 unsigned long flags;
708#ifdef DEBUG_TIMING
709 struct timeval t;
710#endif
711
712 spin_lock_irqsave(&(smi_info->msg_lock), flags);
713#ifdef DEBUG_TIMING
714 do_gettimeofday(&t);
715 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
716#endif
717
718 if (smi_info->run_to_completion) {
719 /* If we are running to completion, then throw it in
720 the list and run transactions until everything is
721 clear. Priority doesn't matter here. */
722 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
723
724 /* We have to release the msg lock and claim the smi
725 lock in this case, because of race conditions. */
726 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
727
728 spin_lock_irqsave(&(smi_info->si_lock), flags);
729 result = smi_event_handler(smi_info, 0);
730 while (result != SI_SM_IDLE) {
731 udelay(SI_SHORT_TIMEOUT_USEC);
732 result = smi_event_handler(smi_info,
733 SI_SHORT_TIMEOUT_USEC);
734 }
735 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
736 return;
737 } else {
738 if (priority > 0) {
739 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
740 } else {
741 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
742 }
743 }
744 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
745
746 spin_lock_irqsave(&(smi_info->si_lock), flags);
747 if ((smi_info->si_state == SI_NORMAL)
748 && (smi_info->curr_msg == NULL))
749 {
750 start_next_msg(smi_info);
1da177e4
LT
751 }
752 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
753}
754
755static void set_run_to_completion(void *send_info, int i_run_to_completion)
756{
757 struct smi_info *smi_info = send_info;
758 enum si_sm_result result;
759 unsigned long flags;
760
761 spin_lock_irqsave(&(smi_info->si_lock), flags);
762
763 smi_info->run_to_completion = i_run_to_completion;
764 if (i_run_to_completion) {
765 result = smi_event_handler(smi_info, 0);
766 while (result != SI_SM_IDLE) {
767 udelay(SI_SHORT_TIMEOUT_USEC);
768 result = smi_event_handler(smi_info,
769 SI_SHORT_TIMEOUT_USEC);
770 }
771 }
772
773 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
774}
775
a9a2c44f
CM
776static int ipmi_thread(void *data)
777{
778 struct smi_info *smi_info = data;
e9a705a0 779 unsigned long flags;
a9a2c44f
CM
780 enum si_sm_result smi_result;
781
a9a2c44f 782 set_user_nice(current, 19);
e9a705a0 783 while (!kthread_should_stop()) {
a9a2c44f 784 spin_lock_irqsave(&(smi_info->si_lock), flags);
8a3628d5 785 smi_result = smi_event_handler(smi_info, 0);
a9a2c44f 786 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
e9a705a0
MD
787 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
788 /* do nothing */
a9a2c44f 789 }
e9a705a0 790 else if (smi_result == SI_SM_CALL_WITH_DELAY)
33979734 791 schedule();
e9a705a0
MD
792 else
793 schedule_timeout_interruptible(1);
a9a2c44f 794 }
a9a2c44f
CM
795 return 0;
796}
797
798
1da177e4
LT
799static void poll(void *send_info)
800{
801 struct smi_info *smi_info = send_info;
802
803 smi_event_handler(smi_info, 0);
804}
805
806static void request_events(void *send_info)
807{
808 struct smi_info *smi_info = send_info;
809
810 atomic_set(&smi_info->req_events, 1);
811}
812
813static int initialized = 0;
814
1da177e4
LT
815static void smi_timeout(unsigned long data)
816{
817 struct smi_info *smi_info = (struct smi_info *) data;
818 enum si_sm_result smi_result;
819 unsigned long flags;
820 unsigned long jiffies_now;
c4edff1c 821 long time_diff;
1da177e4
LT
822#ifdef DEBUG_TIMING
823 struct timeval t;
824#endif
825
a9a2c44f 826 if (atomic_read(&smi_info->stop_operation))
1da177e4 827 return;
1da177e4
LT
828
829 spin_lock_irqsave(&(smi_info->si_lock), flags);
830#ifdef DEBUG_TIMING
831 do_gettimeofday(&t);
832 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
833#endif
834 jiffies_now = jiffies;
c4edff1c 835 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1da177e4
LT
836 * SI_USEC_PER_JIFFY);
837 smi_result = smi_event_handler(smi_info, time_diff);
838
839 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
840
841 smi_info->last_timeout_jiffies = jiffies_now;
842
b0defcdb 843 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1da177e4
LT
844 /* Running with interrupts, only do long timeouts. */
845 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
846 spin_lock_irqsave(&smi_info->count_lock, flags);
847 smi_info->long_timeouts++;
848 spin_unlock_irqrestore(&smi_info->count_lock, flags);
849 goto do_add_timer;
850 }
851
852 /* If the state machine asks for a short delay, then shorten
853 the timer timeout. */
854 if (smi_result == SI_SM_CALL_WITH_DELAY) {
855 spin_lock_irqsave(&smi_info->count_lock, flags);
856 smi_info->short_timeouts++;
857 spin_unlock_irqrestore(&smi_info->count_lock, flags);
1da177e4 858 smi_info->si_timer.expires = jiffies + 1;
1da177e4
LT
859 } else {
860 spin_lock_irqsave(&smi_info->count_lock, flags);
861 smi_info->long_timeouts++;
862 spin_unlock_irqrestore(&smi_info->count_lock, flags);
863 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
1da177e4
LT
864 }
865
866 do_add_timer:
867 add_timer(&(smi_info->si_timer));
868}
869
870static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
871{
872 struct smi_info *smi_info = data;
873 unsigned long flags;
874#ifdef DEBUG_TIMING
875 struct timeval t;
876#endif
877
878 spin_lock_irqsave(&(smi_info->si_lock), flags);
879
880 spin_lock(&smi_info->count_lock);
881 smi_info->interrupts++;
882 spin_unlock(&smi_info->count_lock);
883
a9a2c44f 884 if (atomic_read(&smi_info->stop_operation))
1da177e4
LT
885 goto out;
886
887#ifdef DEBUG_TIMING
888 do_gettimeofday(&t);
889 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
890#endif
891 smi_event_handler(smi_info, 0);
892 out:
893 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
894 return IRQ_HANDLED;
895}
896
9dbf68f9
CM
897static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
898{
899 struct smi_info *smi_info = data;
900 /* We need to clear the IRQ flag for the BT interface. */
901 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
902 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
903 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
904 return si_irq_handler(irq, data, regs);
905}
906
453823ba
CM
907static int smi_start_processing(void *send_info,
908 ipmi_smi_t intf)
909{
910 struct smi_info *new_smi = send_info;
911
912 new_smi->intf = intf;
913
914 /* Set up the timer that drives the interface. */
915 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
916 new_smi->last_timeout_jiffies = jiffies;
917 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
918
919 if (new_smi->si_type != SI_BT) {
920 new_smi->thread = kthread_run(ipmi_thread, new_smi,
921 "kipmi%d", new_smi->intf_num);
922 if (IS_ERR(new_smi->thread)) {
923 printk(KERN_NOTICE "ipmi_si_intf: Could not start"
924 " kernel thread due to error %ld, only using"
925 " timers to drive the interface\n",
926 PTR_ERR(new_smi->thread));
927 new_smi->thread = NULL;
928 }
929 }
930
931 return 0;
932}
9dbf68f9 933
1da177e4
LT
934static struct ipmi_smi_handlers handlers =
935{
936 .owner = THIS_MODULE,
453823ba 937 .start_processing = smi_start_processing,
1da177e4
LT
938 .sender = sender,
939 .request_events = request_events,
940 .set_run_to_completion = set_run_to_completion,
941 .poll = poll,
942};
943
944/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
945 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_PARMS */
946
947#define SI_MAX_PARMS 4
b0defcdb 948static LIST_HEAD(smi_infos);
d6dfd131 949static DEFINE_MUTEX(smi_infos_lock);
b0defcdb 950static int smi_num; /* Used to sequence the SMIs */
1da177e4 951
1da177e4
LT
952#define DEFAULT_REGSPACING 1
953
954static int si_trydefaults = 1;
955static char *si_type[SI_MAX_PARMS];
956#define MAX_SI_TYPE_STR 30
957static char si_type_str[MAX_SI_TYPE_STR];
958static unsigned long addrs[SI_MAX_PARMS];
959static int num_addrs;
960static unsigned int ports[SI_MAX_PARMS];
961static int num_ports;
962static int irqs[SI_MAX_PARMS];
963static int num_irqs;
964static int regspacings[SI_MAX_PARMS];
965static int num_regspacings = 0;
966static int regsizes[SI_MAX_PARMS];
967static int num_regsizes = 0;
968static int regshifts[SI_MAX_PARMS];
969static int num_regshifts = 0;
970static int slave_addrs[SI_MAX_PARMS];
971static int num_slave_addrs = 0;
972
973
974module_param_named(trydefaults, si_trydefaults, bool, 0);
975MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
976 " default scan of the KCS and SMIC interface at the standard"
977 " address");
978module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
979MODULE_PARM_DESC(type, "Defines the type of each interface, each"
980 " interface separated by commas. The types are 'kcs',"
981 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
982 " the first interface to kcs and the second to bt");
983module_param_array(addrs, long, &num_addrs, 0);
984MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
985 " addresses separated by commas. Only use if an interface"
986 " is in memory. Otherwise, set it to zero or leave"
987 " it blank.");
988module_param_array(ports, int, &num_ports, 0);
989MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
990 " addresses separated by commas. Only use if an interface"
991 " is a port. Otherwise, set it to zero or leave"
992 " it blank.");
993module_param_array(irqs, int, &num_irqs, 0);
994MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
995 " addresses separated by commas. Only use if an interface"
996 " has an interrupt. Otherwise, set it to zero or leave"
997 " it blank.");
998module_param_array(regspacings, int, &num_regspacings, 0);
999MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1000 " and each successive register used by the interface. For"
1001 " instance, if the start address is 0xca2 and the spacing"
1002 " is 2, then the second address is at 0xca4. Defaults"
1003 " to 1.");
1004module_param_array(regsizes, int, &num_regsizes, 0);
1005MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1006 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1007 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1008 " the 8-bit IPMI register has to be read from a larger"
1009 " register.");
1010module_param_array(regshifts, int, &num_regshifts, 0);
1011MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1012 " IPMI register, in bits. For instance, if the data"
1013 " is read from a 32-bit word and the IPMI data is in"
1014 " bit 8-15, then the shift would be 8");
1015module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1016MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1017 " the controller. Normally this is 0x20, but can be"
1018 " overridden by this parm. This is an array indexed"
1019 " by interface number.");
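/*
 * Illustrative usage of the parameters above (example values only):
 * a single hard-coded KCS interface at I/O port 0xca2 with a 4-byte
 * register spacing and IRQ 11 could be requested with
 *
 *     modprobe ipmi_si type=kcs ports=0xca2 regspacings=4 irqs=11
 *
 * Each parameter is a comma-separated array indexed by interface
 * number; entries left unset fall back to the defaults (register
 * spacing and size of 1, no IRQ, i.e. polled operation).
 */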
1020
1021
b0defcdb 1022#define IPMI_IO_ADDR_SPACE 0
1da177e4 1023#define IPMI_MEM_ADDR_SPACE 1
b0defcdb 1024static char *addr_space_to_str[] = { "I/O", "memory" };
1da177e4 1025
b0defcdb 1026static void std_irq_cleanup(struct smi_info *info)
1da177e4 1027{
b0defcdb
CM
1028 if (info->si_type == SI_BT)
1029 /* Disable the interrupt in the BT interface. */
1030 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1031 free_irq(info->irq, info);
1da177e4 1032}
1da177e4
LT
1033
1034static int std_irq_setup(struct smi_info *info)
1035{
1036 int rv;
1037
b0defcdb 1038 if (!info->irq)
1da177e4
LT
1039 return 0;
1040
9dbf68f9
CM
1041 if (info->si_type == SI_BT) {
1042 rv = request_irq(info->irq,
1043 si_bt_irq_handler,
0f2ed4c6 1044 IRQF_DISABLED,
9dbf68f9
CM
1045 DEVICE_NAME,
1046 info);
b0defcdb 1047 if (!rv)
9dbf68f9
CM
1048 /* Enable the interrupt in the BT interface. */
1049 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1050 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1051 } else
1052 rv = request_irq(info->irq,
1053 si_irq_handler,
0f2ed4c6 1054 IRQF_DISABLED,
9dbf68f9
CM
1055 DEVICE_NAME,
1056 info);
1da177e4
LT
1057 if (rv) {
1058 printk(KERN_WARNING
1059 "ipmi_si: %s unable to claim interrupt %d,"
1060 " running polled\n",
1061 DEVICE_NAME, info->irq);
1062 info->irq = 0;
1063 } else {
b0defcdb 1064 info->irq_cleanup = std_irq_cleanup;
1da177e4
LT
1065 printk(" Using irq %d\n", info->irq);
1066 }
1067
1068 return rv;
1069}
1070
1da177e4
LT
1071static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1072{
b0defcdb 1073 unsigned int addr = io->addr_data;
1da177e4 1074
b0defcdb 1075 return inb(addr + (offset * io->regspacing));
1da177e4
LT
1076}
1077
1078static void port_outb(struct si_sm_io *io, unsigned int offset,
1079 unsigned char b)
1080{
b0defcdb 1081 unsigned int addr = io->addr_data;
1da177e4 1082
b0defcdb 1083 outb(b, addr + (offset * io->regspacing));
1da177e4
LT
1084}
1085
1086static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1087{
b0defcdb 1088 unsigned int addr = io->addr_data;
1da177e4 1089
b0defcdb 1090 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1da177e4
LT
1091}
1092
1093static void port_outw(struct si_sm_io *io, unsigned int offset,
1094 unsigned char b)
1095{
b0defcdb 1096 unsigned int addr = io->addr_data;
1da177e4 1097
b0defcdb 1098 outw(b << io->regshift, addr + (offset * io->regspacing));
1da177e4
LT
1099}
1100
1101static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1102{
b0defcdb 1103 unsigned int addr = io->addr_data;
1da177e4 1104
b0defcdb 1105 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1da177e4
LT
1106}
1107
1108static void port_outl(struct si_sm_io *io, unsigned int offset,
1109 unsigned char b)
1110{
b0defcdb 1111 unsigned int addr = io->addr_data;
1da177e4 1112
b0defcdb 1113 outl(b << io->regshift, addr+(offset * io->regspacing));
1da177e4
LT
1114}
1115
1116static void port_cleanup(struct smi_info *info)
1117{
b0defcdb 1118 unsigned int addr = info->io.addr_data;
d61a3ead 1119 int idx;
1da177e4 1120
b0defcdb 1121 if (addr) {
d61a3ead
CM
1122 for (idx = 0; idx < info->io_size; idx++) {
1123 release_region(addr + idx * info->io.regspacing,
1124 info->io.regsize);
1125 }
1da177e4 1126 }
1da177e4
LT
1127}
1128
1129static int port_setup(struct smi_info *info)
1130{
b0defcdb 1131 unsigned int addr = info->io.addr_data;
d61a3ead 1132 int idx;
1da177e4 1133
b0defcdb 1134 if (!addr)
1da177e4
LT
1135 return -ENODEV;
1136
1137 info->io_cleanup = port_cleanup;
1138
1139 /* Figure out the actual inb/inw/inl/etc routine to use based
1140 upon the register size. */
1141 switch (info->io.regsize) {
1142 case 1:
1143 info->io.inputb = port_inb;
1144 info->io.outputb = port_outb;
1145 break;
1146 case 2:
1147 info->io.inputb = port_inw;
1148 info->io.outputb = port_outw;
1149 break;
1150 case 4:
1151 info->io.inputb = port_inl;
1152 info->io.outputb = port_outl;
1153 break;
1154 default:
1155 printk("ipmi_si: Invalid register size: %d\n",
1156 info->io.regsize);
1157 return -EINVAL;
1158 }
1159
d61a3ead
CM
1160 /* Some BIOSes reserve disjoint I/O regions in their ACPI
1161 * tables. This causes problems when trying to register the
1162 * entire I/O region. Therefore we must register each I/O
1163 * port separately.
1164 */
1165 for (idx = 0; idx < info->io_size; idx++) {
1166 if (request_region(addr + idx * info->io.regspacing,
1167 info->io.regsize, DEVICE_NAME) == NULL) {
1168 /* Undo allocations */
1169 while (idx--) {
1170 release_region(addr + idx * info->io.regspacing,
1171 info->io.regsize);
1172 }
1173 return -EIO;
1174 }
1175 }
1da177e4
LT
1176 return 0;
1177}
1178
546cfdf4 1179static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1180{
1181 return readb((io->addr)+(offset * io->regspacing));
1182}
1183
546cfdf4 1184static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1185 unsigned char b)
1186{
1187 writeb(b, (io->addr)+(offset * io->regspacing));
1188}
1189
546cfdf4 1190static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1191{
1192 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1193 & 0xff;
1194}
1195
546cfdf4 1196static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1197 unsigned char b)
1198{
1199 writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
1200}
1201
546cfdf4 1202static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1203{
1204 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1205 & 0xff;
1206}
1207
546cfdf4 1208static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1209 unsigned char b)
1210{
1211 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1212}
1213
1214#ifdef readq
1215static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1216{
1217 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1218 & 0xff;
1219}
1220
1221static void mem_outq(struct si_sm_io *io, unsigned int offset,
1222 unsigned char b)
1223{
1224 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1225}
1226#endif
1227
1228static void mem_cleanup(struct smi_info *info)
1229{
b0defcdb 1230 unsigned long addr = info->io.addr_data;
1da177e4
LT
1231 int mapsize;
1232
1233 if (info->io.addr) {
1234 iounmap(info->io.addr);
1235
1236 mapsize = ((info->io_size * info->io.regspacing)
1237 - (info->io.regspacing - info->io.regsize));
1238
b0defcdb 1239 release_mem_region(addr, mapsize);
1da177e4 1240 }
1da177e4
LT
1241}
1242
1243static int mem_setup(struct smi_info *info)
1244{
b0defcdb 1245 unsigned long addr = info->io.addr_data;
1da177e4
LT
1246 int mapsize;
1247
b0defcdb 1248 if (!addr)
1da177e4
LT
1249 return -ENODEV;
1250
1251 info->io_cleanup = mem_cleanup;
1252
1253 /* Figure out the actual readb/readw/readl/etc routine to use based
1254 upon the register size. */
1255 switch (info->io.regsize) {
1256 case 1:
546cfdf4
AD
1257 info->io.inputb = intf_mem_inb;
1258 info->io.outputb = intf_mem_outb;
1da177e4
LT
1259 break;
1260 case 2:
546cfdf4
AD
1261 info->io.inputb = intf_mem_inw;
1262 info->io.outputb = intf_mem_outw;
1da177e4
LT
1263 break;
1264 case 4:
546cfdf4
AD
1265 info->io.inputb = intf_mem_inl;
1266 info->io.outputb = intf_mem_outl;
1da177e4
LT
1267 break;
1268#ifdef readq
1269 case 8:
1270 info->io.inputb = mem_inq;
1271 info->io.outputb = mem_outq;
1272 break;
1273#endif
1274 default:
1275 printk("ipmi_si: Invalid register size: %d\n",
1276 info->io.regsize);
1277 return -EINVAL;
1278 }
1279
1280 /* Calculate the total amount of memory to claim. This is an
1281 * unusual looking calculation, but it avoids claiming any
1282 * more memory than it has to. It will claim everything
1283 * between the first address to the end of the last full
1284 * register. */
1285 mapsize = ((info->io_size * info->io.regspacing)
1286 - (info->io.regspacing - info->io.regsize));
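 /*
 * Worked example with illustrative values: for io_size == 2,
 * regspacing == 4 and regsize == 1 the registers sit at byte offsets
 * 0 and 4, so mapsize = (2 * 4) - (4 - 1) = 5 -- just enough to cover
 * the last register without claiming the unused padding after it.
 */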
1287
b0defcdb 1288 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1da177e4
LT
1289 return -EIO;
1290
b0defcdb 1291 info->io.addr = ioremap(addr, mapsize);
1da177e4 1292 if (info->io.addr == NULL) {
b0defcdb 1293 release_mem_region(addr, mapsize);
1da177e4
LT
1294 return -EIO;
1295 }
1296 return 0;
1297}
1298
b0defcdb
CM
1299
1300static __devinit void hardcode_find_bmc(void)
1da177e4 1301{
b0defcdb 1302 int i;
1da177e4
LT
1303 struct smi_info *info;
1304
b0defcdb
CM
1305 for (i = 0; i < SI_MAX_PARMS; i++) {
1306 if (!ports[i] && !addrs[i])
1307 continue;
1da177e4 1308
b0defcdb
CM
1309 info = kzalloc(sizeof(*info), GFP_KERNEL);
1310 if (!info)
1311 return;
1da177e4 1312
b0defcdb 1313 info->addr_source = "hardcoded";
1da177e4 1314
b0defcdb
CM
1315 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1316 info->si_type = SI_KCS;
1317 } else if (strcmp(si_type[i], "smic") == 0) {
1318 info->si_type = SI_SMIC;
1319 } else if (strcmp(si_type[i], "bt") == 0) {
1320 info->si_type = SI_BT;
1321 } else {
1322 printk(KERN_WARNING
1323 "ipmi_si: Interface type specified "
1324 "for interface %d, was invalid: %s\n",
1325 i, si_type[i]);
1326 kfree(info);
1327 continue;
1328 }
1da177e4 1329
b0defcdb
CM
1330 if (ports[i]) {
1331 /* An I/O port */
1332 info->io_setup = port_setup;
1333 info->io.addr_data = ports[i];
1334 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1335 } else if (addrs[i]) {
1336 /* A memory port */
1337 info->io_setup = mem_setup;
1338 info->io.addr_data = addrs[i];
1339 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1340 } else {
1341 printk(KERN_WARNING
1342 "ipmi_si: Interface type specified "
1343 "for interface %d, "
1344 "but port and address were not set or "
1345 "set to zero.\n", i);
1346 kfree(info);
1347 continue;
1348 }
1da177e4 1349
b0defcdb
CM
1350 info->io.addr = NULL;
1351 info->io.regspacing = regspacings[i];
1352 if (!info->io.regspacing)
1353 info->io.regspacing = DEFAULT_REGSPACING;
1354 info->io.regsize = regsizes[i];
1355 if (!info->io.regsize)
1356 info->io.regsize = DEFAULT_REGSPACING;
1357 info->io.regshift = regshifts[i];
1358 info->irq = irqs[i];
1359 if (info->irq)
1360 info->irq_setup = std_irq_setup;
1da177e4 1361
b0defcdb
CM
1362 try_smi_init(info);
1363 }
1364}
1da177e4 1365
8466361a 1366#ifdef CONFIG_ACPI
1da177e4
LT
1367
1368#include <linux/acpi.h>
1369
1370/* Once we get an ACPI failure, we don't try any more, because we go
1371 through the tables sequentially. Once we don't find a table, there
1372 are no more. */
1373static int acpi_failure = 0;
1374
1375/* For GPE-type interrupts. */
1376static u32 ipmi_acpi_gpe(void *context)
1377{
1378 struct smi_info *smi_info = context;
1379 unsigned long flags;
1380#ifdef DEBUG_TIMING
1381 struct timeval t;
1382#endif
1383
1384 spin_lock_irqsave(&(smi_info->si_lock), flags);
1385
1386 spin_lock(&smi_info->count_lock);
1387 smi_info->interrupts++;
1388 spin_unlock(&smi_info->count_lock);
1389
a9a2c44f 1390 if (atomic_read(&smi_info->stop_operation))
1da177e4
LT
1391 goto out;
1392
1393#ifdef DEBUG_TIMING
1394 do_gettimeofday(&t);
1395 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1396#endif
1397 smi_event_handler(smi_info, 0);
1398 out:
1399 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1400
1401 return ACPI_INTERRUPT_HANDLED;
1402}
1403
b0defcdb
CM
1404static void acpi_gpe_irq_cleanup(struct smi_info *info)
1405{
1406 if (!info->irq)
1407 return;
1408
1409 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1410}
1411
1da177e4
LT
1412static int acpi_gpe_irq_setup(struct smi_info *info)
1413{
1414 acpi_status status;
1415
b0defcdb 1416 if (!info->irq)
1da177e4
LT
1417 return 0;
1418
1419 /* FIXME - is level triggered right? */
1420 status = acpi_install_gpe_handler(NULL,
1421 info->irq,
1422 ACPI_GPE_LEVEL_TRIGGERED,
1423 &ipmi_acpi_gpe,
1424 info);
1425 if (status != AE_OK) {
1426 printk(KERN_WARNING
1427 "ipmi_si: %s unable to claim ACPI GPE %d,"
1428 " running polled\n",
1429 DEVICE_NAME, info->irq);
1430 info->irq = 0;
1431 return -EINVAL;
1432 } else {
b0defcdb 1433 info->irq_cleanup = acpi_gpe_irq_cleanup;
1da177e4
LT
1434 printk(" Using ACPI GPE %d\n", info->irq);
1435 return 0;
1436 }
1437}
1438
1da177e4
LT
1439/*
1440 * Defined at
1441 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1442 */
1443struct SPMITable {
1444 s8 Signature[4];
1445 u32 Length;
1446 u8 Revision;
1447 u8 Checksum;
1448 s8 OEMID[6];
1449 s8 OEMTableID[8];
1450 s8 OEMRevision[4];
1451 s8 CreatorID[4];
1452 s8 CreatorRevision[4];
1453 u8 InterfaceType;
1454 u8 IPMIlegacy;
1455 s16 SpecificationRevision;
1456
1457 /*
1458 * Bit 0 - SCI interrupt supported
1459 * Bit 1 - I/O APIC/SAPIC
1460 */
1461 u8 InterruptType;
1462
1463 /* If bit 0 of InterruptType is set, then this is the SCI
1464 interrupt in the GPEx_STS register. */
1465 u8 GPE;
1466
1467 s16 Reserved;
1468
1469 /* If bit 1 of InterruptType is set, then this is the I/O
1470 APIC/SAPIC interrupt. */
1471 u32 GlobalSystemInterrupt;
1472
1473 /* The actual register address. */
1474 struct acpi_generic_address addr;
1475
1476 u8 UID[4];
1477
1478 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
1479};
1480
b0defcdb 1481static __devinit int try_init_acpi(struct SPMITable *spmi)
1da177e4
LT
1482{
1483 struct smi_info *info;
1da177e4
LT
1484 char *io_type;
1485 u8 addr_space;
1486
1da177e4
LT
1487 if (spmi->IPMIlegacy != 1) {
1488 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1489 return -ENODEV;
1490 }
1491
1492 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1493 addr_space = IPMI_MEM_ADDR_SPACE;
1494 else
1495 addr_space = IPMI_IO_ADDR_SPACE;
b0defcdb
CM
1496
1497 info = kzalloc(sizeof(*info), GFP_KERNEL);
1498 if (!info) {
1499 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1500 return -ENOMEM;
1501 }
1502
1503 info->addr_source = "ACPI";
1da177e4 1504
1da177e4
LT
1505 /* Figure out the interface type. */
1506 switch (spmi->InterfaceType)
1507 {
1508 case 1: /* KCS */
b0defcdb 1509 info->si_type = SI_KCS;
1da177e4 1510 break;
1da177e4 1511 case 2: /* SMIC */
b0defcdb 1512 info->si_type = SI_SMIC;
1da177e4 1513 break;
1da177e4 1514 case 3: /* BT */
b0defcdb 1515 info->si_type = SI_BT;
1da177e4 1516 break;
1da177e4
LT
1517 default:
1518 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1519 spmi->InterfaceType);
b0defcdb 1520 kfree(info);
1da177e4
LT
1521 return -EIO;
1522 }
1523
1da177e4
LT
1524 if (spmi->InterruptType & 1) {
1525 /* We've got a GPE interrupt. */
1526 info->irq = spmi->GPE;
1527 info->irq_setup = acpi_gpe_irq_setup;
1da177e4
LT
1528 } else if (spmi->InterruptType & 2) {
1529 /* We've got an APIC/SAPIC interrupt. */
1530 info->irq = spmi->GlobalSystemInterrupt;
1531 info->irq_setup = std_irq_setup;
1da177e4
LT
1532 } else {
1533 /* Use the default interrupt setting. */
1534 info->irq = 0;
1535 info->irq_setup = NULL;
1536 }
1537
35bc37a0
CM
1538 if (spmi->addr.register_bit_width) {
1539 /* A (hopefully) properly formed register bit width. */
35bc37a0
CM
1540 info->io.regspacing = spmi->addr.register_bit_width / 8;
1541 } else {
35bc37a0
CM
1542 info->io.regspacing = DEFAULT_REGSPACING;
1543 }
b0defcdb
CM
1544 info->io.regsize = info->io.regspacing;
1545 info->io.regshift = spmi->addr.register_bit_offset;
1da177e4
LT
1546
1547 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1548 io_type = "memory";
1549 info->io_setup = mem_setup;
b0defcdb 1550 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1da177e4
LT
1551 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1552 io_type = "I/O";
1553 info->io_setup = port_setup;
b0defcdb 1554 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1da177e4
LT
1555 } else {
1556 kfree(info);
1557 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1558 return -EIO;
1559 }
b0defcdb 1560 info->io.addr_data = spmi->addr.address;
1da177e4 1561
b0defcdb 1562 try_smi_init(info);
1da177e4 1563
1da177e4
LT
1564 return 0;
1565}
b0defcdb
CM
1566
1567static __devinit void acpi_find_bmc(void)
1568{
1569 acpi_status status;
1570 struct SPMITable *spmi;
1571 int i;
1572
1573 if (acpi_disabled)
1574 return;
1575
1576 if (acpi_failure)
1577 return;
1578
1579 for (i = 0; ; i++) {
1580 status = acpi_get_firmware_table("SPMI", i+1,
1581 ACPI_LOGICAL_ADDRESSING,
1582 (struct acpi_table_header **)
1583 &spmi);
1584 if (status != AE_OK)
1585 return;
1586
1587 try_init_acpi(spmi);
1588 }
1589}
1da177e4
LT
1590#endif
1591
a9fad4cc 1592#ifdef CONFIG_DMI
b0defcdb 1593struct dmi_ipmi_data
1da177e4
LT
1594{
1595 u8 type;
1596 u8 addr_space;
1597 unsigned long base_addr;
1598 u8 irq;
1599 u8 offset;
1600 u8 slave_addr;
b0defcdb 1601};
1da177e4 1602
b0defcdb
CM
1603static int __devinit decode_dmi(struct dmi_header *dm,
1604 struct dmi_ipmi_data *dmi)
1da177e4 1605{
e8b33617 1606 u8 *data = (u8 *)dm;
1da177e4
LT
1607 unsigned long base_addr;
1608 u8 reg_spacing;
b224cd3a 1609 u8 len = dm->length;
1da177e4 1610
b0defcdb 1611 dmi->type = data[4];
1da177e4
LT
1612
1613 memcpy(&base_addr, data+8, sizeof(unsigned long));
1614 if (len >= 0x11) {
1615 if (base_addr & 1) {
1616 /* I/O */
1617 base_addr &= 0xFFFE;
b0defcdb 1618 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1da177e4
LT
1619 }
1620 else {
1621 /* Memory */
b0defcdb 1622 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1da177e4
LT
1623 }
1624 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1625 is odd. */
b0defcdb 1626 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1da177e4 1627
b0defcdb 1628 dmi->irq = data[0x11];
1da177e4
LT
1629
1630 /* The top two bits of byte 0x10 hold the register spacing. */
b224cd3a 1631 reg_spacing = (data[0x10] & 0xC0) >> 6;
1da177e4
LT
1632 switch(reg_spacing){
1633 case 0x00: /* Byte boundaries */
b0defcdb 1634 dmi->offset = 1;
1da177e4
LT
1635 break;
1636 case 0x01: /* 32-bit boundaries */
b0defcdb 1637 dmi->offset = 4;
1da177e4
LT
1638 break;
1639 case 0x02: /* 16-byte boundaries */
b0defcdb 1640 dmi->offset = 16;
1da177e4
LT
1641 break;
1642 default:
1643 /* Some other interface, just ignore it. */
1644 return -EIO;
1645 }
1646 } else {
1647 /* Old DMI spec. */
92068801
CM
1648 /* Note that technically, the lower bit of the base
1649 * address should be 1 if the address is I/O and 0 if
1650 * the address is in memory. So many systems get that
1651 * wrong (and all that I have seen are I/O) so we just
1652 * ignore that bit and assume I/O. Systems that use
1653 * memory should use the newer spec, anyway. */
b0defcdb
CM
1654 dmi->base_addr = base_addr & 0xfffe;
1655 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1656 dmi->offset = 1;
1da177e4
LT
1657 }
1658
b0defcdb 1659 dmi->slave_addr = data[6];
1da177e4 1660
b0defcdb 1661 return 0;
1da177e4
LT
1662}
1663
b0defcdb 1664static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1da177e4 1665{
b0defcdb 1666 struct smi_info *info;
1da177e4 1667
b0defcdb
CM
1668 info = kzalloc(sizeof(*info), GFP_KERNEL);
1669 if (!info) {
1670 printk(KERN_ERR
1671 "ipmi_si: Could not allocate SI data\n");
1672 return;
1da177e4 1673 }
1da177e4 1674
b0defcdb 1675 info->addr_source = "SMBIOS";
1da177e4 1676
e8b33617 1677 switch (ipmi_data->type) {
b0defcdb
CM
1678 case 0x01: /* KCS */
1679 info->si_type = SI_KCS;
1680 break;
1681 case 0x02: /* SMIC */
1682 info->si_type = SI_SMIC;
1683 break;
1684 case 0x03: /* BT */
1685 info->si_type = SI_BT;
1686 break;
1687 default:
1688 return;
1da177e4 1689 }
1da177e4 1690
b0defcdb
CM
1691 switch (ipmi_data->addr_space) {
1692 case IPMI_MEM_ADDR_SPACE:
1da177e4 1693 info->io_setup = mem_setup;
b0defcdb
CM
1694 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1695 break;
1696
1697 case IPMI_IO_ADDR_SPACE:
1da177e4 1698 info->io_setup = port_setup;
b0defcdb
CM
1699 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1700 break;
1701
1702 default:
1da177e4 1703 kfree(info);
b0defcdb
CM
1704 printk(KERN_WARNING
1705 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
1706 ipmi_data->addr_space);
1707 return;
1da177e4 1708 }
b0defcdb 1709 info->io.addr_data = ipmi_data->base_addr;
1da177e4 1710
b0defcdb
CM
1711 info->io.regspacing = ipmi_data->offset;
1712 if (!info->io.regspacing)
1da177e4
LT
1713 info->io.regspacing = DEFAULT_REGSPACING;
1714 info->io.regsize = DEFAULT_REGSPACING;
b0defcdb 1715 info->io.regshift = 0;
1da177e4
LT
1716
1717 info->slave_addr = ipmi_data->slave_addr;
1718
b0defcdb
CM
1719 info->irq = ipmi_data->irq;
1720 if (info->irq)
1721 info->irq_setup = std_irq_setup;
1da177e4 1722
b0defcdb
CM
1723 try_smi_init(info);
1724}
1da177e4 1725
b0defcdb
CM
1726static void __devinit dmi_find_bmc(void)
1727{
1728 struct dmi_device *dev = NULL;
1729 struct dmi_ipmi_data data;
1730 int rv;
1731
1732 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1733 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
1734 if (!rv)
1735 try_init_dmi(&data);
1736 }
1da177e4 1737}
a9fad4cc 1738#endif /* CONFIG_DMI */
1da177e4
LT
1739
1740#ifdef CONFIG_PCI
1741
b0defcdb
CM
1742#define PCI_ERMC_CLASSCODE 0x0C0700
1743#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
1744#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
1745#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
1746#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
1747#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
1748
1da177e4
LT
1749#define PCI_HP_VENDOR_ID 0x103C
1750#define PCI_MMC_DEVICE_ID 0x121A
1751#define PCI_MMC_ADDR_CW 0x10
1752
b0defcdb
CM
1753static void ipmi_pci_cleanup(struct smi_info *info)
1754{
1755 struct pci_dev *pdev = info->addr_source_data;
1756
1757 pci_disable_device(pdev);
1758}
1da177e4 1759
b0defcdb
CM
1760static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
1761 const struct pci_device_id *ent)
1da177e4 1762{
b0defcdb
CM
1763 int rv;
1764 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
1765 struct smi_info *info;
1766 int first_reg_offset = 0;
1da177e4 1767
b0defcdb
CM
1768 info = kzalloc(sizeof(*info), GFP_KERNEL);
1769 if (!info)
1770 return -ENOMEM;
1da177e4 1771
b0defcdb 1772 info->addr_source = "PCI";
1da177e4 1773
b0defcdb
CM
1774 switch (class_type) {
1775 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
1776 info->si_type = SI_SMIC;
1777 break;
1da177e4 1778
b0defcdb
CM
1779 case PCI_ERMC_CLASSCODE_TYPE_KCS:
1780 info->si_type = SI_KCS;
1781 break;
1782
1783 case PCI_ERMC_CLASSCODE_TYPE_BT:
1784 info->si_type = SI_BT;
1785 break;
1786
1787 default:
1788 kfree(info);
1789 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
1790 pci_name(pdev), class_type);
1791 return -ENOMEM;
1da177e4
LT
1792 }
1793
b0defcdb
CM
1794 rv = pci_enable_device(pdev);
1795 if (rv) {
1796 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
1797 pci_name(pdev));
1798 kfree(info);
1799 return rv;
1da177e4
LT
1800 }
1801
b0defcdb
CM
1802 info->addr_source_cleanup = ipmi_pci_cleanup;
1803 info->addr_source_data = pdev;
1da177e4 1804
b0defcdb
CM
1805 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
1806 first_reg_offset = 1;
1da177e4 1807
b0defcdb
CM
1808 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
1809 info->io_setup = port_setup;
1810 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1811 } else {
1812 info->io_setup = mem_setup;
1813 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1da177e4 1814 }
b0defcdb 1815 info->io.addr_data = pci_resource_start(pdev, 0);
1da177e4 1816
b0defcdb 1817 info->io.regspacing = DEFAULT_REGSPACING;
1da177e4 1818 info->io.regsize = DEFAULT_REGSPACING;
b0defcdb 1819 info->io.regshift = 0;
1da177e4 1820
b0defcdb
CM
1821 info->irq = pdev->irq;
1822 if (info->irq)
1823 info->irq_setup = std_irq_setup;
1da177e4 1824
50c812b2
CM
1825 info->dev = &pdev->dev;
1826
b0defcdb
CM
1827 return try_smi_init(info);
1828}
1da177e4 1829
b0defcdb
CM
1830static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
1831{
1832}
1da177e4 1833
b0defcdb
CM
1834#ifdef CONFIG_PM
1835static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1836{
1da177e4
LT
1837 return 0;
1838}
1da177e4 1839
b0defcdb 1840static int ipmi_pci_resume(struct pci_dev *pdev)
1da177e4 1841{
b0defcdb
CM
1842 return 0;
1843}
1da177e4 1844#endif
1da177e4 1845
b0defcdb
CM
1846static struct pci_device_id ipmi_pci_devices[] = {
1847 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
1848 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE) }
1849};
1850MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
1851
1852static struct pci_driver ipmi_pci_driver = {
1853 .name = DEVICE_NAME,
1854 .id_table = ipmi_pci_devices,
1855 .probe = ipmi_pci_probe,
1856 .remove = __devexit_p(ipmi_pci_remove),
1857#ifdef CONFIG_PM
1858 .suspend = ipmi_pci_suspend,
1859 .resume = ipmi_pci_resume,
1860#endif
1861};
1862#endif /* CONFIG_PCI */
1da177e4
LT
1863
1864
1865static int try_get_dev_id(struct smi_info *smi_info)
1866{
50c812b2
CM
1867 unsigned char msg[2];
1868 unsigned char *resp;
1869 unsigned long resp_len;
1870 enum si_sm_result smi_result;
1871 int rv = 0;
1da177e4
LT
1872
1873 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
b0defcdb 1874 if (!resp)
1da177e4
LT
1875 return -ENOMEM;
1876
1877 /* Do a Get Device ID command, since it comes back with some
1878 useful info. */
1879 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1880 msg[1] = IPMI_GET_DEVICE_ID_CMD;
1881 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1882
1883 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1884 for (;;)
1885 {
c3e7e791
CM
1886 if (smi_result == SI_SM_CALL_WITH_DELAY ||
1887 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
da4cd8df 1888 schedule_timeout_uninterruptible(1);
1da177e4
LT
1889 smi_result = smi_info->handlers->event(
1890 smi_info->si_sm, 100);
1891 }
1892 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1893 {
1894 smi_result = smi_info->handlers->event(
1895 smi_info->si_sm, 0);
1896 }
1897 else
1898 break;
1899 }
1900 if (smi_result == SI_SM_HOSED) {
1901 /* We couldn't get the state machine to run, so whatever's at
1902 the port is probably not an IPMI SMI interface. */
1903 rv = -ENODEV;
1904 goto out;
1905 }
1906
1907 /* Otherwise, we got some data. */
1908 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1909 resp, IPMI_MAX_MSG_LENGTH);
50c812b2 1910 if (resp_len < 14) {
1da177e4
LT
1911 /* That's odd, it should be longer. */
1912 rv = -EINVAL;
1913 goto out;
1914 }
1915
1916 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1917 /* That's odd, it shouldn't be able to fail. */
1918 rv = -EINVAL;
1919 goto out;
1920 }
1921
1922 /* Record info from the get device id, in case we need it. */
50c812b2 1923 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
1da177e4
LT
1924
1925 out:
1926 kfree(resp);
1927 return rv;
1928}
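
try_get_dev_id() frames a raw two-byte Get Device ID request and then sanity-checks the response before handing it to ipmi_demangle_device_id(). Below is a compile-alone sketch of just the framing and the checks, assuming the standard IPMI values for IPMI_NETFN_APP_REQUEST (0x06) and IPMI_GET_DEVICE_ID_CMD (0x01), which are defined in the IPMI headers rather than in this file.

#include <stdio.h>
#include <stddef.h>

#define IPMI_NETFN_APP_REQUEST 0x06	/* assumed standard IPMI value */
#define IPMI_GET_DEVICE_ID_CMD 0x01	/* assumed standard IPMI value */

static void build_get_device_id(unsigned char msg[2])
{
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;	/* netfn in the top six bits */
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
}

/* The response checks applied above: long enough, echoes the command,
 * completion code zero.  The device id proper starts at resp[3]. */
static int get_device_id_response_ok(const unsigned char *resp, size_t len)
{
	return len >= 14 &&
	       resp[1] == IPMI_GET_DEVICE_ID_CMD &&
	       resp[2] == 0;
}

int main(void)
{
	unsigned char msg[2];
	unsigned char fake_ok[14] = { 0, IPMI_GET_DEVICE_ID_CMD, 0 };

	build_get_device_id(msg);
	printf("request: %02x %02x\n", msg[0], msg[1]);	/* 18 01 */
	printf("response ok: %d\n",
	       get_device_id_response_ok(fake_ok, sizeof(fake_ok)));
	return 0;
}
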
1929
1930static int type_file_read_proc(char *page, char **start, off_t off,
1931 int count, int *eof, void *data)
1932{
1933 char *out = (char *) page;
1934 struct smi_info *smi = data;
1935
1936 switch (smi->si_type) {
1937 case SI_KCS:
1938 return sprintf(out, "kcs\n");
1939 case SI_SMIC:
1940 return sprintf(out, "smic\n");
1941 case SI_BT:
1942 return sprintf(out, "bt\n");
1943 default:
1944 return 0;
1945 }
1946}
1947
1948static int stat_file_read_proc(char *page, char **start, off_t off,
1949 int count, int *eof, void *data)
1950{
1951 char *out = (char *) page;
1952 struct smi_info *smi = data;
1953
1954 out += sprintf(out, "interrupts_enabled: %d\n",
b0defcdb 1955 smi->irq && !smi->interrupt_disabled);
1da177e4
LT
1956 out += sprintf(out, "short_timeouts: %ld\n",
1957 smi->short_timeouts);
1958 out += sprintf(out, "long_timeouts: %ld\n",
1959 smi->long_timeouts);
1960 out += sprintf(out, "timeout_restarts: %ld\n",
1961 smi->timeout_restarts);
1962 out += sprintf(out, "idles: %ld\n",
1963 smi->idles);
1964 out += sprintf(out, "interrupts: %ld\n",
1965 smi->interrupts);
1966 out += sprintf(out, "attentions: %ld\n",
1967 smi->attentions);
1968 out += sprintf(out, "flag_fetches: %ld\n",
1969 smi->flag_fetches);
1970 out += sprintf(out, "hosed_count: %ld\n",
1971 smi->hosed_count);
1972 out += sprintf(out, "complete_transactions: %ld\n",
1973 smi->complete_transactions);
1974 out += sprintf(out, "events: %ld\n",
1975 smi->events);
1976 out += sprintf(out, "watchdog_pretimeouts: %ld\n",
1977 smi->watchdog_pretimeouts);
1978 out += sprintf(out, "incoming_messages: %ld\n",
1979 smi->incoming_messages);
1980
1981 return (out - ((char *) page));
1982}
1983
3ae0e0f9
CM
1984/*
1985 * oem_data_avail_to_receive_msg_avail
1986 * @info - smi_info structure with msg_flags set
1987 *
1988 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
1989 * Returns 1 indicating need to re-run handle_flags().
1990 */
1991static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
1992{
e8b33617
CM
1993 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
1994 RECEIVE_MSG_AVAIL);
3ae0e0f9
CM
1995 return 1;
1996}
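
oem_data_avail_to_receive_msg_avail() clears the OEM bits out of msg_flags and substitutes the receive-message-available bit, so handle_flags() will fetch the message on its next pass. A standalone sketch of that rewrite, assuming the standard IPMI Get Message Flags layout (receive message available is bit 0, the OEM flags are bits 5-7); the real macros live elsewhere in this file.

#include <stdio.h>

#define RECEIVE_MSG_AVAIL 0x01	/* assumed: bit 0 of the message flags */
#define OEM_DATA_AVAIL    0xe0	/* assumed: OEM0 | OEM1 | OEM2, bits 5-7 */

int main(void)
{
	unsigned char msg_flags = 0x20;		/* only OEM0 asserted */

	/* Same rewrite as in the function above. */
	msg_flags = (msg_flags & ~OEM_DATA_AVAIL) | RECEIVE_MSG_AVAIL;
	printf("msg_flags = %#x\n", msg_flags);	/* prints 0x1 */
	return 0;
}
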
1997
1998/*
1999 * setup_dell_poweredge_oem_data_handler
2000 * @info - smi_info.device_id must be populated
2001 *
 2002 * Systems that match but have firmware version < 1.40 may assert
2003 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2004 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2005 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2006 * as RECEIVE_MSG_AVAIL instead.
2007 *
2008 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 2009 * asserts the OEM[012] bits, and if it did, the driver would have to
2010 * change to handle that properly, we don't actually check for the
2011 * firmware version.
2012 * Device ID = 0x20 BMC on PowerEdge 8G servers
2013 * Device Revision = 0x80
2014 * Firmware Revision1 = 0x01 BMC version 1.40
2015 * Firmware Revision2 = 0x40 BCD encoded
2016 * IPMI Version = 0x51 IPMI 1.5
2017 * Manufacturer ID = A2 02 00 Dell IANA
2018 *
d5a2b89a
CM
2019 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 2020 * OEM0_DATA_AVAIL, which then needs to be treated as RECEIVE_MSG_AVAIL.
2021 *
3ae0e0f9
CM
2022 */
2023#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2024#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2025#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
50c812b2 2026#define DELL_IANA_MFR_ID 0x0002a2
3ae0e0f9
CM
2027static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2028{
2029 struct ipmi_device_id *id = &smi_info->device_id;
50c812b2 2030 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
d5a2b89a
CM
2031 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2032 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
50c812b2 2033 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
d5a2b89a
CM
2034 smi_info->oem_data_avail_handler =
2035 oem_data_avail_to_receive_msg_avail;
2036 }
2037 else if (ipmi_version_major(id) < 1 ||
2038 (ipmi_version_major(id) == 1 &&
2039 ipmi_version_minor(id) < 5)) {
2040 smi_info->oem_data_avail_handler =
2041 oem_data_avail_to_receive_msg_avail;
2042 }
3ae0e0f9
CM
2043 }
2044}
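
The match above is against raw Get Device ID bytes: the firmware revision is BCD-encoded, and the IPMI version byte packs the major revision in its low nibble and the minor in its high nibble, which is what ipmi_version_major()/ipmi_version_minor() extract. A standalone sketch decoding the example values quoted in the comment; the helper functions are illustrative.

#include <stdio.h>

static unsigned int ver_major(unsigned char ipmi_version)
{
	return ipmi_version & 0x0f;		/* low nibble: major */
}

static unsigned int ver_minor(unsigned char ipmi_version)
{
	return ipmi_version >> 4;		/* high nibble: minor */
}

static unsigned int bcd_byte(unsigned char v)
{
	return (v >> 4) * 10 + (v & 0x0f);	/* e.g. 0x40 -> 40 */
}

int main(void)
{
	/* Values from the PowerEdge 8G example above. */
	printf("IPMI %u.%u, firmware 1.%02u\n",
	       ver_major(0x51), ver_minor(0x51), bcd_byte(0x40));
	return 0;
}
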
2045
ea94027b
CM
2046#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2047static void return_hosed_msg_badsize(struct smi_info *smi_info)
2048{
2049 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2050
 2051 /* Make it a response. */
2052 msg->rsp[0] = msg->data[0] | 4;
2053 msg->rsp[1] = msg->data[1];
2054 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2055 msg->rsp_size = 3;
2056 smi_info->curr_msg = NULL;
2057 deliver_recv_msg(smi_info, msg);
2058}
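
return_hosed_msg_badsize() turns the stuck request into a synthetic error response in place. The "| 4" works because the netfn occupies the top six bits of byte 0 and response netfns are the (odd) request netfn plus one, so adding 1 to the netfn is adding 4 to the byte. A standalone sketch with the Storage netfn used by the Get SDR workaround below:

#include <stdio.h>

int main(void)
{
	unsigned char req_byte0 = 0x0a << 2;	 /* Storage request netfn (0x0a) */
	unsigned char rsp_byte0 = req_byte0 | 4; /* Storage response netfn (0x0b) */

	printf("request netfn %#x -> response netfn %#x\n",
	       req_byte0 >> 2, rsp_byte0 >> 2);
	return 0;
}
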
2059
2060/*
2061 * dell_poweredge_bt_xaction_handler
2062 * @info - smi_info.device_id must be populated
2063 *
2064 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2065 * not respond to a Get SDR command if the length of the data
2066 * requested is exactly 0x3A, which leads to command timeouts and no
2067 * data returned. This intercepts such commands, and causes userspace
2068 * callers to try again with a different-sized buffer, which succeeds.
2069 */
2070
2071#define STORAGE_NETFN 0x0A
2072#define STORAGE_CMD_GET_SDR 0x23
2073static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2074 unsigned long unused,
2075 void *in)
2076{
2077 struct smi_info *smi_info = in;
2078 unsigned char *data = smi_info->curr_msg->data;
2079 unsigned int size = smi_info->curr_msg->data_size;
2080 if (size >= 8 &&
2081 (data[0]>>2) == STORAGE_NETFN &&
2082 data[1] == STORAGE_CMD_GET_SDR &&
2083 data[7] == 0x3A) {
2084 return_hosed_msg_badsize(smi_info);
2085 return NOTIFY_STOP;
2086 }
2087 return NOTIFY_DONE;
2088}
2089
2090static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2091 .notifier_call = dell_poweredge_bt_xaction_handler,
2092};
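
The notifier only stops the transaction for one very specific request shape: the Storage netfn in the top six bits of the first byte, the Get SDR command, and a requested byte count of 0x3A in byte 7. A standalone sketch of that predicate with an illustrative request buffer; the 0xff/0x00 bytes are stand-ins for the reservation/record/offset fields, not captured traffic.

#include <stdio.h>

#define STORAGE_NETFN       0x0a
#define STORAGE_CMD_GET_SDR 0x23

/* Same predicate as dell_poweredge_bt_xaction_handler() above. */
static int is_bad_get_sdr(const unsigned char *data, unsigned int size)
{
	return size >= 8 &&
	       (data[0] >> 2) == STORAGE_NETFN &&
	       data[1] == STORAGE_CMD_GET_SDR &&
	       data[7] == 0x3a;			/* the length that hangs the BMC */
}

int main(void)
{
	unsigned char req[8] = {
		STORAGE_NETFN << 2, STORAGE_CMD_GET_SDR,
		0xff, 0xff, 0xff, 0xff, 0x00, 0x3a
	};

	printf("intercepted: %d\n", is_bad_get_sdr(req, sizeof(req)));
	return 0;
}
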
2093
2094/*
2095 * setup_dell_poweredge_bt_xaction_handler
2096 * @info - smi_info.device_id must be filled in already
2097 *
 2098 * Registers the Dell PowerEdge BT transaction notifier when the
 2099 * device id shows this workaround is needed.
2100 */
2101static void
2102setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2103{
2104 struct ipmi_device_id *id = &smi_info->device_id;
50c812b2 2105 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
ea94027b
CM
2106 smi_info->si_type == SI_BT)
2107 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2108}
2109
3ae0e0f9
CM
2110/*
2111 * setup_oem_data_handler
2112 * @info - smi_info.device_id must be filled in already
2113 *
 2114 * Fills in smi_info.oem_data_avail_handler
2115 * when we know what function to use there.
2116 */
2117
2118static void setup_oem_data_handler(struct smi_info *smi_info)
2119{
2120 setup_dell_poweredge_oem_data_handler(smi_info);
2121}
2122
ea94027b
CM
2123static void setup_xaction_handlers(struct smi_info *smi_info)
2124{
2125 setup_dell_poweredge_bt_xaction_handler(smi_info);
2126}
2127
a9a2c44f
CM
2128static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2129{
453823ba
CM
2130 if (smi_info->intf) {
2131 /* The timer and thread are only running if the
2132 interface has been started up and registered. */
2133 if (smi_info->thread != NULL)
2134 kthread_stop(smi_info->thread);
2135 del_timer_sync(&smi_info->si_timer);
2136 }
a9a2c44f
CM
2137}
2138
7420884c 2139static __devinitdata struct ipmi_default_vals
b0defcdb
CM
2140{
2141 int type;
2142 int port;
7420884c 2143} ipmi_defaults[] =
b0defcdb
CM
2144{
2145 { .type = SI_KCS, .port = 0xca2 },
2146 { .type = SI_SMIC, .port = 0xca9 },
2147 { .type = SI_BT, .port = 0xe4 },
2148 { .port = 0 }
2149};
2150
2151static __devinit void default_find_bmc(void)
2152{
2153 struct smi_info *info;
2154 int i;
2155
2156 for (i = 0; ; i++) {
2157 if (!ipmi_defaults[i].port)
2158 break;
2159
2160 info = kzalloc(sizeof(*info), GFP_KERNEL);
2161 if (!info)
2162 return;
2163
2164 info->addr_source = NULL;
2165
2166 info->si_type = ipmi_defaults[i].type;
2167 info->io_setup = port_setup;
2168 info->io.addr_data = ipmi_defaults[i].port;
2169 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2170
2171 info->io.addr = NULL;
2172 info->io.regspacing = DEFAULT_REGSPACING;
2173 info->io.regsize = DEFAULT_REGSPACING;
2174 info->io.regshift = 0;
2175
2176 if (try_smi_init(info) == 0) {
2177 /* Found one... */
2178 printk(KERN_INFO "ipmi_si: Found default %s state"
2179 " machine at %s address 0x%lx\n",
2180 si_to_str[info->si_type],
2181 addr_space_to_str[info->io.addr_type],
2182 info->io.addr_data);
2183 return;
2184 }
2185 }
2186}
2187
2188static int is_new_interface(struct smi_info *info)
1da177e4 2189{
b0defcdb 2190 struct smi_info *e;
1da177e4 2191
b0defcdb
CM
2192 list_for_each_entry(e, &smi_infos, link) {
2193 if (e->io.addr_type != info->io.addr_type)
2194 continue;
2195 if (e->io.addr_data == info->io.addr_data)
2196 return 0;
2197 }
1da177e4 2198
b0defcdb
CM
2199 return 1;
2200}
1da177e4 2201
b0defcdb
CM
2202static int try_smi_init(struct smi_info *new_smi)
2203{
2204 int rv;
2205
2206 if (new_smi->addr_source) {
2207 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2208 " machine at %s address 0x%lx, slave address 0x%x,"
2209 " irq %d\n",
2210 new_smi->addr_source,
2211 si_to_str[new_smi->si_type],
2212 addr_space_to_str[new_smi->io.addr_type],
2213 new_smi->io.addr_data,
2214 new_smi->slave_addr, new_smi->irq);
2215 }
2216
d6dfd131 2217 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2218 if (!is_new_interface(new_smi)) {
2219 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2220 rv = -EBUSY;
2221 goto out_err;
2222 }
1da177e4
LT
2223
2224 /* So we know not to free it unless we have allocated one. */
2225 new_smi->intf = NULL;
2226 new_smi->si_sm = NULL;
2227 new_smi->handlers = NULL;
2228
b0defcdb
CM
2229 switch (new_smi->si_type) {
2230 case SI_KCS:
1da177e4 2231 new_smi->handlers = &kcs_smi_handlers;
b0defcdb
CM
2232 break;
2233
2234 case SI_SMIC:
1da177e4 2235 new_smi->handlers = &smic_smi_handlers;
b0defcdb
CM
2236 break;
2237
2238 case SI_BT:
1da177e4 2239 new_smi->handlers = &bt_smi_handlers;
b0defcdb
CM
2240 break;
2241
2242 default:
1da177e4
LT
2243 /* No support for anything else yet. */
2244 rv = -EIO;
2245 goto out_err;
2246 }
2247
2248 /* Allocate the state machine's data and initialize it. */
2249 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
b0defcdb 2250 if (!new_smi->si_sm) {
1da177e4
LT
2251 printk(" Could not allocate state machine memory\n");
2252 rv = -ENOMEM;
2253 goto out_err;
2254 }
2255 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2256 &new_smi->io);
2257
2258 /* Now that we know the I/O size, we can set up the I/O. */
2259 rv = new_smi->io_setup(new_smi);
2260 if (rv) {
2261 printk(" Could not set up I/O space\n");
2262 goto out_err;
2263 }
2264
2265 spin_lock_init(&(new_smi->si_lock));
2266 spin_lock_init(&(new_smi->msg_lock));
2267 spin_lock_init(&(new_smi->count_lock));
2268
2269 /* Do low-level detection first. */
2270 if (new_smi->handlers->detect(new_smi->si_sm)) {
b0defcdb
CM
2271 if (new_smi->addr_source)
2272 printk(KERN_INFO "ipmi_si: Interface detection"
2273 " failed\n");
1da177e4
LT
2274 rv = -ENODEV;
2275 goto out_err;
2276 }
2277
2278 /* Attempt a get device id command. If it fails, we probably
b0defcdb 2279 don't have a BMC here. */
1da177e4 2280 rv = try_get_dev_id(new_smi);
b0defcdb
CM
2281 if (rv) {
2282 if (new_smi->addr_source)
2283 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2284 " at this location\n");
1da177e4 2285 goto out_err;
b0defcdb 2286 }
1da177e4 2287
3ae0e0f9 2288 setup_oem_data_handler(new_smi);
ea94027b 2289 setup_xaction_handlers(new_smi);
3ae0e0f9 2290
1da177e4 2291 /* Try to claim any interrupts. */
b0defcdb
CM
2292 if (new_smi->irq_setup)
2293 new_smi->irq_setup(new_smi);
1da177e4
LT
2294
2295 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2296 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2297 new_smi->curr_msg = NULL;
2298 atomic_set(&new_smi->req_events, 0);
2299 new_smi->run_to_completion = 0;
2300
2301 new_smi->interrupt_disabled = 0;
a9a2c44f 2302 atomic_set(&new_smi->stop_operation, 0);
b0defcdb
CM
2303 new_smi->intf_num = smi_num;
2304 smi_num++;
1da177e4
LT
2305
2306 /* Start clearing the flags before we enable interrupts or the
2307 timer to avoid racing with the timer. */
2308 start_clear_flags(new_smi);
2309 /* IRQ is defined to be set when non-zero. */
2310 if (new_smi->irq)
2311 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2312
50c812b2
CM
2313 if (!new_smi->dev) {
2314 /* If we don't already have a device from something
2315 * else (like PCI), then register a new one. */
2316 new_smi->pdev = platform_device_alloc("ipmi_si",
2317 new_smi->intf_num);
 2318 if (!new_smi->pdev) {
 	rv = -ENOMEM;
2319 printk(KERN_ERR
2320 "ipmi_si_intf:"
2321 " Unable to allocate platform device\n");
453823ba 2322 goto out_err;
50c812b2
CM
2323 }
2324 new_smi->dev = &new_smi->pdev->dev;
2325 new_smi->dev->driver = &ipmi_driver;
2326
2327 rv = platform_device_register(new_smi->pdev);
2328 if (rv) {
2329 printk(KERN_ERR
2330 "ipmi_si_intf:"
2331 " Unable to register system interface device:"
2332 " %d\n",
2333 rv);
453823ba 2334 goto out_err;
50c812b2
CM
2335 }
2336 new_smi->dev_registered = 1;
2337 }
2338
1da177e4
LT
2339 rv = ipmi_register_smi(&handlers,
2340 new_smi,
50c812b2
CM
2341 &new_smi->device_id,
2342 new_smi->dev,
453823ba 2343 new_smi->slave_addr);
1da177e4
LT
2344 if (rv) {
2345 printk(KERN_ERR
2346 "ipmi_si: Unable to register device: error %d\n",
2347 rv);
2348 goto out_err_stop_timer;
2349 }
2350
2351 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2352 type_file_read_proc, NULL,
2353 new_smi, THIS_MODULE);
2354 if (rv) {
2355 printk(KERN_ERR
2356 "ipmi_si: Unable to create proc entry: %d\n",
2357 rv);
2358 goto out_err_stop_timer;
2359 }
2360
2361 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2362 stat_file_read_proc, NULL,
2363 new_smi, THIS_MODULE);
2364 if (rv) {
2365 printk(KERN_ERR
2366 "ipmi_si: Unable to create proc entry: %d\n",
2367 rv);
2368 goto out_err_stop_timer;
2369 }
2370
b0defcdb
CM
2371 list_add_tail(&new_smi->link, &smi_infos);
2372
d6dfd131 2373 mutex_unlock(&smi_infos_lock);
1da177e4 2374
b0defcdb 2375 printk(" IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
1da177e4
LT
2376
2377 return 0;
2378
2379 out_err_stop_timer:
a9a2c44f
CM
2380 atomic_inc(&new_smi->stop_operation);
2381 wait_for_timer_and_thread(new_smi);
1da177e4
LT
2382
2383 out_err:
2384 if (new_smi->intf)
2385 ipmi_unregister_smi(new_smi->intf);
2386
b0defcdb
CM
2387 if (new_smi->irq_cleanup)
2388 new_smi->irq_cleanup(new_smi);
1da177e4
LT
2389
2390 /* Wait until we know that we are out of any interrupt
 2391 handlers that might have been running before we freed the
2392 interrupt. */
fbd568a3 2393 synchronize_sched();
1da177e4
LT
2394
2395 if (new_smi->si_sm) {
2396 if (new_smi->handlers)
2397 new_smi->handlers->cleanup(new_smi->si_sm);
2398 kfree(new_smi->si_sm);
2399 }
b0defcdb
CM
2400 if (new_smi->addr_source_cleanup)
2401 new_smi->addr_source_cleanup(new_smi);
7767e126
PG
2402 if (new_smi->io_cleanup)
2403 new_smi->io_cleanup(new_smi);
1da177e4 2404
50c812b2
CM
2405 if (new_smi->dev_registered)
2406 platform_device_unregister(new_smi->pdev);
2407
2408 kfree(new_smi);
2409
d6dfd131 2410 mutex_unlock(&smi_infos_lock);
b0defcdb 2411
1da177e4
LT
2412 return rv;
2413}
2414
b0defcdb 2415static __devinit int init_ipmi_si(void)
1da177e4 2416{
1da177e4
LT
2417 int i;
2418 char *str;
50c812b2 2419 int rv;
1da177e4
LT
2420
2421 if (initialized)
2422 return 0;
2423 initialized = 1;
2424
50c812b2
CM
2425 /* Register the device drivers. */
2426 rv = driver_register(&ipmi_driver);
2427 if (rv) {
2428 printk(KERN_ERR
2429 "init_ipmi_si: Unable to register driver: %d\n",
2430 rv);
2431 return rv;
2432 }
2433
2434
1da177e4
LT
2435 /* Parse out the si_type string into its components. */
2436 str = si_type_str;
2437 if (*str != '\0') {
e8b33617 2438 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
1da177e4
LT
2439 si_type[i] = str;
2440 str = strchr(str, ',');
2441 if (str) {
2442 *str = '\0';
2443 str++;
2444 } else {
2445 break;
2446 }
2447 }
2448 }
2449
1fdd75bd 2450 printk(KERN_INFO "IPMI System Interface driver.\n");
1da177e4 2451
b0defcdb
CM
2452 hardcode_find_bmc();
2453
a9fad4cc 2454#ifdef CONFIG_DMI
b224cd3a 2455 dmi_find_bmc();
1da177e4
LT
2456#endif
2457
b0defcdb
CM
2458#ifdef CONFIG_ACPI
2459 if (si_trydefaults)
2460 acpi_find_bmc();
2461#endif
1da177e4 2462
b0defcdb
CM
2463#ifdef CONFIG_PCI
2464 pci_module_init(&ipmi_pci_driver);
2465#endif
2466
2467 if (si_trydefaults) {
d6dfd131 2468 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2469 if (list_empty(&smi_infos)) {
2470 /* No BMC was found, try defaults. */
d6dfd131 2471 mutex_unlock(&smi_infos_lock);
b0defcdb
CM
2472 default_find_bmc();
2473 } else {
d6dfd131 2474 mutex_unlock(&smi_infos_lock);
b0defcdb 2475 }
1da177e4
LT
2476 }
2477
d6dfd131 2478 mutex_lock(&smi_infos_lock);
b0defcdb 2479 if (list_empty(&smi_infos)) {
d6dfd131 2480 mutex_unlock(&smi_infos_lock);
b0defcdb
CM
2481#ifdef CONFIG_PCI
2482 pci_unregister_driver(&ipmi_pci_driver);
2483#endif
1da177e4
LT
 2484 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2485 return -ENODEV;
b0defcdb 2486 } else {
d6dfd131 2487 mutex_unlock(&smi_infos_lock);
b0defcdb 2488 return 0;
1da177e4 2489 }
1da177e4
LT
2490}
2491module_init(init_ipmi_si);
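
init_ipmi_si() splits the comma-separated si_type string in place, leaving si_type[] pointing at the individual interface names. A standalone sketch of the same split with an illustrative input; the limit of four entries stands in for the driver's SI_MAX_PARMS, which is defined elsewhere in this file.

#include <stdio.h>
#include <string.h>

#define MAX_PARMS 4	/* stand-in for SI_MAX_PARMS */

int main(void)
{
	char si_type_str[] = "kcs,smic,bt";	/* illustrative input */
	char *si_type[MAX_PARMS] = { NULL };
	char *str = si_type_str;
	int i;

	for (i = 0; i < MAX_PARMS && *str != '\0'; i++) {
		si_type[i] = str;
		str = strchr(str, ',');
		if (!str)
			break;
		*str++ = '\0';		/* terminate this entry, advance */
	}

	for (i = 0; i < MAX_PARMS && si_type[i]; i++)
		printf("si_type[%d] = %s\n", i, si_type[i]);
	return 0;
}
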
2492
b0defcdb 2493static void __devexit cleanup_one_si(struct smi_info *to_clean)
1da177e4
LT
2494{
2495 int rv;
2496 unsigned long flags;
2497
b0defcdb 2498 if (!to_clean)
1da177e4
LT
2499 return;
2500
b0defcdb
CM
2501 list_del(&to_clean->link);
2502
1da177e4
LT
2503 /* Tell the timer and interrupt handlers that we are shutting
2504 down. */
2505 spin_lock_irqsave(&(to_clean->si_lock), flags);
2506 spin_lock(&(to_clean->msg_lock));
2507
a9a2c44f 2508 atomic_inc(&to_clean->stop_operation);
b0defcdb
CM
2509
2510 if (to_clean->irq_cleanup)
2511 to_clean->irq_cleanup(to_clean);
1da177e4
LT
2512
2513 spin_unlock(&(to_clean->msg_lock));
2514 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2515
2516 /* Wait until we know that we are out of any interrupt
 2517 handlers that might have been running before we freed the
2518 interrupt. */
fbd568a3 2519 synchronize_sched();
1da177e4 2520
a9a2c44f 2521 wait_for_timer_and_thread(to_clean);
1da177e4
LT
2522
2523 /* Interrupts and timeouts are stopped, now make sure the
2524 interface is in a clean state. */
e8b33617 2525 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
1da177e4 2526 poll(to_clean);
da4cd8df 2527 schedule_timeout_uninterruptible(1);
1da177e4
LT
2528 }
2529
2530 rv = ipmi_unregister_smi(to_clean->intf);
2531 if (rv) {
2532 printk(KERN_ERR
2533 "ipmi_si: Unable to unregister device: errno=%d\n",
2534 rv);
2535 }
2536
2537 to_clean->handlers->cleanup(to_clean->si_sm);
2538
2539 kfree(to_clean->si_sm);
2540
b0defcdb
CM
2541 if (to_clean->addr_source_cleanup)
2542 to_clean->addr_source_cleanup(to_clean);
7767e126
PG
2543 if (to_clean->io_cleanup)
2544 to_clean->io_cleanup(to_clean);
50c812b2
CM
2545
2546 if (to_clean->dev_registered)
2547 platform_device_unregister(to_clean->pdev);
2548
2549 kfree(to_clean);
1da177e4
LT
2550}
2551
2552static __exit void cleanup_ipmi_si(void)
2553{
b0defcdb 2554 struct smi_info *e, *tmp_e;
1da177e4 2555
b0defcdb 2556 if (!initialized)
1da177e4
LT
2557 return;
2558
b0defcdb
CM
2559#ifdef CONFIG_PCI
2560 pci_unregister_driver(&ipmi_pci_driver);
2561#endif
2562
d6dfd131 2563 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2564 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2565 cleanup_one_si(e);
d6dfd131 2566 mutex_unlock(&smi_infos_lock);
50c812b2
CM
2567
2568 driver_unregister(&ipmi_driver);
1da177e4
LT
2569}
2570module_exit(cleanup_ipmi_si);
2571
2572MODULE_LICENSE("GPL");
1fdd75bd
CM
2573MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2574MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");