ipmi: fix use-after-free of user->release_barrier.rda
drivers/char/ipmi/ipmi_msghandler.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * ipmi_msghandler.c
4  *
5  * Incoming and outgoing message routing for an IPMI interface.
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  */
13
14 #define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
15 #define dev_fmt pr_fmt
16
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/poll.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/spinlock.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/ipmi.h>
26 #include <linux/ipmi_smi.h>
27 #include <linux/notifier.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/rcupdate.h>
31 #include <linux/interrupt.h>
32 #include <linux/moduleparam.h>
33 #include <linux/workqueue.h>
34 #include <linux/uuid.h>
35 #include <linux/nospec.h>
36
37 #define IPMI_DRIVER_VERSION "39.2"
38
39 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
40 static int ipmi_init_msghandler(void);
41 static void smi_recv_tasklet(unsigned long);
42 static void handle_new_recv_msgs(struct ipmi_smi *intf);
43 static void need_waiter(struct ipmi_smi *intf);
44 static int handle_one_recv_msg(struct ipmi_smi *intf,
45                                struct ipmi_smi_msg *msg);
46
47 #ifdef DEBUG
48 static void ipmi_debug_msg(const char *title, unsigned char *data,
49                            unsigned int len)
50 {
51         int i, pos;
52         char buf[100];
53
54         pos = snprintf(buf, sizeof(buf), "%s: ", title);
55         for (i = 0; i < len; i++)
56                 pos += snprintf(buf + pos, sizeof(buf) - pos,
57                                 " %2.2x", data[i]);
58         pr_debug("%s\n", buf);
59 }
60 #else
61 static void ipmi_debug_msg(const char *title, unsigned char *data,
62                            unsigned int len)
63 { }
64 #endif
65
66 static int initialized;
67
68 enum ipmi_panic_event_op {
69         IPMI_SEND_PANIC_EVENT_NONE,
70         IPMI_SEND_PANIC_EVENT,
71         IPMI_SEND_PANIC_EVENT_STRING
72 };
73 #ifdef CONFIG_IPMI_PANIC_STRING
74 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
75 #elif defined(CONFIG_IPMI_PANIC_EVENT)
76 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
77 #else
78 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
79 #endif
80 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
81
82 static int panic_op_write_handler(const char *val,
83                                   const struct kernel_param *kp)
84 {
85         char valcp[16];
86         char *s;
87
88         strncpy(valcp, val, 15);
89         valcp[15] = '\0';
90
91         s = strstrip(valcp);
92
93         if (strcmp(s, "none") == 0)
94                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
95         else if (strcmp(s, "event") == 0)
96                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
97         else if (strcmp(s, "string") == 0)
98                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
99         else
100                 return -EINVAL;
101
102         return 0;
103 }
104
105 static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
106 {
107         switch (ipmi_send_panic_event) {
108         case IPMI_SEND_PANIC_EVENT_NONE:
109                 strcpy(buffer, "none");
110                 break;
111
112         case IPMI_SEND_PANIC_EVENT:
113                 strcpy(buffer, "event");
114                 break;
115
116         case IPMI_SEND_PANIC_EVENT_STRING:
117                 strcpy(buffer, "string");
118                 break;
119
120         default:
121                 strcpy(buffer, "???");
122                 break;
123         }
124
125         return strlen(buffer);
126 }
127
128 static const struct kernel_param_ops panic_op_ops = {
129         .set = panic_op_write_handler,
130         .get = panic_op_read_handler
131 };
132 module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
133 MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
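
/*
 * Since panic_op is registered with module_param_cb(), it can also be
 * changed at runtime through sysfs, e.g. (illustrative invocation):
 *
 *   echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 */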
134
135
136 #define MAX_EVENTS_IN_QUEUE     25
137
138 /* Remain in auto-maintenance mode for this amount of time (in ms). */
139 static unsigned long maintenance_mode_timeout_ms = 30000;
140 module_param(maintenance_mode_timeout_ms, ulong, 0644);
141 MODULE_PARM_DESC(maintenance_mode_timeout_ms,
142                  "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
143
144 /*
 145  * Don't let a message sit in a queue forever, always time it with at least
146  * the max message timer.  This is in milliseconds.
147  */
148 #define MAX_MSG_TIMEOUT         60000
149
150 /*
151  * Timeout times below are in milliseconds, and are done off a 1
152  * second timer.  So setting the value to 1000 would mean anything
153  * between 0 and 1000ms.  So really the only reasonable minimum
 154  * setting is 2000ms, which is between 1 and 2 seconds.
155  */
156
157 /* The default timeout for message retries. */
158 static unsigned long default_retry_ms = 2000;
159 module_param(default_retry_ms, ulong, 0644);
160 MODULE_PARM_DESC(default_retry_ms,
161                  "The time (milliseconds) between retry sends");
162
163 /* The default timeout for maintenance mode message retries. */
164 static unsigned long default_maintenance_retry_ms = 3000;
165 module_param(default_maintenance_retry_ms, ulong, 0644);
166 MODULE_PARM_DESC(default_maintenance_retry_ms,
167                  "The time (milliseconds) between retry sends in maintenance mode");
168
169 /* The default maximum number of retries */
170 static unsigned int default_max_retries = 4;
171 module_param(default_max_retries, uint, 0644);
172 MODULE_PARM_DESC(default_max_retries,
 173                  "The maximum number of times to retry sends before giving up");
174
175 /* Call every ~1000 ms. */
176 #define IPMI_TIMEOUT_TIME       1000
177
178 /* How many jiffies does it take to get to the timeout time. */
179 #define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
180
181 /*
182  * Request events from the queue every second (this is the number of
183  * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
184  * future, IPMI will add a way to know immediately if an event is in
185  * the queue and this silliness can go away.
186  */
187 #define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
188
189 /* How long should we cache dynamic device IDs? */
190 #define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)
191
192 /*
193  * The main "user" data structure.
194  */
195 struct ipmi_user {
196         struct list_head link;
197
198         /*
 199          * A pointer to myself so srcu_dereference() can be used on it;
 200          * set to NULL when the user is destroyed.
201          */
202         struct ipmi_user *self;
203         struct srcu_struct release_barrier;
204
205         struct kref refcount;
206
207         /* The upper layer that handles receive messages. */
208         const struct ipmi_user_hndl *handler;
209         void             *handler_data;
210
211         /* The interface this user is bound to. */
212         struct ipmi_smi *intf;
213
214         /* Does this interface receive IPMI events? */
215         bool gets_events;
216 };
217
218 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
219         __acquires(user->release_barrier)
220 {
221         struct ipmi_user *ruser;
222
223         *index = srcu_read_lock(&user->release_barrier);
224         ruser = srcu_dereference(user->self, &user->release_barrier);
225         if (!ruser)
226                 srcu_read_unlock(&user->release_barrier, *index);
227         return ruser;
228 }
229
230 static void release_ipmi_user(struct ipmi_user *user, int index)
231 {
232         srcu_read_unlock(&user->release_barrier, index);
233 }
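
/*
 * The usual calling pattern for the two helpers above (a sketch; the
 * exported entry points below all follow this shape):
 *
 *   int index;
 *   struct ipmi_user *ruser = acquire_ipmi_user(user, &index);
 *
 *   if (!ruser)
 *           return -ENODEV;      (the user is being destroyed)
 *   ...use ruser...
 *   release_ipmi_user(ruser, index);
 */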
234
235 struct cmd_rcvr {
236         struct list_head link;
237
238         struct ipmi_user *user;
239         unsigned char netfn;
240         unsigned char cmd;
241         unsigned int  chans;
242
243         /*
 244          * This is used to form a linked list during mass deletion.
245          * Since this is in an RCU list, we cannot use the link above
246          * or change any data until the RCU period completes.  So we
247          * use this next variable during mass deletion so we can have
248          * a list and don't have to wait and restart the search on
249          * every individual deletion of a command.
250          */
251         struct cmd_rcvr *next;
252 };
253
254 struct seq_table {
255         unsigned int         inuse : 1;
256         unsigned int         broadcast : 1;
257
258         unsigned long        timeout;
259         unsigned long        orig_timeout;
260         unsigned int         retries_left;
261
262         /*
263          * To verify on an incoming send message response that this is
264          * the message that the response is for, we keep a sequence id
265          * and increment it every time we send a message.
266          */
267         long                 seqid;
268
269         /*
270          * This is held so we can properly respond to the message on a
271          * timeout, and it is used to hold the temporary data for
272          * retransmission, too.
273          */
274         struct ipmi_recv_msg *recv_msg;
275 };
276
277 /*
278  * Store the information in a msgid (long) to allow us to find a
279  * sequence table entry from the msgid.
280  */
281 #define STORE_SEQ_IN_MSGID(seq, seqid) \
282         ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
283
284 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
285         do {                                                            \
286                 seq = (((msgid) >> 26) & 0x3f);                         \
287                 seqid = ((msgid) & 0x3ffffff);                          \
288         } while (0)
289
290 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
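
/*
 * With illustrative values: STORE_SEQ_IN_MSGID(5, 0x123) packs the 6-bit
 * sequence number into the top bits, giving (5 << 26) | 0x123 = 0x14000123,
 * and GET_SEQ_FROM_MSGID(0x14000123, seq, seqid) recovers seq = 5 and
 * seqid = 0x123 again.
 */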
291
292 #define IPMI_MAX_CHANNELS       16
293 struct ipmi_channel {
294         unsigned char medium;
295         unsigned char protocol;
296 };
297
298 struct ipmi_channel_set {
299         struct ipmi_channel c[IPMI_MAX_CHANNELS];
300 };
301
302 struct ipmi_my_addrinfo {
303         /*
304          * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
305          * but may be changed by the user.
306          */
307         unsigned char address;
308
309         /*
310          * My LUN.  This should generally stay the SMS LUN, but just in
311          * case...
312          */
313         unsigned char lun;
314 };
315
316 /*
317  * Note that the product id, manufacturer id, guid, and device id are
318  * immutable in this structure, so dyn_mutex is not required for
319  * accessing those.  If those change on a BMC, a new BMC is allocated.
320  */
321 struct bmc_device {
322         struct platform_device pdev;
323         struct list_head       intfs; /* Interfaces on this BMC. */
324         struct ipmi_device_id  id;
325         struct ipmi_device_id  fetch_id;
326         int                    dyn_id_set;
327         unsigned long          dyn_id_expiry;
328         struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
329         guid_t                 guid;
330         guid_t                 fetch_guid;
331         int                    dyn_guid_set;
332         struct kref            usecount;
333         struct work_struct     remove_work;
334 };
335 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
336
337 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
338                              struct ipmi_device_id *id,
339                              bool *guid_set, guid_t *guid);
340
341 /*
342  * Various statistics for IPMI, these index stats[] in the ipmi_smi
343  * structure.
344  */
345 enum ipmi_stat_indexes {
346         /* Commands we got from the user that were invalid. */
347         IPMI_STAT_sent_invalid_commands = 0,
348
349         /* Commands we sent to the MC. */
350         IPMI_STAT_sent_local_commands,
351
352         /* Responses from the MC that were delivered to a user. */
353         IPMI_STAT_handled_local_responses,
354
355         /* Responses from the MC that were not delivered to a user. */
356         IPMI_STAT_unhandled_local_responses,
357
358         /* Commands we sent out to the IPMB bus. */
359         IPMI_STAT_sent_ipmb_commands,
360
361         /* Commands sent on the IPMB that had errors on the SEND CMD */
362         IPMI_STAT_sent_ipmb_command_errs,
363
364         /* Each retransmit increments this count. */
365         IPMI_STAT_retransmitted_ipmb_commands,
366
367         /*
368          * When a message times out (runs out of retransmits) this is
369          * incremented.
370          */
371         IPMI_STAT_timed_out_ipmb_commands,
372
373         /*
374          * This is like above, but for broadcasts.  Broadcasts are
375          * *not* included in the above count (they are expected to
376          * time out).
377          */
378         IPMI_STAT_timed_out_ipmb_broadcasts,
379
380         /* Responses I have sent to the IPMB bus. */
381         IPMI_STAT_sent_ipmb_responses,
382
383         /* The response was delivered to the user. */
384         IPMI_STAT_handled_ipmb_responses,
385
386         /* The response had invalid data in it. */
387         IPMI_STAT_invalid_ipmb_responses,
388
389         /* The response didn't have anyone waiting for it. */
390         IPMI_STAT_unhandled_ipmb_responses,
391
 392         /* Commands we sent out over the LAN. */
 393         IPMI_STAT_sent_lan_commands,
 394
 395         /* Commands sent over the LAN that had errors on the SEND CMD */
396         IPMI_STAT_sent_lan_command_errs,
397
398         /* Each retransmit increments this count. */
399         IPMI_STAT_retransmitted_lan_commands,
400
401         /*
402          * When a message times out (runs out of retransmits) this is
403          * incremented.
404          */
405         IPMI_STAT_timed_out_lan_commands,
406
 407         /* Responses I have sent over the LAN. */
408         IPMI_STAT_sent_lan_responses,
409
410         /* The response was delivered to the user. */
411         IPMI_STAT_handled_lan_responses,
412
413         /* The response had invalid data in it. */
414         IPMI_STAT_invalid_lan_responses,
415
416         /* The response didn't have anyone waiting for it. */
417         IPMI_STAT_unhandled_lan_responses,
418
419         /* The command was delivered to the user. */
420         IPMI_STAT_handled_commands,
421
422         /* The command had invalid data in it. */
423         IPMI_STAT_invalid_commands,
424
425         /* The command didn't have anyone waiting for it. */
426         IPMI_STAT_unhandled_commands,
427
428         /* Invalid data in an event. */
429         IPMI_STAT_invalid_events,
430
431         /* Events that were received with the proper format. */
432         IPMI_STAT_events,
433
434         /* Retransmissions on IPMB that failed. */
435         IPMI_STAT_dropped_rexmit_ipmb_commands,
436
437         /* Retransmissions on LAN that failed. */
438         IPMI_STAT_dropped_rexmit_lan_commands,
439
440         /* This *must* remain last, add new values above this. */
441         IPMI_NUM_STATS
442 };
443
444
445 #define IPMI_IPMB_NUM_SEQ       64
446 struct ipmi_smi {
447         /* What interface number are we? */
448         int intf_num;
449
450         struct kref refcount;
451
452         /* Set when the interface is being unregistered. */
453         bool in_shutdown;
454
455         /* Used for a list of interfaces. */
456         struct list_head link;
457
458         /*
459          * The list of upper layers that are using me.  seq_lock write
460          * protects this.  Read protection is with srcu.
461          */
462         struct list_head users;
463         struct srcu_struct users_srcu;
464
465         /* Used for wake ups at startup. */
466         wait_queue_head_t waitq;
467
468         /*
469          * Prevents the interface from being unregistered when the
470          * interface is used by being looked up through the BMC
471          * structure.
472          */
473         struct mutex bmc_reg_mutex;
474
475         struct bmc_device tmp_bmc;
476         struct bmc_device *bmc;
477         bool bmc_registered;
478         struct list_head bmc_link;
479         char *my_dev_name;
480         bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
481         struct work_struct bmc_reg_work;
482
483         const struct ipmi_smi_handlers *handlers;
484         void                     *send_info;
485
486         /* Driver-model device for the system interface. */
487         struct device          *si_dev;
488
489         /*
490          * A table of sequence numbers for this interface.  We use the
491          * sequence numbers for IPMB messages that go out of the
492          * interface to match them up with their responses.  A routine
493          * is called periodically to time the items in this list.
494          */
495         spinlock_t       seq_lock;
496         struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
497         int curr_seq;
498
499         /*
500          * Messages queued for delivery.  If delivery fails (out of memory
 501          * for instance), they will stay in here to be processed later in a
502          * periodic timer interrupt.  The tasklet is for handling received
503          * messages directly from the handler.
504          */
505         spinlock_t       waiting_rcv_msgs_lock;
506         struct list_head waiting_rcv_msgs;
507         atomic_t         watchdog_pretimeouts_to_deliver;
508         struct tasklet_struct recv_tasklet;
509
510         spinlock_t             xmit_msgs_lock;
511         struct list_head       xmit_msgs;
512         struct ipmi_smi_msg    *curr_msg;
513         struct list_head       hp_xmit_msgs;
514
515         /*
516          * The list of command receivers that are registered for commands
517          * on this interface.
518          */
519         struct mutex     cmd_rcvrs_mutex;
520         struct list_head cmd_rcvrs;
521
522         /*
 523          * Events that were queued because no one was there to receive
524          * them.
525          */
526         spinlock_t       events_lock; /* For dealing with event stuff. */
527         struct list_head waiting_events;
528         unsigned int     waiting_events_count; /* How many events in queue? */
529         char             delivering_events;
530         char             event_msg_printed;
531         atomic_t         event_waiters;
532         unsigned int     ticks_to_req_ev;
533         int              last_needs_timer;
534
535         /*
536          * The event receiver for my BMC, only really used at panic
537          * shutdown as a place to store this.
538          */
539         unsigned char event_receiver;
540         unsigned char event_receiver_lun;
541         unsigned char local_sel_device;
542         unsigned char local_event_generator;
543
544         /* For handling of maintenance mode. */
545         int maintenance_mode;
546         bool maintenance_mode_enable;
547         int auto_maintenance_timeout;
548         spinlock_t maintenance_mode_lock; /* Used in a timer... */
549
550         /*
551          * If we are doing maintenance on something on IPMB, extend
552          * the timeout time to avoid timeouts writing firmware and
553          * such.
554          */
555         int ipmb_maintenance_mode_timeout;
556
557         /*
558          * A cheap hack, if this is non-null and a message to an
559          * interface comes in with a NULL user, call this routine with
560          * it.  Note that the message will still be freed by the
561          * caller.  This only works on the system interface.
562          *
563          * Protected by bmc_reg_mutex.
564          */
565         void (*null_user_handler)(struct ipmi_smi *intf,
566                                   struct ipmi_recv_msg *msg);
567
568         /*
569          * When we are scanning the channels for an SMI, this will
570          * tell which channel we are scanning.
571          */
572         int curr_channel;
573
574         /* Channel information */
575         struct ipmi_channel_set *channel_list;
576         unsigned int curr_working_cset; /* First index into the following. */
577         struct ipmi_channel_set wchannels[2];
578         struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
579         bool channels_ready;
580
581         atomic_t stats[IPMI_NUM_STATS];
582
583         /*
584          * run_to_completion duplicate of smb_info, smi_info
 585          * and ipmi_serial_info structures. Used to decrease the number of
586          * parameters passed by "low" level IPMI code.
587          */
588         int run_to_completion;
589 };
590 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
591
592 static void __get_guid(struct ipmi_smi *intf);
593 static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
594 static int __ipmi_bmc_register(struct ipmi_smi *intf,
595                                struct ipmi_device_id *id,
596                                bool guid_set, guid_t *guid, int intf_num);
597 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
598
599
600 /**
601  * The driver model view of the IPMI messaging driver.
602  */
603 static struct platform_driver ipmidriver = {
604         .driver = {
605                 .name = "ipmi",
606                 .bus = &platform_bus_type
607         }
608 };
609 /*
610  * This mutex keeps us from adding the same BMC twice.
611  */
612 static DEFINE_MUTEX(ipmidriver_mutex);
613
614 static LIST_HEAD(ipmi_interfaces);
615 static DEFINE_MUTEX(ipmi_interfaces_mutex);
616 DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
617
618 /*
 619  * List of watchers that want to know when SMIs are added and deleted.
620  */
621 static LIST_HEAD(smi_watchers);
622 static DEFINE_MUTEX(smi_watchers_mutex);
623
624 #define ipmi_inc_stat(intf, stat) \
625         atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
626 #define ipmi_get_stat(intf, stat) \
627         ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
628
629 static const char * const addr_src_to_str[] = {
630         "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
631         "device-tree", "platform"
632 };
633
634 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
635 {
636         if (src >= SI_LAST)
637                 src = 0; /* Invalid */
638         return addr_src_to_str[src];
639 }
640 EXPORT_SYMBOL(ipmi_addr_src_to_str);
641
642 static int is_lan_addr(struct ipmi_addr *addr)
643 {
644         return addr->addr_type == IPMI_LAN_ADDR_TYPE;
645 }
646
647 static int is_ipmb_addr(struct ipmi_addr *addr)
648 {
649         return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
650 }
651
652 static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
653 {
654         return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
655 }
656
657 static void free_recv_msg_list(struct list_head *q)
658 {
659         struct ipmi_recv_msg *msg, *msg2;
660
661         list_for_each_entry_safe(msg, msg2, q, link) {
662                 list_del(&msg->link);
663                 ipmi_free_recv_msg(msg);
664         }
665 }
666
667 static void free_smi_msg_list(struct list_head *q)
668 {
669         struct ipmi_smi_msg *msg, *msg2;
670
671         list_for_each_entry_safe(msg, msg2, q, link) {
672                 list_del(&msg->link);
673                 ipmi_free_smi_msg(msg);
674         }
675 }
676
677 static void clean_up_interface_data(struct ipmi_smi *intf)
678 {
679         int              i;
680         struct cmd_rcvr  *rcvr, *rcvr2;
681         struct list_head list;
682
683         tasklet_kill(&intf->recv_tasklet);
684
685         free_smi_msg_list(&intf->waiting_rcv_msgs);
686         free_recv_msg_list(&intf->waiting_events);
687
688         /*
689          * Wholesale remove all the entries from the list in the
690          * interface and wait for RCU to know that none are in use.
691          */
692         mutex_lock(&intf->cmd_rcvrs_mutex);
693         INIT_LIST_HEAD(&list);
694         list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
695         mutex_unlock(&intf->cmd_rcvrs_mutex);
696
697         list_for_each_entry_safe(rcvr, rcvr2, &list, link)
698                 kfree(rcvr);
699
700         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
701                 if ((intf->seq_table[i].inuse)
702                                         && (intf->seq_table[i].recv_msg))
703                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
704         }
705 }
706
707 static void intf_free(struct kref *ref)
708 {
709         struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
710
711         clean_up_interface_data(intf);
712         kfree(intf);
713 }
714
715 struct watcher_entry {
716         int              intf_num;
717         struct ipmi_smi  *intf;
718         struct list_head link;
719 };
720
721 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
722 {
723         struct ipmi_smi *intf;
724         int index;
725
726         mutex_lock(&smi_watchers_mutex);
727
728         list_add(&watcher->link, &smi_watchers);
729
730         index = srcu_read_lock(&ipmi_interfaces_srcu);
731         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
732                 int intf_num = READ_ONCE(intf->intf_num);
733
734                 if (intf_num == -1)
735                         continue;
736                 watcher->new_smi(intf_num, intf->si_dev);
737         }
738         srcu_read_unlock(&ipmi_interfaces_srcu, index);
739
740         mutex_unlock(&smi_watchers_mutex);
741
742         return 0;
743 }
744 EXPORT_SYMBOL(ipmi_smi_watcher_register);
745
746 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
747 {
748         mutex_lock(&smi_watchers_mutex);
749         list_del(&watcher->link);
750         mutex_unlock(&smi_watchers_mutex);
751         return 0;
752 }
753 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
754
755 /*
 756  * Takes smi_watchers_mutex itself, so the caller must not hold it.
757  */
758 static void
759 call_smi_watchers(int i, struct device *dev)
760 {
761         struct ipmi_smi_watcher *w;
762
763         mutex_lock(&smi_watchers_mutex);
764         list_for_each_entry(w, &smi_watchers, link) {
765                 if (try_module_get(w->owner)) {
766                         w->new_smi(i, dev);
767                         module_put(w->owner);
768                 }
769         }
770         mutex_unlock(&smi_watchers_mutex);
771 }
772
773 static int
774 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
775 {
776         if (addr1->addr_type != addr2->addr_type)
777                 return 0;
778
779         if (addr1->channel != addr2->channel)
780                 return 0;
781
782         if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
783                 struct ipmi_system_interface_addr *smi_addr1
784                     = (struct ipmi_system_interface_addr *) addr1;
785                 struct ipmi_system_interface_addr *smi_addr2
786                     = (struct ipmi_system_interface_addr *) addr2;
787                 return (smi_addr1->lun == smi_addr2->lun);
788         }
789
790         if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
791                 struct ipmi_ipmb_addr *ipmb_addr1
792                     = (struct ipmi_ipmb_addr *) addr1;
793                 struct ipmi_ipmb_addr *ipmb_addr2
794                     = (struct ipmi_ipmb_addr *) addr2;
795
796                 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
797                         && (ipmb_addr1->lun == ipmb_addr2->lun));
798         }
799
800         if (is_lan_addr(addr1)) {
801                 struct ipmi_lan_addr *lan_addr1
802                         = (struct ipmi_lan_addr *) addr1;
803                 struct ipmi_lan_addr *lan_addr2
804                     = (struct ipmi_lan_addr *) addr2;
805
806                 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
807                         && (lan_addr1->local_SWID == lan_addr2->local_SWID)
808                         && (lan_addr1->session_handle
809                             == lan_addr2->session_handle)
810                         && (lan_addr1->lun == lan_addr2->lun));
811         }
812
813         return 1;
814 }
815
816 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
817 {
818         if (len < sizeof(struct ipmi_system_interface_addr))
819                 return -EINVAL;
820
821         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
822                 if (addr->channel != IPMI_BMC_CHANNEL)
823                         return -EINVAL;
824                 return 0;
825         }
826
827         if ((addr->channel == IPMI_BMC_CHANNEL)
828             || (addr->channel >= IPMI_MAX_CHANNELS)
829             || (addr->channel < 0))
830                 return -EINVAL;
831
832         if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
833                 if (len < sizeof(struct ipmi_ipmb_addr))
834                         return -EINVAL;
835                 return 0;
836         }
837
838         if (is_lan_addr(addr)) {
839                 if (len < sizeof(struct ipmi_lan_addr))
840                         return -EINVAL;
841                 return 0;
842         }
843
844         return -EINVAL;
845 }
846 EXPORT_SYMBOL(ipmi_validate_addr);
847
848 unsigned int ipmi_addr_length(int addr_type)
849 {
850         if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
851                 return sizeof(struct ipmi_system_interface_addr);
852
853         if ((addr_type == IPMI_IPMB_ADDR_TYPE)
854                         || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
855                 return sizeof(struct ipmi_ipmb_addr);
856
857         if (addr_type == IPMI_LAN_ADDR_TYPE)
858                 return sizeof(struct ipmi_lan_addr);
859
860         return 0;
861 }
862 EXPORT_SYMBOL(ipmi_addr_length);
863
864 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
865 {
866         int rv = 0;
867
868         if (!msg->user) {
869                 /* Special handling for NULL users. */
870                 if (intf->null_user_handler) {
871                         intf->null_user_handler(intf, msg);
872                 } else {
873                         /* No handler, so give up. */
874                         rv = -EINVAL;
875                 }
876                 ipmi_free_recv_msg(msg);
877         } else if (!oops_in_progress) {
878                 /*
879                  * If we are running in the panic context, calling the
 880                  * receive handler doesn't have much meaning and has a deadlock
881                  * risk.  At this moment, simply skip it in that case.
882                  */
883                 int index;
884                 struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
885
886                 if (user) {
887                         user->handler->ipmi_recv_hndl(msg, user->handler_data);
888                         release_ipmi_user(user, index);
889                 } else {
890                         /* User went away, give up. */
891                         ipmi_free_recv_msg(msg);
892                         rv = -EINVAL;
893                 }
894         }
895
896         return rv;
897 }
898
899 static void deliver_local_response(struct ipmi_smi *intf,
900                                    struct ipmi_recv_msg *msg)
901 {
902         if (deliver_response(intf, msg))
903                 ipmi_inc_stat(intf, unhandled_local_responses);
904         else
905                 ipmi_inc_stat(intf, handled_local_responses);
906 }
907
908 static void deliver_err_response(struct ipmi_smi *intf,
909                                  struct ipmi_recv_msg *msg, int err)
910 {
911         msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
912         msg->msg_data[0] = err;
913         msg->msg.netfn |= 1; /* Convert to a response. */
914         msg->msg.data_len = 1;
915         msg->msg.data = msg->msg_data;
916         deliver_local_response(intf, msg);
917 }
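
/*
 * A note on the netfn |= 1 above: in IPMI the response netfn is always the
 * request netfn with the low bit set (e.g. APP request 0x06 becomes APP
 * response 0x07), so this turns the original request into an error
 * response whose single data byte is the completion code.
 */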
918
919 /*
920  * Find the next sequence number not being used and add the given
921  * message with the given timeout to the sequence table.  This must be
922  * called with the interface's seq_lock held.
923  */
924 static int intf_next_seq(struct ipmi_smi      *intf,
925                          struct ipmi_recv_msg *recv_msg,
926                          unsigned long        timeout,
927                          int                  retries,
928                          int                  broadcast,
929                          unsigned char        *seq,
930                          long                 *seqid)
931 {
932         int          rv = 0;
933         unsigned int i;
934
935         if (timeout == 0)
936                 timeout = default_retry_ms;
937         if (retries < 0)
938                 retries = default_max_retries;
939
940         for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
941                                         i = (i+1)%IPMI_IPMB_NUM_SEQ) {
942                 if (!intf->seq_table[i].inuse)
943                         break;
944         }
945
946         if (!intf->seq_table[i].inuse) {
947                 intf->seq_table[i].recv_msg = recv_msg;
948
949                 /*
 950                  * Start with the maximum timeout; when the send response
951                  * comes in we will start the real timer.
952                  */
953                 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
954                 intf->seq_table[i].orig_timeout = timeout;
955                 intf->seq_table[i].retries_left = retries;
956                 intf->seq_table[i].broadcast = broadcast;
957                 intf->seq_table[i].inuse = 1;
958                 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
959                 *seq = i;
960                 *seqid = intf->seq_table[i].seqid;
961                 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
962                 need_waiter(intf);
963         } else {
964                 rv = -EAGAIN;
965         }
966
967         return rv;
968 }
969
970 /*
971  * Return the receive message for the given sequence number and
972  * release the sequence number so it can be reused.  Some other data
973  * is passed in to be sure the message matches up correctly (to help
 974  * guard against messages coming in after their timeout and the
975  * sequence number being reused).
976  */
977 static int intf_find_seq(struct ipmi_smi      *intf,
978                          unsigned char        seq,
979                          short                channel,
980                          unsigned char        cmd,
981                          unsigned char        netfn,
982                          struct ipmi_addr     *addr,
983                          struct ipmi_recv_msg **recv_msg)
984 {
985         int           rv = -ENODEV;
986         unsigned long flags;
987
988         if (seq >= IPMI_IPMB_NUM_SEQ)
989                 return -EINVAL;
990
991         spin_lock_irqsave(&intf->seq_lock, flags);
992         if (intf->seq_table[seq].inuse) {
993                 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
994
995                 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
996                                 && (msg->msg.netfn == netfn)
997                                 && (ipmi_addr_equal(addr, &msg->addr))) {
998                         *recv_msg = msg;
999                         intf->seq_table[seq].inuse = 0;
1000                         rv = 0;
1001                 }
1002         }
1003         spin_unlock_irqrestore(&intf->seq_lock, flags);
1004
1005         return rv;
1006 }
1007
1008
1009 /* Start the timer for a specific sequence table entry. */
1010 static int intf_start_seq_timer(struct ipmi_smi *intf,
1011                                 long       msgid)
1012 {
1013         int           rv = -ENODEV;
1014         unsigned long flags;
1015         unsigned char seq;
1016         unsigned long seqid;
1017
1018
1019         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1020
1021         spin_lock_irqsave(&intf->seq_lock, flags);
1022         /*
1023          * We do this verification because the user can be deleted
1024          * while a message is outstanding.
1025          */
1026         if ((intf->seq_table[seq].inuse)
1027                                 && (intf->seq_table[seq].seqid == seqid)) {
1028                 struct seq_table *ent = &intf->seq_table[seq];
1029                 ent->timeout = ent->orig_timeout;
1030                 rv = 0;
1031         }
1032         spin_unlock_irqrestore(&intf->seq_lock, flags);
1033
1034         return rv;
1035 }
1036
1037 /* Got an error for the send message for a specific sequence number. */
1038 static int intf_err_seq(struct ipmi_smi *intf,
1039                         long         msgid,
1040                         unsigned int err)
1041 {
1042         int                  rv = -ENODEV;
1043         unsigned long        flags;
1044         unsigned char        seq;
1045         unsigned long        seqid;
1046         struct ipmi_recv_msg *msg = NULL;
1047
1048
1049         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1050
1051         spin_lock_irqsave(&intf->seq_lock, flags);
1052         /*
1053          * We do this verification because the user can be deleted
1054          * while a message is outstanding.
1055          */
1056         if ((intf->seq_table[seq].inuse)
1057                                 && (intf->seq_table[seq].seqid == seqid)) {
1058                 struct seq_table *ent = &intf->seq_table[seq];
1059
1060                 ent->inuse = 0;
1061                 msg = ent->recv_msg;
1062                 rv = 0;
1063         }
1064         spin_unlock_irqrestore(&intf->seq_lock, flags);
1065
1066         if (msg)
1067                 deliver_err_response(intf, msg, err);
1068
1069         return rv;
1070 }
1071
1072
1073 int ipmi_create_user(unsigned int          if_num,
1074                      const struct ipmi_user_hndl *handler,
1075                      void                  *handler_data,
1076                      struct ipmi_user      **user)
1077 {
1078         unsigned long flags;
1079         struct ipmi_user *new_user;
1080         int           rv = 0, index;
1081         struct ipmi_smi *intf;
1082
1083         /*
1084          * There is no module usecount here, because it's not
1085          * required.  Since this can only be used by and called from
1086          * other modules, they will implicitly use this module, and
1087          * thus this can't be removed unless the other modules are
1088          * removed.
1089          */
1090
1091         if (handler == NULL)
1092                 return -EINVAL;
1093
1094         /*
 1095          * Make sure the driver is actually initialized; this handles
1096          * problems with initialization order.
1097          */
1098         if (!initialized) {
1099                 rv = ipmi_init_msghandler();
1100                 if (rv)
1101                         return rv;
1102
1103                 /*
1104                  * The init code doesn't return an error if it was turned
1105                  * off, but it won't initialize.  Check that.
1106                  */
1107                 if (!initialized)
1108                         return -ENODEV;
1109         }
1110
1111         new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1112         if (!new_user)
1113                 return -ENOMEM;
1114
1115         index = srcu_read_lock(&ipmi_interfaces_srcu);
1116         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1117                 if (intf->intf_num == if_num)
1118                         goto found;
1119         }
1120         /* Not found, return an error */
1121         rv = -EINVAL;
1122         goto out_kfree;
1123
1124  found:
1125         rv = init_srcu_struct(&new_user->release_barrier);
1126         if (rv)
1127                 goto out_kfree;
1128
1129         /* Note that each existing user holds a refcount to the interface. */
1130         kref_get(&intf->refcount);
1131
1132         kref_init(&new_user->refcount);
1133         new_user->handler = handler;
1134         new_user->handler_data = handler_data;
1135         new_user->intf = intf;
1136         new_user->gets_events = false;
1137
1138         rcu_assign_pointer(new_user->self, new_user);
1139         spin_lock_irqsave(&intf->seq_lock, flags);
1140         list_add_rcu(&new_user->link, &intf->users);
1141         spin_unlock_irqrestore(&intf->seq_lock, flags);
1142         if (handler->ipmi_watchdog_pretimeout) {
1143                 /* User wants pretimeouts, so make sure to watch for them. */
1144                 if (atomic_inc_return(&intf->event_waiters) == 1)
1145                         need_waiter(intf);
1146         }
1147         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1148         *user = new_user;
1149         return 0;
1150
1151 out_kfree:
1152         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1153         kfree(new_user);
1154         return rv;
1155 }
1156 EXPORT_SYMBOL(ipmi_create_user);
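
/*
 * A minimal caller sketch (hypothetical names; see ipmi_devintf.c and
 * ipmi_watchdog.c for real in-tree users).  The receive handler owns the
 * message and must free it with ipmi_free_recv_msg() when done:
 *
 *   static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *   {
 *           ...consume msg...
 *           ipmi_free_recv_msg(msg);
 *   }
 *   static const struct ipmi_user_hndl my_hndl = {
 *           .ipmi_recv_hndl = my_recv,
 *   };
 *
 *   struct ipmi_user *user;
 *   int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *   ...
 *   ipmi_destroy_user(user);
 */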
1157
1158 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1159 {
1160         int rv, index;
1161         struct ipmi_smi *intf;
1162
1163         index = srcu_read_lock(&ipmi_interfaces_srcu);
1164         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1165                 if (intf->intf_num == if_num)
1166                         goto found;
1167         }
1168         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1169
1170         /* Not found, return an error */
1171         return -EINVAL;
1172
1173 found:
1174         if (!intf->handlers->get_smi_info)
1175                 rv = -ENOTTY;
1176         else
1177                 rv = intf->handlers->get_smi_info(intf->send_info, data);
1178         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1179
1180         return rv;
1181 }
1182 EXPORT_SYMBOL(ipmi_get_smi_info);
1183
1184 static void free_user(struct kref *ref)
1185 {
1186         struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1187         cleanup_srcu_struct(&user->release_barrier);
1188         kfree(user);
1189 }
1190
1191 static void _ipmi_destroy_user(struct ipmi_user *user)
1192 {
1193         struct ipmi_smi  *intf = user->intf;
1194         int              i;
1195         unsigned long    flags;
1196         struct cmd_rcvr  *rcvr;
1197         struct cmd_rcvr  *rcvrs = NULL;
1198
1199         if (!acquire_ipmi_user(user, &i)) {
1200                 /*
1201                  * The user has already been cleaned up, just make sure
1202                  * nothing is using it and return.
1203                  */
1204                 synchronize_srcu(&user->release_barrier);
1205                 return;
1206         }
1207
1208         rcu_assign_pointer(user->self, NULL);
1209         release_ipmi_user(user, i);
1210
1211         synchronize_srcu(&user->release_barrier);
1212
1213         if (user->handler->shutdown)
1214                 user->handler->shutdown(user->handler_data);
1215
1216         if (user->handler->ipmi_watchdog_pretimeout)
1217                 atomic_dec(&intf->event_waiters);
1218
1219         if (user->gets_events)
1220                 atomic_dec(&intf->event_waiters);
1221
1222         /* Remove the user from the interface's sequence table. */
1223         spin_lock_irqsave(&intf->seq_lock, flags);
1224         list_del_rcu(&user->link);
1225
1226         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1227                 if (intf->seq_table[i].inuse
1228                     && (intf->seq_table[i].recv_msg->user == user)) {
1229                         intf->seq_table[i].inuse = 0;
1230                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1231                 }
1232         }
1233         spin_unlock_irqrestore(&intf->seq_lock, flags);
1234
1235         /*
1236          * Remove the user from the command receiver's table.  First
1237          * we build a list of everything (not using the standard link,
1238          * since other things may be using it till we do
 1239          * synchronize_rcu()) then free everything in that list.
1240          */
1241         mutex_lock(&intf->cmd_rcvrs_mutex);
1242         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1243                 if (rcvr->user == user) {
1244                         list_del_rcu(&rcvr->link);
1245                         rcvr->next = rcvrs;
1246                         rcvrs = rcvr;
1247                 }
1248         }
1249         mutex_unlock(&intf->cmd_rcvrs_mutex);
1250         synchronize_rcu();
1251         while (rcvrs) {
1252                 rcvr = rcvrs;
1253                 rcvrs = rcvr->next;
1254                 kfree(rcvr);
1255         }
1256
1257         kref_put(&intf->refcount, intf_free);
1258 }
1259
1260 int ipmi_destroy_user(struct ipmi_user *user)
1261 {
1262         _ipmi_destroy_user(user);
1263
1264         kref_put(&user->refcount, free_user);
1265
1266         return 0;
1267 }
1268 EXPORT_SYMBOL(ipmi_destroy_user);
1269
1270 int ipmi_get_version(struct ipmi_user *user,
1271                      unsigned char *major,
1272                      unsigned char *minor)
1273 {
1274         struct ipmi_device_id id;
1275         int rv, index;
1276
1277         user = acquire_ipmi_user(user, &index);
1278         if (!user)
1279                 return -ENODEV;
1280
1281         rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1282         if (!rv) {
1283                 *major = ipmi_version_major(&id);
1284                 *minor = ipmi_version_minor(&id);
1285         }
1286         release_ipmi_user(user, index);
1287
1288         return rv;
1289 }
1290 EXPORT_SYMBOL(ipmi_get_version);
1291
1292 int ipmi_set_my_address(struct ipmi_user *user,
1293                         unsigned int  channel,
1294                         unsigned char address)
1295 {
1296         int index, rv = 0;
1297
1298         user = acquire_ipmi_user(user, &index);
1299         if (!user)
1300                 return -ENODEV;
1301
1302         if (channel >= IPMI_MAX_CHANNELS) {
1303                 rv = -EINVAL;
1304         } else {
1305                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1306                 user->intf->addrinfo[channel].address = address;
1307         }
1308         release_ipmi_user(user, index);
1309
1310         return rv;
1311 }
1312 EXPORT_SYMBOL(ipmi_set_my_address);
1313
1314 int ipmi_get_my_address(struct ipmi_user *user,
1315                         unsigned int  channel,
1316                         unsigned char *address)
1317 {
1318         int index, rv = 0;
1319
1320         user = acquire_ipmi_user(user, &index);
1321         if (!user)
1322                 return -ENODEV;
1323
1324         if (channel >= IPMI_MAX_CHANNELS) {
1325                 rv = -EINVAL;
1326         } else {
1327                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1328                 *address = user->intf->addrinfo[channel].address;
1329         }
1330         release_ipmi_user(user, index);
1331
1332         return rv;
1333 }
1334 EXPORT_SYMBOL(ipmi_get_my_address);
1335
1336 int ipmi_set_my_LUN(struct ipmi_user *user,
1337                     unsigned int  channel,
1338                     unsigned char LUN)
1339 {
1340         int index, rv = 0;
1341
1342         user = acquire_ipmi_user(user, &index);
1343         if (!user)
1344                 return -ENODEV;
1345
1346         if (channel >= IPMI_MAX_CHANNELS) {
1347                 rv = -EINVAL;
1348         } else {
1349                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1350                 user->intf->addrinfo[channel].lun = LUN & 0x3;
1351         }
1352         release_ipmi_user(user, index);
1353
1354         return rv;
1355 }
1356 EXPORT_SYMBOL(ipmi_set_my_LUN);
1357
1358 int ipmi_get_my_LUN(struct ipmi_user *user,
1359                     unsigned int  channel,
1360                     unsigned char *address)
1361 {
1362         int index, rv = 0;
1363
1364         user = acquire_ipmi_user(user, &index);
1365         if (!user)
1366                 return -ENODEV;
1367
1368         if (channel >= IPMI_MAX_CHANNELS) {
1369                 rv = -EINVAL;
1370         } else {
1371                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1372                 *address = user->intf->addrinfo[channel].lun;
1373         }
1374         release_ipmi_user(user, index);
1375
1376         return rv;
1377 }
1378 EXPORT_SYMBOL(ipmi_get_my_LUN);
1379
1380 int ipmi_get_maintenance_mode(struct ipmi_user *user)
1381 {
1382         int mode, index;
1383         unsigned long flags;
1384
1385         user = acquire_ipmi_user(user, &index);
1386         if (!user)
1387                 return -ENODEV;
1388
1389         spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1390         mode = user->intf->maintenance_mode;
1391         spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1392         release_ipmi_user(user, index);
1393
1394         return mode;
1395 }
1396 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1397
1398 static void maintenance_mode_update(struct ipmi_smi *intf)
1399 {
1400         if (intf->handlers->set_maintenance_mode)
1401                 intf->handlers->set_maintenance_mode(
1402                         intf->send_info, intf->maintenance_mode_enable);
1403 }
1404
1405 int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1406 {
1407         int rv = 0, index;
1408         unsigned long flags;
1409         struct ipmi_smi *intf = user->intf;
1410
1411         user = acquire_ipmi_user(user, &index);
1412         if (!user)
1413                 return -ENODEV;
1414
1415         spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1416         if (intf->maintenance_mode != mode) {
1417                 switch (mode) {
1418                 case IPMI_MAINTENANCE_MODE_AUTO:
1419                         intf->maintenance_mode_enable
1420                                 = (intf->auto_maintenance_timeout > 0);
1421                         break;
1422
1423                 case IPMI_MAINTENANCE_MODE_OFF:
1424                         intf->maintenance_mode_enable = false;
1425                         break;
1426
1427                 case IPMI_MAINTENANCE_MODE_ON:
1428                         intf->maintenance_mode_enable = true;
1429                         break;
1430
1431                 default:
1432                         rv = -EINVAL;
1433                         goto out_unlock;
1434                 }
1435                 intf->maintenance_mode = mode;
1436
1437                 maintenance_mode_update(intf);
1438         }
1439  out_unlock:
1440         spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1441         release_ipmi_user(user, index);
1442
1443         return rv;
1444 }
1445 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1446
1447 int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1448 {
1449         unsigned long        flags;
1450         struct ipmi_smi      *intf = user->intf;
1451         struct ipmi_recv_msg *msg, *msg2;
1452         struct list_head     msgs;
1453         int index;
1454
1455         user = acquire_ipmi_user(user, &index);
1456         if (!user)
1457                 return -ENODEV;
1458
1459         INIT_LIST_HEAD(&msgs);
1460
1461         spin_lock_irqsave(&intf->events_lock, flags);
1462         if (user->gets_events == val)
1463                 goto out;
1464
1465         user->gets_events = val;
1466
1467         if (val) {
1468                 if (atomic_inc_return(&intf->event_waiters) == 1)
1469                         need_waiter(intf);
1470         } else {
1471                 atomic_dec(&intf->event_waiters);
1472         }
1473
1474         if (intf->delivering_events)
1475                 /*
1476                  * Another thread is delivering events for this, so
1477                  * let it handle any new events.
1478                  */
1479                 goto out;
1480
1481         /* Deliver any queued events. */
1482         while (user->gets_events && !list_empty(&intf->waiting_events)) {
1483                 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1484                         list_move_tail(&msg->link, &msgs);
1485                 intf->waiting_events_count = 0;
1486                 if (intf->event_msg_printed) {
1487                         dev_warn(intf->si_dev, "Event queue no longer full\n");
1488                         intf->event_msg_printed = 0;
1489                 }
1490
1491                 intf->delivering_events = 1;
1492                 spin_unlock_irqrestore(&intf->events_lock, flags);
1493
1494                 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1495                         msg->user = user;
1496                         kref_get(&user->refcount);
1497                         deliver_local_response(intf, msg);
1498                 }
1499
1500                 spin_lock_irqsave(&intf->events_lock, flags);
1501                 intf->delivering_events = 0;
1502         }
1503
1504  out:
1505         spin_unlock_irqrestore(&intf->events_lock, flags);
1506         release_ipmi_user(user, index);
1507
1508         return 0;
1509 }
1510 EXPORT_SYMBOL(ipmi_set_gets_events);
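
/*
 * Typical use (a sketch): a user that wants BMC events calls
 * ipmi_set_gets_events(user, true) after ipmi_create_user().  Any events
 * already queued on the interface (up to MAX_EVENTS_IN_QUEUE) are then
 * delivered through the user's ipmi_recv_hndl as
 * IPMI_ASYNC_EVENT_RECV_TYPE messages, as are events that arrive later.
 */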
1511
1512 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1513                                       unsigned char netfn,
1514                                       unsigned char cmd,
1515                                       unsigned char chan)
1516 {
1517         struct cmd_rcvr *rcvr;
1518
1519         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1520                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1521                                         && (rcvr->chans & (1 << chan)))
1522                         return rcvr;
1523         }
1524         return NULL;
1525 }
1526
1527 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1528                                  unsigned char netfn,
1529                                  unsigned char cmd,
1530                                  unsigned int  chans)
1531 {
1532         struct cmd_rcvr *rcvr;
1533
1534         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1535                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1536                                         && (rcvr->chans & chans))
1537                         return 0;
1538         }
1539         return 1;
1540 }
1541
1542 int ipmi_register_for_cmd(struct ipmi_user *user,
1543                           unsigned char netfn,
1544                           unsigned char cmd,
1545                           unsigned int  chans)
1546 {
1547         struct ipmi_smi *intf = user->intf;
1548         struct cmd_rcvr *rcvr;
1549         int rv = 0, index;
1550
1551         user = acquire_ipmi_user(user, &index);
1552         if (!user)
1553                 return -ENODEV;
1554
1555         rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1556         if (!rcvr) {
1557                 rv = -ENOMEM;
1558                 goto out_release;
1559         }
1560         rcvr->cmd = cmd;
1561         rcvr->netfn = netfn;
1562         rcvr->chans = chans;
1563         rcvr->user = user;
1564
1565         mutex_lock(&intf->cmd_rcvrs_mutex);
1566         /* Make sure the command/netfn is not already registered. */
1567         if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1568                 rv = -EBUSY;
1569                 goto out_unlock;
1570         }
1571
1572         if (atomic_inc_return(&intf->event_waiters) == 1)
1573                 need_waiter(intf);
1574
1575         list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1576
1577 out_unlock:
1578         mutex_unlock(&intf->cmd_rcvrs_mutex);
1579         if (rv)
1580                 kfree(rcvr);
1581 out_release:
1582         release_ipmi_user(user, index);
1583
1584         return rv;
1585 }
1586 EXPORT_SYMBOL(ipmi_register_for_cmd);
1587
1588 int ipmi_unregister_for_cmd(struct ipmi_user *user,
1589                             unsigned char netfn,
1590                             unsigned char cmd,
1591                             unsigned int  chans)
1592 {
1593         struct ipmi_smi *intf = user->intf;
1594         struct cmd_rcvr *rcvr;
1595         struct cmd_rcvr *rcvrs = NULL;
1596         int i, rv = -ENOENT, index;
1597
1598         user = acquire_ipmi_user(user, &index);
1599         if (!user)
1600                 return -ENODEV;
1601
1602         mutex_lock(&intf->cmd_rcvrs_mutex);
1603         for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1604                 if (((1 << i) & chans) == 0)
1605                         continue;
1606                 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1607                 if (rcvr == NULL)
1608                         continue;
1609                 if (rcvr->user == user) {
1610                         rv = 0;
1611                         rcvr->chans &= ~chans;
1612                         if (rcvr->chans == 0) {
1613                                 list_del_rcu(&rcvr->link);
1614                                 rcvr->next = rcvrs;
1615                                 rcvrs = rcvr;
1616                         }
1617                 }
1618         }
1619         mutex_unlock(&intf->cmd_rcvrs_mutex);
1620         synchronize_rcu();
1621         release_ipmi_user(user, index);
1622         while (rcvrs) {
1623                 atomic_dec(&intf->event_waiters);
1624                 rcvr = rcvrs;
1625                 rcvrs = rcvr->next;
1626                 kfree(rcvr);
1627         }
1628
1629         return rv;
1630 }
1631 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
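
/*
 * Informational note on the removal pattern above: receivers are
 * unlinked with list_del_rcu() under cmd_rcvrs_mutex and collected on a
 * private list, then synchronize_rcu() waits for any readers still
 * walking the list (such as find_cmd_rcvr()) before the entries are
 * finally kfree()d.
 */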
1632
1633 static unsigned char
1634 ipmb_checksum(unsigned char *data, int size)
1635 {
1636         unsigned char csum = 0;
1637
1638         for (; size > 0; size--, data++)
1639                 csum += *data;
1640
1641         return -csum;
1642 }
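
/*
 * The IPMB checksum is the two's complement of the byte sum, so the sum
 * of the covered bytes plus the checksum is zero modulo 256.  Worked
 * example with illustrative values: for bytes 0x20 and 0x18 the sum is
 * 0x38, ipmb_checksum() returns 0xc8, and 0x20 + 0x18 + 0xc8 == 0x100,
 * i.e. 0 in eight bits.
 */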
1643
1644 static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1645                                    struct kernel_ipmi_msg *msg,
1646                                    struct ipmi_ipmb_addr *ipmb_addr,
1647                                    long                  msgid,
1648                                    unsigned char         ipmb_seq,
1649                                    int                   broadcast,
1650                                    unsigned char         source_address,
1651                                    unsigned char         source_lun)
1652 {
1653         int i = broadcast;
1654
1655         /* Format the IPMB header data. */
1656         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1657         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1658         smi_msg->data[2] = ipmb_addr->channel;
1659         if (broadcast)
1660                 smi_msg->data[3] = 0;
1661         smi_msg->data[i+3] = ipmb_addr->slave_addr;
1662         smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1663         smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1664         smi_msg->data[i+6] = source_address;
1665         smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1666         smi_msg->data[i+8] = msg->cmd;
1667
1668         /* Now tack on the data to the message. */
1669         if (msg->data_len > 0)
1670                 memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1671         smi_msg->data_size = msg->data_len + 9;
1672
1673         /* Now calculate the checksum and tack it on. */
1674         smi_msg->data[i+smi_msg->data_size]
1675                 = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1676
1677         /*
1678          * Add on the checksum size and the offset from the
1679          * broadcast.
1680          */
1681         smi_msg->data_size += 1 + i;
1682
1683         smi_msg->msgid = msgid;
1684 }
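
/*
 * Informational sketch of the Send Message payload built above for the
 * non-broadcast case:
 *
 *	data[0]   APP request netfn << 2
 *	data[1]   IPMI_SEND_MSG_CMD
 *	data[2]   channel
 *	data[3]   responder slave address
 *	data[4]   request netfn << 2 | responder LUN
 *	data[5]   checksum over data[3]..data[4]
 *	data[6]   source (requester) address
 *	data[7]   sequence number << 2 | source LUN
 *	data[8]   command
 *	data[9].. message data, then a checksum over data[6] to the end
 *
 * A broadcast inserts a zero at data[3] and shifts the rest of the
 * header and data up by one byte.
 */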
1685
1686 static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1687                                   struct kernel_ipmi_msg *msg,
1688                                   struct ipmi_lan_addr  *lan_addr,
1689                                   long                  msgid,
1690                                   unsigned char         ipmb_seq,
1691                                   unsigned char         source_lun)
1692 {
1693         /* Format the IPMB header data. */
1694         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1695         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1696         smi_msg->data[2] = lan_addr->channel;
1697         smi_msg->data[3] = lan_addr->session_handle;
1698         smi_msg->data[4] = lan_addr->remote_SWID;
1699         smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1700         smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1701         smi_msg->data[7] = lan_addr->local_SWID;
1702         smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1703         smi_msg->data[9] = msg->cmd;
1704
1705         /* Now tack on the data to the message. */
1706         if (msg->data_len > 0)
1707                 memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1708         smi_msg->data_size = msg->data_len + 10;
1709
1710         /* Now calculate the checksum and tack it on. */
1711         smi_msg->data[smi_msg->data_size]
1712                 = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1713
1714         /*
1715          * Add on the checksum size.  Unlike the IPMB case there is
1716          * no broadcast offset for a LAN message.
1717          */
1718         smi_msg->data_size += 1;
1719
1720         smi_msg->msgid = msgid;
1721 }
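
/*
 * The LAN variant above is the same idea with a 10 byte header (session
 * handle and SWIDs instead of the IPMB slave addresses) and no
 * broadcast offset; its trailing checksum covers data[7] through the
 * end of the message data.
 */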
1722
1723 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1724                                              struct ipmi_smi_msg *smi_msg,
1725                                              int priority)
1726 {
1727         if (intf->curr_msg) {
1728                 if (priority > 0)
1729                         list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1730                 else
1731                         list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1732                 smi_msg = NULL;
1733         } else {
1734                 intf->curr_msg = smi_msg;
1735         }
1736
1737         return smi_msg;
1738 }
1739
1740
1741 static void smi_send(struct ipmi_smi *intf,
1742                      const struct ipmi_smi_handlers *handlers,
1743                      struct ipmi_smi_msg *smi_msg, int priority)
1744 {
1745         int run_to_completion = intf->run_to_completion;
1746
1747         if (run_to_completion) {
1748                 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1749         } else {
1750                 unsigned long flags;
1751
1752                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1753                 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1754                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1755         }
1756
1757         if (smi_msg)
1758                 handlers->sender(intf->send_info, smi_msg);
1759 }
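
/*
 * Informational note on the queueing above: if a message is already
 * outstanding (intf->curr_msg is set), the new one is appended to the
 * high-priority or normal transmit list to be sent once the current
 * message completes; otherwise it becomes curr_msg and is handed
 * directly to the lower layer's sender.  In run_to_completion mode
 * (used for panic-time operation) the xmit_msgs_lock is not taken.
 */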
1760
1761 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1762 {
1763         return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1764                  && ((msg->cmd == IPMI_COLD_RESET_CMD)
1765                      || (msg->cmd == IPMI_WARM_RESET_CMD)))
1766                 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1767 }
1768
1769 static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1770                               struct ipmi_addr       *addr,
1771                               long                   msgid,
1772                               struct kernel_ipmi_msg *msg,
1773                               struct ipmi_smi_msg    *smi_msg,
1774                               struct ipmi_recv_msg   *recv_msg,
1775                               int                    retries,
1776                               unsigned int           retry_time_ms)
1777 {
1778         struct ipmi_system_interface_addr *smi_addr;
1779
1780         if (msg->netfn & 1)
1781                 /* Responses are not allowed to the SMI. */
1782                 return -EINVAL;
1783
1784         smi_addr = (struct ipmi_system_interface_addr *) addr;
1785         if (smi_addr->lun > 3) {
1786                 ipmi_inc_stat(intf, sent_invalid_commands);
1787                 return -EINVAL;
1788         }
1789
1790         memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1791
1792         if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1793             && ((msg->cmd == IPMI_SEND_MSG_CMD)
1794                 || (msg->cmd == IPMI_GET_MSG_CMD)
1795                 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1796                 /*
1797                  * We don't let the user do these, since we manage
1798                  * the sequence numbers.
1799                  */
1800                 ipmi_inc_stat(intf, sent_invalid_commands);
1801                 return -EINVAL;
1802         }
1803
1804         if (is_maintenance_mode_cmd(msg)) {
1805                 unsigned long flags;
1806
1807                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1808                 intf->auto_maintenance_timeout
1809                         = maintenance_mode_timeout_ms;
1810                 if (!intf->maintenance_mode
1811                     && !intf->maintenance_mode_enable) {
1812                         intf->maintenance_mode_enable = true;
1813                         maintenance_mode_update(intf);
1814                 }
1815                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1816                                        flags);
1817         }
1818
1819         if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1820                 ipmi_inc_stat(intf, sent_invalid_commands);
1821                 return -EMSGSIZE;
1822         }
1823
1824         smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1825         smi_msg->data[1] = msg->cmd;
1826         smi_msg->msgid = msgid;
1827         smi_msg->user_data = recv_msg;
1828         if (msg->data_len > 0)
1829                 memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1830         smi_msg->data_size = msg->data_len + 2;
1831         ipmi_inc_stat(intf, sent_local_commands);
1832
1833         return 0;
1834 }
1835
1836 static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1837                            struct ipmi_addr       *addr,
1838                            long                   msgid,
1839                            struct kernel_ipmi_msg *msg,
1840                            struct ipmi_smi_msg    *smi_msg,
1841                            struct ipmi_recv_msg   *recv_msg,
1842                            unsigned char          source_address,
1843                            unsigned char          source_lun,
1844                            int                    retries,
1845                            unsigned int           retry_time_ms)
1846 {
1847         struct ipmi_ipmb_addr *ipmb_addr;
1848         unsigned char ipmb_seq;
1849         long seqid;
1850         int broadcast = 0;
1851         struct ipmi_channel *chans;
1852         int rv = 0;
1853
1854         if (addr->channel >= IPMI_MAX_CHANNELS) {
1855                 ipmi_inc_stat(intf, sent_invalid_commands);
1856                 return -EINVAL;
1857         }
1858
1859         chans = READ_ONCE(intf->channel_list)->c;
1860
1861         if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1862                 ipmi_inc_stat(intf, sent_invalid_commands);
1863                 return -EINVAL;
1864         }
1865
1866         if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1867                 /*
1868                  * A broadcast adds a zero at the beginning of the
1869                  * message, but is otherwise the same as an IPMB
1870                  * address.
1871                  */
1872                 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1873                 broadcast = 1;
1874                 retries = 0; /* Don't retry broadcasts. */
1875         }
1876
1877         /*
1878          * 9 for the header and 1 for the checksum, plus
1879          * possibly one for the broadcast.
1880          */
1881         if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1882                 ipmi_inc_stat(intf, sent_invalid_commands);
1883                 return -EMSGSIZE;
1884         }
1885
1886         ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1887         if (ipmb_addr->lun > 3) {
1888                 ipmi_inc_stat(intf, sent_invalid_commands);
1889                 return -EINVAL;
1890         }
1891
1892         memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1893
1894         if (recv_msg->msg.netfn & 0x1) {
1895                 /*
1896                  * It's a response, so use the user's sequence
1897                  * from msgid.
1898                  */
1899                 ipmi_inc_stat(intf, sent_ipmb_responses);
1900                 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1901                                 msgid, broadcast,
1902                                 source_address, source_lun);
1903
1904                 /*
1905                  * Save the receive message so we can use it
1906                  * to deliver the response.
1907                  */
1908                 smi_msg->user_data = recv_msg;
1909         } else {
1910                 /* It's a command, so get a sequence for it. */
1911                 unsigned long flags;
1912
1913                 spin_lock_irqsave(&intf->seq_lock, flags);
1914
1915                 if (is_maintenance_mode_cmd(msg))
1916                         intf->ipmb_maintenance_mode_timeout =
1917                                 maintenance_mode_timeout_ms;
1918
1919                 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
1920                         /* Different default in maintenance mode */
1921                         retry_time_ms = default_maintenance_retry_ms;
1922
1923                 /*
1924                  * Get a sequence number for the command, using the
1925                  * requested retry count and timeout (or the defaults).
1926                  */
1927                 rv = intf_next_seq(intf,
1928                                    recv_msg,
1929                                    retry_time_ms,
1930                                    retries,
1931                                    broadcast,
1932                                    &ipmb_seq,
1933                                    &seqid);
1934                 if (rv)
1935                         /*
1936                          * We have probably used up all the sequence
1937                          * numbers, so abort.
1938                          */
1939                         goto out_err;
1940
1941                 ipmi_inc_stat(intf, sent_ipmb_commands);
1942
1943                 /*
1944                  * Store the sequence number in the message,
1945                  * so that when the send message response
1946                  * comes back we can start the timer.
1947                  */
1948                 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1949                                 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1950                                 ipmb_seq, broadcast,
1951                                 source_address, source_lun);
1952
1953                 /*
1954                  * Copy the message into the recv message data, so we
1955                  * can retransmit it later if necessary.
1956                  */
1957                 memcpy(recv_msg->msg_data, smi_msg->data,
1958                        smi_msg->data_size);
1959                 recv_msg->msg.data = recv_msg->msg_data;
1960                 recv_msg->msg.data_len = smi_msg->data_size;
1961
1962                 /*
1963                  * We don't unlock until here, because we need
1964                  * to copy the completed message into the
1965                  * recv_msg before we release the lock.
1966                  * Otherwise, race conditions may bite us.  I
1967                  * know that's pretty paranoid, but I prefer
1968                  * to be correct.
1969                  */
1970 out_err:
1971                 spin_unlock_irqrestore(&intf->seq_lock, flags);
1972         }
1973
1974         return rv;
1975 }
1976
1977 static int i_ipmi_req_lan(struct ipmi_smi        *intf,
1978                           struct ipmi_addr       *addr,
1979                           long                   msgid,
1980                           struct kernel_ipmi_msg *msg,
1981                           struct ipmi_smi_msg    *smi_msg,
1982                           struct ipmi_recv_msg   *recv_msg,
1983                           unsigned char          source_lun,
1984                           int                    retries,
1985                           unsigned int           retry_time_ms)
1986 {
1987         struct ipmi_lan_addr  *lan_addr;
1988         unsigned char ipmb_seq;
1989         long seqid;
1990         struct ipmi_channel *chans;
1991         int rv = 0;
1992
1993         if (addr->channel >= IPMI_MAX_CHANNELS) {
1994                 ipmi_inc_stat(intf, sent_invalid_commands);
1995                 return -EINVAL;
1996         }
1997
1998         chans = READ_ONCE(intf->channel_list)->c;
1999
2000         if ((chans[addr->channel].medium
2001                                 != IPMI_CHANNEL_MEDIUM_8023LAN)
2002                         && (chans[addr->channel].medium
2003                             != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2004                 ipmi_inc_stat(intf, sent_invalid_commands);
2005                 return -EINVAL;
2006         }
2007
2008         /* 11 for the header and 1 for the checksum. */
2009         if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2010                 ipmi_inc_stat(intf, sent_invalid_commands);
2011                 return -EMSGSIZE;
2012         }
2013
2014         lan_addr = (struct ipmi_lan_addr *) addr;
2015         if (lan_addr->lun > 3) {
2016                 ipmi_inc_stat(intf, sent_invalid_commands);
2017                 return -EINVAL;
2018         }
2019
2020         memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2021
2022         if (recv_msg->msg.netfn & 0x1) {
2023                 /*
2024                  * It's a response, so use the user's sequence
2025                  * from msgid.
2026                  */
2027                 ipmi_inc_stat(intf, sent_lan_responses);
2028                 format_lan_msg(smi_msg, msg, lan_addr, msgid,
2029                                msgid, source_lun);
2030
2031                 /*
2032                  * Save the receive message so we can use it
2033                  * to deliver the response.
2034                  */
2035                 smi_msg->user_data = recv_msg;
2036         } else {
2037                 /* It's a command, so get a sequence for it. */
2038                 unsigned long flags;
2039
2040                 spin_lock_irqsave(&intf->seq_lock, flags);
2041
2042                 /*
2043                  * Get a sequence number for the command, using the
2044                  * requested retry count and timeout (or the defaults).
2045                  */
2046                 rv = intf_next_seq(intf,
2047                                    recv_msg,
2048                                    retry_time_ms,
2049                                    retries,
2050                                    0,
2051                                    &ipmb_seq,
2052                                    &seqid);
2053                 if (rv)
2054                         /*
2055                          * We have probably used up all the sequence
2056                          * numbers, so abort.
2057                          */
2058                         goto out_err;
2059
2060                 ipmi_inc_stat(intf, sent_lan_commands);
2061
2062                 /*
2063                  * Store the sequence number in the message,
2064                  * so that when the send message response
2065                  * comes back we can start the timer.
2066                  */
2067                 format_lan_msg(smi_msg, msg, lan_addr,
2068                                STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2069                                ipmb_seq, source_lun);
2070
2071                 /*
2072                  * Copy the message into the recv message data, so we
2073                  * can retransmit it later if necessary.
2074                  */
2075                 memcpy(recv_msg->msg_data, smi_msg->data,
2076                        smi_msg->data_size);
2077                 recv_msg->msg.data = recv_msg->msg_data;
2078                 recv_msg->msg.data_len = smi_msg->data_size;
2079
2080                 /*
2081                  * We don't unlock until here, because we need
2082                  * to copy the completed message into the
2083                  * recv_msg before we release the lock.
2084                  * Otherwise, race conditions may bite us.  I
2085                  * know that's pretty paranoid, but I prefer
2086                  * to be correct.
2087                  */
2088 out_err:
2089                 spin_unlock_irqrestore(&intf->seq_lock, flags);
2090         }
2091
2092         return rv;
2093 }
2094
2095 /*
2096  * Separate from ipmi_request so that the user does not have to be
2097  * supplied in certain circumstances (mainly at panic time).  If
2098  * messages are supplied, they will be freed, even if an error
2099  * occurs.
2100  */
2101 static int i_ipmi_request(struct ipmi_user     *user,
2102                           struct ipmi_smi      *intf,
2103                           struct ipmi_addr     *addr,
2104                           long                 msgid,
2105                           struct kernel_ipmi_msg *msg,
2106                           void                 *user_msg_data,
2107                           void                 *supplied_smi,
2108                           struct ipmi_recv_msg *supplied_recv,
2109                           int                  priority,
2110                           unsigned char        source_address,
2111                           unsigned char        source_lun,
2112                           int                  retries,
2113                           unsigned int         retry_time_ms)
2114 {
2115         struct ipmi_smi_msg *smi_msg;
2116         struct ipmi_recv_msg *recv_msg;
2117         int rv = 0;
2118
2119         if (supplied_recv)
2120                 recv_msg = supplied_recv;
2121         else {
2122                 recv_msg = ipmi_alloc_recv_msg();
2123                 if (recv_msg == NULL) {
2124                         rv = -ENOMEM;
2125                         goto out;
2126                 }
2127         }
2128         recv_msg->user_msg_data = user_msg_data;
2129
2130         if (supplied_smi)
2131                 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2132         else {
2133                 smi_msg = ipmi_alloc_smi_msg();
2134                 if (smi_msg == NULL) {
2135                         ipmi_free_recv_msg(recv_msg);
2136                         rv = -ENOMEM;
2137                         goto out;
2138                 }
2139         }
2140
2141         rcu_read_lock();
2142         if (intf->in_shutdown) {
2143                 rv = -ENODEV;
2144                 goto out_err;
2145         }
2146
2147         recv_msg->user = user;
2148         if (user)
2149                 /* The put happens when the message is freed. */
2150                 kref_get(&user->refcount);
2151         recv_msg->msgid = msgid;
2152         /*
2153          * Store the message to send in the receive message so timeout
2154          * responses can get the proper response data.
2155          */
2156         recv_msg->msg = *msg;
2157
2158         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2159                 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2160                                         recv_msg, retries, retry_time_ms);
2161         } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2162                 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2163                                      source_address, source_lun,
2164                                      retries, retry_time_ms);
2165         } else if (is_lan_addr(addr)) {
2166                 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2167                                     source_lun, retries, retry_time_ms);
2168         } else {
2169                 /* Unknown address type. */
2170                 ipmi_inc_stat(intf, sent_invalid_commands);
2171                 rv = -EINVAL;
2172         }
2173
2174         if (rv) {
2175 out_err:
2176                 ipmi_free_smi_msg(smi_msg);
2177                 ipmi_free_recv_msg(recv_msg);
2178         } else {
2179                 ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
2180
2181                 smi_send(intf, intf->handlers, smi_msg, priority);
2182         }
2183         rcu_read_unlock();
2184
2185 out:
2186         return rv;
2187 }
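
/*
 * Informational note on message ownership in i_ipmi_request(): on
 * failure both the smi_msg and the recv_msg are freed right here; on
 * success the smi_msg is handed to smi_send() and the recv_msg (which
 * holds a reference on the user, if one was supplied) is freed later
 * when the response is delivered or the request times out.
 */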
2188
2189 static int check_addr(struct ipmi_smi  *intf,
2190                       struct ipmi_addr *addr,
2191                       unsigned char    *saddr,
2192                       unsigned char    *lun)
2193 {
2194         if (addr->channel >= IPMI_MAX_CHANNELS)
2195                 return -EINVAL;
2196         addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2197         *lun = intf->addrinfo[addr->channel].lun;
2198         *saddr = intf->addrinfo[addr->channel].address;
2199         return 0;
2200 }
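
/*
 * Besides the bounds check, check_addr() clamps the channel index with
 * array_index_nospec() so a user-supplied value cannot be used for a
 * speculative out-of-bounds read of addrinfo[].
 */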
2201
2202 int ipmi_request_settime(struct ipmi_user *user,
2203                          struct ipmi_addr *addr,
2204                          long             msgid,
2205                          struct kernel_ipmi_msg  *msg,
2206                          void             *user_msg_data,
2207                          int              priority,
2208                          int              retries,
2209                          unsigned int     retry_time_ms)
2210 {
2211         unsigned char saddr = 0, lun = 0;
2212         int rv, index;
2213
2214         if (!user)
2215                 return -EINVAL;
2216
2217         user = acquire_ipmi_user(user, &index);
2218         if (!user)
2219                 return -ENODEV;
2220
2221         rv = check_addr(user->intf, addr, &saddr, &lun);
2222         if (!rv)
2223                 rv = i_ipmi_request(user,
2224                                     user->intf,
2225                                     addr,
2226                                     msgid,
2227                                     msg,
2228                                     user_msg_data,
2229                                     NULL, NULL,
2230                                     priority,
2231                                     saddr,
2232                                     lun,
2233                                     retries,
2234                                     retry_time_ms);
2235
2236         release_ipmi_user(user, index);
2237         return rv;
2238 }
2239 EXPORT_SYMBOL(ipmi_request_settime);
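
/*
 * Illustrative sketch only, not part of the driver: how an in-kernel
 * user that already holds an ipmi_user from ipmi_create_user() might
 * issue a Get Device ID request through ipmi_request_settime().  The
 * names "user", "my_data" and "rv" are placeholders; -1 retries and a
 * retry_time_ms of 0 request the default retry behaviour.
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = IPMI_GET_DEVICE_ID_CMD,
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0, &msg,
 *				  my_data, 0, -1, 0);
 *
 * The response comes back asynchronously through the receive handler
 * registered with the user.
 */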
2240
2241 int ipmi_request_supply_msgs(struct ipmi_user     *user,
2242                              struct ipmi_addr     *addr,
2243                              long                 msgid,
2244                              struct kernel_ipmi_msg *msg,
2245                              void                 *user_msg_data,
2246                              void                 *supplied_smi,
2247                              struct ipmi_recv_msg *supplied_recv,
2248                              int                  priority)
2249 {
2250         unsigned char saddr = 0, lun = 0;
2251         int rv, index;
2252
2253         if (!user)
2254                 return -EINVAL;
2255
2256         user = acquire_ipmi_user(user, &index);
2257         if (!user)
2258                 return -ENODEV;
2259
2260         rv = check_addr(user->intf, addr, &saddr, &lun);
2261         if (!rv)
2262                 rv = i_ipmi_request(user,
2263                                     user->intf,
2264                                     addr,
2265                                     msgid,
2266                                     msg,
2267                                     user_msg_data,
2268                                     supplied_smi,
2269                                     supplied_recv,
2270                                     priority,
2271                                     saddr,
2272                                     lun,
2273                                     -1, 0);
2274
2275         release_ipmi_user(user, index);
2276         return rv;
2277 }
2278 EXPORT_SYMBOL(ipmi_request_supply_msgs);
2279
2280 static void bmc_device_id_handler(struct ipmi_smi *intf,
2281                                   struct ipmi_recv_msg *msg)
2282 {
2283         int rv;
2284
2285         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2286                         || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2287                         || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2288                 dev_warn(intf->si_dev,
2289                          "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2290                          msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2291                 return;
2292         }
2293
2294         rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2295                         msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2296         if (rv) {
2297                 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2298                 intf->bmc->dyn_id_set = 0;
2299         } else {
2300                 /*
2301                  * Make sure the id data is available before setting
2302                  * dyn_id_set.
2303                  */
2304                 smp_wmb();
2305                 intf->bmc->dyn_id_set = 1;
2306         }
2307
2308         wake_up(&intf->waitq);
2309 }
2310
2311 static int
2312 send_get_device_id_cmd(struct ipmi_smi *intf)
2313 {
2314         struct ipmi_system_interface_addr si;
2315         struct kernel_ipmi_msg msg;
2316
2317         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2318         si.channel = IPMI_BMC_CHANNEL;
2319         si.lun = 0;
2320
2321         msg.netfn = IPMI_NETFN_APP_REQUEST;
2322         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2323         msg.data = NULL;
2324         msg.data_len = 0;
2325
2326         return i_ipmi_request(NULL,
2327                               intf,
2328                               (struct ipmi_addr *) &si,
2329                               0,
2330                               &msg,
2331                               intf,
2332                               NULL,
2333                               NULL,
2334                               0,
2335                               intf->addrinfo[0].address,
2336                               intf->addrinfo[0].lun,
2337                               -1, 0);
2338 }
2339
2340 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2341 {
2342         int rv;
2343
2344         bmc->dyn_id_set = 2;
2345
2346         intf->null_user_handler = bmc_device_id_handler;
2347
2348         rv = send_get_device_id_cmd(intf);
2349         if (rv)
2350                 return rv;
2351
2352         wait_event(intf->waitq, bmc->dyn_id_set != 2);
2353
2354         if (!bmc->dyn_id_set)
2355                 rv = -EIO; /* Something went wrong in the fetch. */
2356
2357         /* dyn_id_set makes the id data available. */
2358         smp_rmb();
2359
2360         intf->null_user_handler = NULL;
2361
2362         return rv;
2363 }
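
/*
 * Informational note on the handshake above: dyn_id_set is set to 2
 * before the request is sent, the null_user_handler sets it to 1 on
 * success or 0 on failure, and the waiter sleeps until it changes from
 * 2.  The smp_wmb() in the handler pairs with the smp_rmb() here so the
 * fetched id data is visible before dyn_id_set is seen as 1.
 */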
2364
2365 /*
2366  * Fetch the device id for the bmc/interface.  You must pass in either
2367  * bmc or intf; this code will get the other one.  If the data has
2368  * been recently fetched, this will just use the cached data.  Otherwise
2369  * it will run a new fetch.
2370  *
2371  * Except for the first time this is called (in ipmi_register_smi()),
2372  * this will always return good data.
2373  */
2374 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2375                                struct ipmi_device_id *id,
2376                                bool *guid_set, guid_t *guid, int intf_num)
2377 {
2378         int rv = 0;
2379         int prev_dyn_id_set, prev_guid_set;
2380         bool intf_set = intf != NULL;
2381
2382         if (!intf) {
2383                 mutex_lock(&bmc->dyn_mutex);
2384 retry_bmc_lock:
2385                 if (list_empty(&bmc->intfs)) {
2386                         mutex_unlock(&bmc->dyn_mutex);
2387                         return -ENOENT;
2388                 }
2389                 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2390                                         bmc_link);
2391                 kref_get(&intf->refcount);
2392                 mutex_unlock(&bmc->dyn_mutex);
2393                 mutex_lock(&intf->bmc_reg_mutex);
2394                 mutex_lock(&bmc->dyn_mutex);
2395                 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2396                                              bmc_link)) {
2397                         mutex_unlock(&intf->bmc_reg_mutex);
2398                         kref_put(&intf->refcount, intf_free);
2399                         goto retry_bmc_lock;
2400                 }
2401         } else {
2402                 mutex_lock(&intf->bmc_reg_mutex);
2403                 bmc = intf->bmc;
2404                 mutex_lock(&bmc->dyn_mutex);
2405                 kref_get(&intf->refcount);
2406         }
2407
2408         /* If we have a valid and current ID, just return that. */
2409         if (intf->in_bmc_register ||
2410             (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2411                 goto out_noprocessing;
2412
2413         prev_guid_set = bmc->dyn_guid_set;
2414         __get_guid(intf);
2415
2416         prev_dyn_id_set = bmc->dyn_id_set;
2417         rv = __get_device_id(intf, bmc);
2418         if (rv)
2419                 goto out;
2420
2421         /*
2422          * The guid, device id, manufacturer id, and product id should
2423          * not change on a BMC.  If they do, we have to do some dancing.
2424          */
2425         if (!intf->bmc_registered
2426             || (!prev_guid_set && bmc->dyn_guid_set)
2427             || (!prev_dyn_id_set && bmc->dyn_id_set)
2428             || (prev_guid_set && bmc->dyn_guid_set
2429                 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2430             || bmc->id.device_id != bmc->fetch_id.device_id
2431             || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2432             || bmc->id.product_id != bmc->fetch_id.product_id) {
2433                 struct ipmi_device_id id = bmc->fetch_id;
2434                 int guid_set = bmc->dyn_guid_set;
2435                 guid_t guid;
2436
2437                 guid = bmc->fetch_guid;
2438                 mutex_unlock(&bmc->dyn_mutex);
2439
2440                 __ipmi_bmc_unregister(intf);
2441                 /* Fill in the temporary BMC for good measure. */
2442                 intf->bmc->id = id;
2443                 intf->bmc->dyn_guid_set = guid_set;
2444                 intf->bmc->guid = guid;
2445                 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2446                         need_waiter(intf); /* Retry later on an error. */
2447                 else
2448                         __scan_channels(intf, &id);
2449
2450
2451                 if (!intf_set) {
2452                         /*
2453                          * We weren't given the interface on the
2454                          * command line, so restart the operation on
2455                          * the next interface for the BMC.
2456                          */
2457                         mutex_unlock(&intf->bmc_reg_mutex);
2458                         mutex_lock(&bmc->dyn_mutex);
2459                         goto retry_bmc_lock;
2460                 }
2461
2462                 /* We have a new BMC, set it up. */
2463                 bmc = intf->bmc;
2464                 mutex_lock(&bmc->dyn_mutex);
2465                 goto out_noprocessing;
2466         } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2467                 /* Version info changed; scan the channels again. */
2468                 __scan_channels(intf, &bmc->fetch_id);
2469
2470         bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2471
2472 out:
2473         if (rv && prev_dyn_id_set) {
2474                 rv = 0; /* Ignore failures if we have previous data. */
2475                 bmc->dyn_id_set = prev_dyn_id_set;
2476         }
2477         if (!rv) {
2478                 bmc->id = bmc->fetch_id;
2479                 if (bmc->dyn_guid_set)
2480                         bmc->guid = bmc->fetch_guid;
2481                 else if (prev_guid_set)
2482                         /*
2483                          * The guid used to be valid but the fetch failed,
2484                          * so just use the cached value.
2485                          */
2486                         bmc->dyn_guid_set = prev_guid_set;
2487         }
2488 out_noprocessing:
2489         if (!rv) {
2490                 if (id)
2491                         *id = bmc->id;
2492
2493                 if (guid_set)
2494                         *guid_set = bmc->dyn_guid_set;
2495
2496                 if (guid && bmc->dyn_guid_set)
2497                         *guid =  bmc->guid;
2498         }
2499
2500         mutex_unlock(&bmc->dyn_mutex);
2501         mutex_unlock(&intf->bmc_reg_mutex);
2502
2503         kref_put(&intf->refcount, intf_free);
2504         return rv;
2505 }
2506
2507 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2508                              struct ipmi_device_id *id,
2509                              bool *guid_set, guid_t *guid)
2510 {
2511         return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2512 }
2513
2514 static ssize_t device_id_show(struct device *dev,
2515                               struct device_attribute *attr,
2516                               char *buf)
2517 {
2518         struct bmc_device *bmc = to_bmc_device(dev);
2519         struct ipmi_device_id id;
2520         int rv;
2521
2522         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2523         if (rv)
2524                 return rv;
2525
2526         return snprintf(buf, 10, "%u\n", id.device_id);
2527 }
2528 static DEVICE_ATTR_RO(device_id);
2529
2530 static ssize_t provides_device_sdrs_show(struct device *dev,
2531                                          struct device_attribute *attr,
2532                                          char *buf)
2533 {
2534         struct bmc_device *bmc = to_bmc_device(dev);
2535         struct ipmi_device_id id;
2536         int rv;
2537
2538         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2539         if (rv)
2540                 return rv;
2541
2542         return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2543 }
2544 static DEVICE_ATTR_RO(provides_device_sdrs);
2545
2546 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2547                              char *buf)
2548 {
2549         struct bmc_device *bmc = to_bmc_device(dev);
2550         struct ipmi_device_id id;
2551         int rv;
2552
2553         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2554         if (rv)
2555                 return rv;
2556
2557         return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2558 }
2559 static DEVICE_ATTR_RO(revision);
2560
2561 static ssize_t firmware_revision_show(struct device *dev,
2562                                       struct device_attribute *attr,
2563                                       char *buf)
2564 {
2565         struct bmc_device *bmc = to_bmc_device(dev);
2566         struct ipmi_device_id id;
2567         int rv;
2568
2569         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2570         if (rv)
2571                 return rv;
2572
2573         return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2574                         id.firmware_revision_2);
2575 }
2576 static DEVICE_ATTR_RO(firmware_revision);
2577
2578 static ssize_t ipmi_version_show(struct device *dev,
2579                                  struct device_attribute *attr,
2580                                  char *buf)
2581 {
2582         struct bmc_device *bmc = to_bmc_device(dev);
2583         struct ipmi_device_id id;
2584         int rv;
2585
2586         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2587         if (rv)
2588                 return rv;
2589
2590         return snprintf(buf, 20, "%u.%u\n",
2591                         ipmi_version_major(&id),
2592                         ipmi_version_minor(&id));
2593 }
2594 static DEVICE_ATTR_RO(ipmi_version);
2595
2596 static ssize_t add_dev_support_show(struct device *dev,
2597                                     struct device_attribute *attr,
2598                                     char *buf)
2599 {
2600         struct bmc_device *bmc = to_bmc_device(dev);
2601         struct ipmi_device_id id;
2602         int rv;
2603
2604         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2605         if (rv)
2606                 return rv;
2607
2608         return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2609 }
2610 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2611                    NULL);
2612
2613 static ssize_t manufacturer_id_show(struct device *dev,
2614                                     struct device_attribute *attr,
2615                                     char *buf)
2616 {
2617         struct bmc_device *bmc = to_bmc_device(dev);
2618         struct ipmi_device_id id;
2619         int rv;
2620
2621         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2622         if (rv)
2623                 return rv;
2624
2625         return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2626 }
2627 static DEVICE_ATTR_RO(manufacturer_id);
2628
2629 static ssize_t product_id_show(struct device *dev,
2630                                struct device_attribute *attr,
2631                                char *buf)
2632 {
2633         struct bmc_device *bmc = to_bmc_device(dev);
2634         struct ipmi_device_id id;
2635         int rv;
2636
2637         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2638         if (rv)
2639                 return rv;
2640
2641         return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2642 }
2643 static DEVICE_ATTR_RO(product_id);
2644
2645 static ssize_t aux_firmware_rev_show(struct device *dev,
2646                                      struct device_attribute *attr,
2647                                      char *buf)
2648 {
2649         struct bmc_device *bmc = to_bmc_device(dev);
2650         struct ipmi_device_id id;
2651         int rv;
2652
2653         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2654         if (rv)
2655                 return rv;
2656
2657         return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2658                         id.aux_firmware_revision[3],
2659                         id.aux_firmware_revision[2],
2660                         id.aux_firmware_revision[1],
2661                         id.aux_firmware_revision[0]);
2662 }
2663 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2664
2665 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2666                          char *buf)
2667 {
2668         struct bmc_device *bmc = to_bmc_device(dev);
2669         bool guid_set;
2670         guid_t guid;
2671         int rv;
2672
2673         rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2674         if (rv)
2675                 return rv;
2676         if (!guid_set)
2677                 return -ENOENT;
2678
2679         return snprintf(buf, 38, "%pUl\n", guid.b);
2680 }
2681 static DEVICE_ATTR_RO(guid);
2682
2683 static struct attribute *bmc_dev_attrs[] = {
2684         &dev_attr_device_id.attr,
2685         &dev_attr_provides_device_sdrs.attr,
2686         &dev_attr_revision.attr,
2687         &dev_attr_firmware_revision.attr,
2688         &dev_attr_ipmi_version.attr,
2689         &dev_attr_additional_device_support.attr,
2690         &dev_attr_manufacturer_id.attr,
2691         &dev_attr_product_id.attr,
2692         &dev_attr_aux_firmware_revision.attr,
2693         &dev_attr_guid.attr,
2694         NULL
2695 };
2696
2697 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2698                                        struct attribute *attr, int idx)
2699 {
2700         struct device *dev = kobj_to_dev(kobj);
2701         struct bmc_device *bmc = to_bmc_device(dev);
2702         umode_t mode = attr->mode;
2703         int rv;
2704
2705         if (attr == &dev_attr_aux_firmware_revision.attr) {
2706                 struct ipmi_device_id id;
2707
2708                 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2709                 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2710         }
2711         if (attr == &dev_attr_guid.attr) {
2712                 bool guid_set;
2713
2714                 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2715                 return (!rv && guid_set) ? mode : 0;
2716         }
2717         return mode;
2718 }
2719
2720 static const struct attribute_group bmc_dev_attr_group = {
2721         .attrs          = bmc_dev_attrs,
2722         .is_visible     = bmc_dev_attr_is_visible,
2723 };
2724
2725 static const struct attribute_group *bmc_dev_attr_groups[] = {
2726         &bmc_dev_attr_group,
2727         NULL
2728 };
2729
2730 static const struct device_type bmc_device_type = {
2731         .groups         = bmc_dev_attr_groups,
2732 };
2733
2734 static int __find_bmc_guid(struct device *dev, void *data)
2735 {
2736         guid_t *guid = data;
2737         struct bmc_device *bmc;
2738         int rv;
2739
2740         if (dev->type != &bmc_device_type)
2741                 return 0;
2742
2743         bmc = to_bmc_device(dev);
2744         rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2745         if (rv)
2746                 rv = kref_get_unless_zero(&bmc->usecount);
2747         return rv;
2748 }
2749
2750 /*
2751  * Returns with the bmc's usecount incremented, if it is non-NULL.
2752  */
2753 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2754                                              guid_t *guid)
2755 {
2756         struct device *dev;
2757         struct bmc_device *bmc = NULL;
2758
2759         dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2760         if (dev) {
2761                 bmc = to_bmc_device(dev);
2762                 put_device(dev);
2763         }
2764         return bmc;
2765 }
2766
2767 struct prod_dev_id {
2768         unsigned int  product_id;
2769         unsigned char device_id;
2770 };
2771
2772 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2773 {
2774         struct prod_dev_id *cid = data;
2775         struct bmc_device *bmc;
2776         int rv;
2777
2778         if (dev->type != &bmc_device_type)
2779                 return 0;
2780
2781         bmc = to_bmc_device(dev);
2782         rv = (bmc->id.product_id == cid->product_id
2783               && bmc->id.device_id == cid->device_id);
2784         if (rv)
2785                 rv = kref_get_unless_zero(&bmc->usecount);
2786         return rv;
2787 }
2788
2789 /*
2790  * Returns with the bmc's usecount incremented, if it is non-NULL.
2791  */
2792 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2793         struct device_driver *drv,
2794         unsigned int product_id, unsigned char device_id)
2795 {
2796         struct prod_dev_id id = {
2797                 .product_id = product_id,
2798                 .device_id = device_id,
2799         };
2800         struct device *dev;
2801         struct bmc_device *bmc = NULL;
2802
2803         dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2804         if (dev) {
2805                 bmc = to_bmc_device(dev);
2806                 put_device(dev);
2807         }
2808         return bmc;
2809 }
2810
2811 static DEFINE_IDA(ipmi_bmc_ida);
2812
2813 static void
2814 release_bmc_device(struct device *dev)
2815 {
2816         kfree(to_bmc_device(dev));
2817 }
2818
2819 static void cleanup_bmc_work(struct work_struct *work)
2820 {
2821         struct bmc_device *bmc = container_of(work, struct bmc_device,
2822                                               remove_work);
2823         int id = bmc->pdev.id; /* Unregister overwrites id */
2824
2825         platform_device_unregister(&bmc->pdev);
2826         ida_simple_remove(&ipmi_bmc_ida, id);
2827 }
2828
2829 static void
2830 cleanup_bmc_device(struct kref *ref)
2831 {
2832         struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2833
2834         /*
2835          * Remove the platform device via a work queue so that the
2836          * device attributes are not removed from within a read of one
2837          * of those attributes.
2838          */
2839         schedule_work(&bmc->remove_work);
2840 }
2841
2842 /*
2843  * Must be called with intf->bmc_reg_mutex held.
2844  */
2845 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2846 {
2847         struct bmc_device *bmc = intf->bmc;
2848
2849         if (!intf->bmc_registered)
2850                 return;
2851
2852         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2853         sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2854         kfree(intf->my_dev_name);
2855         intf->my_dev_name = NULL;
2856
2857         mutex_lock(&bmc->dyn_mutex);
2858         list_del(&intf->bmc_link);
2859         mutex_unlock(&bmc->dyn_mutex);
2860         intf->bmc = &intf->tmp_bmc;
2861         kref_put(&bmc->usecount, cleanup_bmc_device);
2862         intf->bmc_registered = false;
2863 }
2864
2865 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2866 {
2867         mutex_lock(&intf->bmc_reg_mutex);
2868         __ipmi_bmc_unregister(intf);
2869         mutex_unlock(&intf->bmc_reg_mutex);
2870 }
2871
2872 /*
2873  * Must be called with intf->bmc_reg_mutex held.
2874  */
2875 static int __ipmi_bmc_register(struct ipmi_smi *intf,
2876                                struct ipmi_device_id *id,
2877                                bool guid_set, guid_t *guid, int intf_num)
2878 {
2879         int               rv;
2880         struct bmc_device *bmc;
2881         struct bmc_device *old_bmc;
2882
2883         /*
2884          * platform_device_register() can cause bmc_reg_mutex to
2885          * be claimed because of the is_visible functions of
2886          * the attributes.  To avoid that recursion, flag that
2887          * registration is in progress and release the lock.
2888          */
2889         intf->in_bmc_register = true;
2890         mutex_unlock(&intf->bmc_reg_mutex);
2891
2892         /*
2893          * Try to find an existing bmc_device struct that already
2894          * represents the interfaced BMC.
2895          */
2896         mutex_lock(&ipmidriver_mutex);
2897         if (guid_set)
2898                 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2899         else
2900                 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2901                                                     id->product_id,
2902                                                     id->device_id);
2903
2904         /*
2905          * If there is already a bmc_device for this BMC, use it;
2906          * otherwise register a new BMC device.
2907          */
2908         if (old_bmc) {
2909                 bmc = old_bmc;
2910                 /*
2911                  * Note: old_bmc already has usecount incremented by
2912                  * the BMC find functions.
2913                  */
2914                 intf->bmc = old_bmc;
2915                 mutex_lock(&bmc->dyn_mutex);
2916                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2917                 mutex_unlock(&bmc->dyn_mutex);
2918
2919                 dev_info(intf->si_dev,
2920                          "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2921                          bmc->id.manufacturer_id,
2922                          bmc->id.product_id,
2923                          bmc->id.device_id);
2924         } else {
2925                 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
2926                 if (!bmc) {
2927                         rv = -ENOMEM;
2928                         goto out;
2929                 }
2930                 INIT_LIST_HEAD(&bmc->intfs);
2931                 mutex_init(&bmc->dyn_mutex);
2932                 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
2933
2934                 bmc->id = *id;
2935                 bmc->dyn_id_set = 1;
2936                 bmc->dyn_guid_set = guid_set;
2937                 bmc->guid = *guid;
2938                 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2939
2940                 bmc->pdev.name = "ipmi_bmc";
2941
2942                 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
2943                 if (rv < 0)
2944                         goto out;
2945                 bmc->pdev.dev.driver = &ipmidriver.driver;
2946                 bmc->pdev.id = rv;
2947                 bmc->pdev.dev.release = release_bmc_device;
2948                 bmc->pdev.dev.type = &bmc_device_type;
2949                 kref_init(&bmc->usecount);
2950
2951                 intf->bmc = bmc;
2952                 mutex_lock(&bmc->dyn_mutex);
2953                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2954                 mutex_unlock(&bmc->dyn_mutex);
2955
2956                 rv = platform_device_register(&bmc->pdev);
2957                 if (rv) {
2958                         dev_err(intf->si_dev,
2959                                 "Unable to register bmc device: %d\n",
2960                                 rv);
2961                         goto out_list_del;
2962                 }
2963
2964                 dev_info(intf->si_dev,
2965                          "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2966                          bmc->id.manufacturer_id,
2967                          bmc->id.product_id,
2968                          bmc->id.device_id);
2969         }
2970
2971         /*
2972          * create symlink from system interface device to bmc device
2973          * and back.
2974          */
2975         rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
2976         if (rv) {
2977                 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
2978                 goto out_put_bmc;
2979         }
2980
2981         if (intf_num == -1)
2982                 intf_num = intf->intf_num;
2983         intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
2984         if (!intf->my_dev_name) {
2985                 rv = -ENOMEM;
2986                 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
2987                         rv);
2988                 goto out_unlink1;
2989         }
2990
2991         rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
2992                                intf->my_dev_name);
2993         if (rv) {
2994                 kfree(intf->my_dev_name);
2995                 intf->my_dev_name = NULL;
2996                 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
2997                         rv);
2998                 goto out_free_my_dev_name;
2999         }
3000
3001         intf->bmc_registered = true;
3002
3003 out:
3004         mutex_unlock(&ipmidriver_mutex);
3005         mutex_lock(&intf->bmc_reg_mutex);
3006         intf->in_bmc_register = false;
3007         return rv;
3008
3009
3010 out_free_my_dev_name:
3011         kfree(intf->my_dev_name);
3012         intf->my_dev_name = NULL;
3013
3014 out_unlink1:
3015         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3016
3017 out_put_bmc:
3018         mutex_lock(&bmc->dyn_mutex);
3019         list_del(&intf->bmc_link);
3020         mutex_unlock(&bmc->dyn_mutex);
3021         intf->bmc = &intf->tmp_bmc;
3022         kref_put(&bmc->usecount, cleanup_bmc_device);
3023         goto out;
3024
3025 out_list_del:
3026         mutex_lock(&bmc->dyn_mutex);
3027         list_del(&intf->bmc_link);
3028         mutex_unlock(&bmc->dyn_mutex);
3029         intf->bmc = &intf->tmp_bmc;
3030         put_device(&bmc->pdev.dev);
3031         goto out;
3032 }
3033
3034 static int
3035 send_guid_cmd(struct ipmi_smi *intf, int chan)
3036 {
3037         struct kernel_ipmi_msg            msg;
3038         struct ipmi_system_interface_addr si;
3039
3040         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3041         si.channel = IPMI_BMC_CHANNEL;
3042         si.lun = 0;
3043
3044         msg.netfn = IPMI_NETFN_APP_REQUEST;
3045         msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3046         msg.data = NULL;
3047         msg.data_len = 0;
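        /*
         * Sent with a NULL user, so the response is handled by
         * intf->null_user_handler (guid_handler), which __get_guid()
         * installs before calling this.
         */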
3048         return i_ipmi_request(NULL,
3049                               intf,
3050                               (struct ipmi_addr *) &si,
3051                               0,
3052                               &msg,
3053                               intf,
3054                               NULL,
3055                               NULL,
3056                               0,
3057                               intf->addrinfo[0].address,
3058                               intf->addrinfo[0].lun,
3059                               -1, 0);
3060 }
3061
3062 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3063 {
3064         struct bmc_device *bmc = intf->bmc;
3065
3066         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3067             || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3068             || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3069                 /* Not for me */
3070                 return;
3071
3072         if (msg->msg.data[0] != 0) {
3073                 /* Error from getting the GUID, the BMC doesn't have one. */
3074                 bmc->dyn_guid_set = 0;
3075                 goto out;
3076         }
3077
3078         if (msg->msg.data_len < 17) {
3079                 bmc->dyn_guid_set = 0;
3080                 dev_warn(intf->si_dev,
3081                          "The GUID response from the BMC was too short, it was %d but should have been 17.  Assuming GUID is not available.\n",
3082                          msg->msg.data_len);
3083                 goto out;
3084         }
3085
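        /* data[0] is the completion code; the 16 GUID bytes follow it. */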
3086         memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
3087         /*
3088          * Make sure the guid data is available before setting
3089          * dyn_guid_set.
3090          */
3091         smp_wmb();
3092         bmc->dyn_guid_set = 1;
3093  out:
3094         wake_up(&intf->waitq);
3095 }
3096
3097 static void __get_guid(struct ipmi_smi *intf)
3098 {
3099         int rv;
3100         struct bmc_device *bmc = intf->bmc;
3101
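        /*
         * dyn_guid_set == 2 marks the fetch as in progress; guid_handler()
         * sets it to 1 on success or 0 on failure and wakes the waitq,
         * releasing the wait_event() below.
         */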
3102         bmc->dyn_guid_set = 2;
3103         intf->null_user_handler = guid_handler;
3104         rv = send_guid_cmd(intf, 0);
3105         if (rv)
3106                 /* Send failed, no GUID available. */
3107                 bmc->dyn_guid_set = 0;
3108
3109         wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3110
3111         /* dyn_guid_set makes the guid data available. */
3112         smp_rmb();
3113
3114         intf->null_user_handler = NULL;
3115 }
3116
3117 static int
3118 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3119 {
3120         struct kernel_ipmi_msg            msg;
3121         unsigned char                     data[1];
3122         struct ipmi_system_interface_addr si;
3123
3124         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3125         si.channel = IPMI_BMC_CHANNEL;
3126         si.lun = 0;
3127
3128         msg.netfn = IPMI_NETFN_APP_REQUEST;
3129         msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3130         msg.data = data;
3131         msg.data_len = 1;
3132         data[0] = chan;
3133         return i_ipmi_request(NULL,
3134                               intf,
3135                               (struct ipmi_addr *) &si,
3136                               0,
3137                               &msg,
3138                               intf,
3139                               NULL,
3140                               NULL,
3141                               0,
3142                               intf->addrinfo[0].address,
3143                               intf->addrinfo[0].lun,
3144                               -1, 0);
3145 }
3146
3147 static void
3148 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3149 {
3150         int rv = 0;
3151         int ch;
3152         unsigned int set = intf->curr_working_cset;
3153         struct ipmi_channel *chans;
3154
3155         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3156             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3157             && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3158                 /* It's the one we want */
3159                 if (msg->msg.data[0] != 0) {
3160                         /* Got an error from the channel, just go on. */
3161
3162                         if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3163                                 /*
3164                                  * If the MC does not support this
3165                                  * command, that is legal.  We just
3166                                  * assume it has one IPMB at channel
3167                                  * zero.
3168                                  */
3169                                 intf->wchannels[set].c[0].medium
3170                                         = IPMI_CHANNEL_MEDIUM_IPMB;
3171                                 intf->wchannels[set].c[0].protocol
3172                                         = IPMI_CHANNEL_PROTOCOL_IPMB;
3173
3174                                 intf->channel_list = intf->wchannels + set;
3175                                 intf->channels_ready = true;
3176                                 wake_up(&intf->waitq);
3177                                 goto out;
3178                         }
3179                         goto next_channel;
3180                 }
3181                 if (msg->msg.data_len < 4) {
3182                         /* Message not big enough, just go on. */
3183                         goto next_channel;
3184                 }
3185                 ch = intf->curr_channel;
3186                 chans = intf->wchannels[set].c;
3187                 chans[ch].medium = msg->msg.data[2] & 0x7f;
3188                 chans[ch].protocol = msg->msg.data[3] & 0x1f;
3189
3190  next_channel:
3191                 intf->curr_channel++;
3192                 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3193                         intf->channel_list = intf->wchannels + set;
3194                         intf->channels_ready = true;
3195                         wake_up(&intf->waitq);
3196                 } else {
3197                         intf->channel_list = intf->wchannels + set;
3198                         intf->channels_ready = true;
3199                         rv = send_channel_info_cmd(intf, intf->curr_channel);
3200                 }
3201
3202                 if (rv) {
3203                         /* Got an error somehow, just give up. */
3204                         dev_warn(intf->si_dev,
3205                                  "Error sending channel information for channel %d: %d\n",
3206                                  intf->curr_channel, rv);
3207
3208                         intf->channel_list = intf->wchannels + set;
3209                         intf->channels_ready = true;
3210                         wake_up(&intf->waitq);
3211                 }
3212         }
3213  out:
3214         return;
3215 }
3216
3217 /*
3218  * Must be holding intf->bmc_reg_mutex to call this.
3219  */
3220 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3221 {
3222         int rv;
3223
3224         if (ipmi_version_major(id) > 1
3225                         || (ipmi_version_major(id) == 1
3226                             && ipmi_version_minor(id) >= 5)) {
3227                 unsigned int set;
3228
3229                 /*
3230                  * Start scanning the channels to see what is
3231                  * available.
3232                  */
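                /*
                 * wchannels[] is double-buffered: flip to the idle set,
                 * fill it in during the scan, then publish it through
                 * intf->channel_list once channels_ready is set.
                 */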
3233                 set = !intf->curr_working_cset;
3234                 intf->curr_working_cset = set;
3235                 memset(&intf->wchannels[set], 0,
3236                        sizeof(struct ipmi_channel_set));
3237
3238                 intf->null_user_handler = channel_handler;
3239                 intf->curr_channel = 0;
3240                 rv = send_channel_info_cmd(intf, 0);
3241                 if (rv) {
3242                         dev_warn(intf->si_dev,
3243                                  "Error sending channel information for channel 0, %d\n",
3244                                  rv);
3245                         return -EIO;
3246                 }
3247
3248                 /* Wait for the channel info to be read. */
3249                 wait_event(intf->waitq, intf->channels_ready);
3250                 intf->null_user_handler = NULL;
3251         } else {
3252                 unsigned int set = intf->curr_working_cset;
3253
3254                 /* Assume a single IPMB channel at zero. */
3255                 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3256                 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3257                 intf->channel_list = intf->wchannels + set;
3258                 intf->channels_ready = true;
3259         }
3260
3261         return 0;
3262 }
3263
3264 static void ipmi_poll(struct ipmi_smi *intf)
3265 {
3266         if (intf->handlers->poll)
3267                 intf->handlers->poll(intf->send_info);
3268         /* In case something came in */
3269         handle_new_recv_msgs(intf);
3270 }
3271
3272 void ipmi_poll_interface(struct ipmi_user *user)
3273 {
3274         ipmi_poll(user->intf);
3275 }
3276 EXPORT_SYMBOL(ipmi_poll_interface);
3277
3278 static void redo_bmc_reg(struct work_struct *work)
3279 {
3280         struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3281                                              bmc_reg_work);
3282
3283         if (!intf->in_shutdown)
3284                 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3285
3286         kref_put(&intf->refcount, intf_free);
3287 }
3288
3289 int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3290                       void                     *send_info,
3291                       struct device            *si_dev,
3292                       unsigned char            slave_addr)
3293 {
3294         int              i, j;
3295         int              rv;
3296         struct ipmi_smi *intf, *tintf;
3297         struct list_head *link;
3298         struct ipmi_device_id id;
3299
3300         /*
3301          * Make sure the driver is actually initialized, this handles
3302          * problems with initialization order.
3303          */
3304         if (!initialized) {
3305                 rv = ipmi_init_msghandler();
3306                 if (rv)
3307                         return rv;
3308                 /*
3309                  * The init code doesn't return an error if it was turned
3310                  * off, but it won't initialize.  Check that.
3311                  */
3312                 if (!initialized)
3313                         return -ENODEV;
3314         }
3315
3316         intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3317         if (!intf)
3318                 return -ENOMEM;
3319
3320         rv = init_srcu_struct(&intf->users_srcu);
3321         if (rv) {
3322                 kfree(intf);
3323                 return rv;
3324         }
3325
3327         intf->bmc = &intf->tmp_bmc;
3328         INIT_LIST_HEAD(&intf->bmc->intfs);
3329         mutex_init(&intf->bmc->dyn_mutex);
3330         INIT_LIST_HEAD(&intf->bmc_link);
3331         mutex_init(&intf->bmc_reg_mutex);
3332         intf->intf_num = -1; /* Mark it invalid for now. */
3333         kref_init(&intf->refcount);
3334         INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3335         intf->si_dev = si_dev;
3336         for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3337                 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3338                 intf->addrinfo[j].lun = 2;
3339         }
3340         if (slave_addr != 0)
3341                 intf->addrinfo[0].address = slave_addr;
3342         INIT_LIST_HEAD(&intf->users);
3343         intf->handlers = handlers;
3344         intf->send_info = send_info;
3345         spin_lock_init(&intf->seq_lock);
3346         for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3347                 intf->seq_table[j].inuse = 0;
3348                 intf->seq_table[j].seqid = 0;
3349         }
3350         intf->curr_seq = 0;
3351         spin_lock_init(&intf->waiting_rcv_msgs_lock);
3352         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3353         tasklet_init(&intf->recv_tasklet,
3354                      smi_recv_tasklet,
3355                      (unsigned long) intf);
3356         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3357         spin_lock_init(&intf->xmit_msgs_lock);
3358         INIT_LIST_HEAD(&intf->xmit_msgs);
3359         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3360         spin_lock_init(&intf->events_lock);
3361         atomic_set(&intf->event_waiters, 0);
3362         intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3363         INIT_LIST_HEAD(&intf->waiting_events);
3364         intf->waiting_events_count = 0;
3365         mutex_init(&intf->cmd_rcvrs_mutex);
3366         spin_lock_init(&intf->maintenance_mode_lock);
3367         INIT_LIST_HEAD(&intf->cmd_rcvrs);
3368         init_waitqueue_head(&intf->waitq);
3369         for (i = 0; i < IPMI_NUM_STATS; i++)
3370                 atomic_set(&intf->stats[i], 0);
3371
3372         mutex_lock(&ipmi_interfaces_mutex);
3373         /* Look for a hole in the numbers. */
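        /*
         * The interface list is kept ordered by intf_num, so the first
         * position where tintf->intf_num != i is a free number, and
         * "link" ends up pointing at the entry to insert before.
         */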
3374         i = 0;
3375         link = &ipmi_interfaces;
3376         list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
3377                 if (tintf->intf_num != i) {
3378                         link = &tintf->link;
3379                         break;
3380                 }
3381                 i++;
3382         }
3383         /* Add the new interface in numeric order. */
3384         if (i == 0)
3385                 list_add_rcu(&intf->link, &ipmi_interfaces);
3386         else
3387                 list_add_tail_rcu(&intf->link, link);
3388
3389         rv = handlers->start_processing(send_info, intf);
3390         if (rv)
3391                 goto out_err;
3392
3393         rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3394         if (rv) {
3395                 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3396                 goto out_err_started;
3397         }
3398
3399         mutex_lock(&intf->bmc_reg_mutex);
3400         rv = __scan_channels(intf, &id);
3401         mutex_unlock(&intf->bmc_reg_mutex);
3402         if (rv)
3403                 goto out_err_bmc_reg;
3404
3405         /*
3406          * Keep memory order straight for RCU readers.  Make
3407          * sure everything else is committed to memory before
3408          * setting intf_num to mark the interface valid.
3409          */
3410         smp_wmb();
3411         intf->intf_num = i;
3412         mutex_unlock(&ipmi_interfaces_mutex);
3413
3414         /* After this point the interface is legal to use. */
3415         call_smi_watchers(i, intf->si_dev);
3416
3417         return 0;
3418
3419  out_err_bmc_reg:
3420         ipmi_bmc_unregister(intf);
3421  out_err_started:
3422         if (intf->handlers->shutdown)
3423                 intf->handlers->shutdown(intf->send_info);
3424  out_err:
3425         list_del_rcu(&intf->link);
3426         mutex_unlock(&ipmi_interfaces_mutex);
3427         synchronize_srcu(&ipmi_interfaces_srcu);
3428         cleanup_srcu_struct(&intf->users_srcu);
3429         kref_put(&intf->refcount, intf_free);
3430
3431         return rv;
3432 }
3433 EXPORT_SYMBOL(ipmi_register_smi);
3434
3435 static void deliver_smi_err_response(struct ipmi_smi *intf,
3436                                      struct ipmi_smi_msg *msg,
3437                                      unsigned char err)
3438 {
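        /*
         * data[0] is (netfn << 2) | lun; OR-ing in 4 sets the low bit of
         * the netfn, turning the request netfn into its response netfn
         * for this synthesized error reply.
         */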
3439         msg->rsp[0] = msg->data[0] | 4;
3440         msg->rsp[1] = msg->data[1];
3441         msg->rsp[2] = err;
3442         msg->rsp_size = 3;
3443         /* It's an error, so it will never requeue, no need to check return. */
3444         handle_one_recv_msg(intf, msg);
3445 }
3446
3447 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3448 {
3449         int              i;
3450         struct seq_table *ent;
3451         struct ipmi_smi_msg *msg;
3452         struct list_head *entry;
3453         struct list_head tmplist;
3454
3455         /* Clear out our transmit queues and hold the messages. */
3456         INIT_LIST_HEAD(&tmplist);
3457         list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3458         list_splice_tail(&intf->xmit_msgs, &tmplist);
3459
3460         /* Current message first, to preserve order */
3461         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3462                 /* Wait for the message to clear out. */
3463                 schedule_timeout(1);
3464         }
3465
3466         /* No need for locks, the interface is down. */
3467
3468         /*
3469          * Return errors for all pending messages in queue and in the
3470          * tables waiting for remote responses.
3471          */
3472         while (!list_empty(&tmplist)) {
3473                 entry = tmplist.next;
3474                 list_del(entry);
3475                 msg = list_entry(entry, struct ipmi_smi_msg, link);
3476                 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3477         }
3478
3479         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3480                 ent = &intf->seq_table[i];
3481                 if (!ent->inuse)
3482                         continue;
3483                 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3484         }
3485 }
3486
3487 void ipmi_unregister_smi(struct ipmi_smi *intf)
3488 {
3489         struct ipmi_smi_watcher *w;
3490         int intf_num = intf->intf_num, index;
3491
3492         mutex_lock(&ipmi_interfaces_mutex);
3493         intf->intf_num = -1;
3494         intf->in_shutdown = true;
3495         list_del_rcu(&intf->link);
3496         mutex_unlock(&ipmi_interfaces_mutex);
3497         synchronize_srcu(&ipmi_interfaces_srcu);
3498
3499         /* At this point no users can be added to the interface. */
3500
3501         /*
3502          * Call all the watcher interfaces to tell them that
3503          * an interface is going away.
3504          */
3505         mutex_lock(&smi_watchers_mutex);
3506         list_for_each_entry(w, &smi_watchers, link)
3507                 w->smi_gone(intf_num);
3508         mutex_unlock(&smi_watchers_mutex);
3509
3510         index = srcu_read_lock(&intf->users_srcu);
3511         while (!list_empty(&intf->users)) {
3512                 struct ipmi_user *user =
3513                         container_of(list_next_rcu(&intf->users),
3514                                      struct ipmi_user, link);
3515
3516                 _ipmi_destroy_user(user);
3517         }
3518         srcu_read_unlock(&intf->users_srcu, index);
3519
3520         if (intf->handlers->shutdown)
3521                 intf->handlers->shutdown(intf->send_info);
3522
3523         cleanup_smi_msgs(intf);
3524
3525         ipmi_bmc_unregister(intf);
3526
3527         cleanup_srcu_struct(&intf->users_srcu);
3528         kref_put(&intf->refcount, intf_free);
3529 }
3530 EXPORT_SYMBOL(ipmi_unregister_smi);
3531
3532 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3533                                    struct ipmi_smi_msg *msg)
3534 {
3535         struct ipmi_ipmb_addr ipmb_addr;
3536         struct ipmi_recv_msg  *recv_msg;
3537
3538         /*
3539          * This is 11, not 10, because the response must contain a
3540          * completion code.
3541          */
3542         if (msg->rsp_size < 11) {
3543                 /* Message not big enough, just ignore it. */
3544                 ipmi_inc_stat(intf, invalid_ipmb_responses);
3545                 return 0;
3546         }
3547
3548         if (msg->rsp[2] != 0) {
3549                 /* An error getting the response, just ignore it. */
3550                 return 0;
3551         }
3552
3553         ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3554         ipmb_addr.slave_addr = msg->rsp[6];
3555         ipmb_addr.channel = msg->rsp[3] & 0x0f;
3556         ipmb_addr.lun = msg->rsp[7] & 3;
3557
3558         /*
3559          * It's a response from a remote entity.  Look up the sequence
3560          * number and handle the response.
3561          */
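        /*
         * (msg->rsp[4] >> 2) & ~1 clears the response bit of the netfn so
         * it matches the request netfn that was stored with the sequence
         * table entry.
         */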
3562         if (intf_find_seq(intf,
3563                           msg->rsp[7] >> 2,
3564                           msg->rsp[3] & 0x0f,
3565                           msg->rsp[8],
3566                           (msg->rsp[4] >> 2) & (~1),
3567                           (struct ipmi_addr *) &ipmb_addr,
3568                           &recv_msg)) {
3569                 /*
3570                  * We were unable to find the sequence number,
3571                  * so just nuke the message.
3572                  */
3573                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3574                 return 0;
3575         }
3576
3577         memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3578         /*
3579          * The other fields matched, so no need to set them, except
3580          * for netfn, which needs to be the response that was
3581          * returned, not the request value.
3582          */
3583         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3584         recv_msg->msg.data = recv_msg->msg_data;
3585         recv_msg->msg.data_len = msg->rsp_size - 10;
3586         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3587         if (deliver_response(intf, recv_msg))
3588                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3589         else
3590                 ipmi_inc_stat(intf, handled_ipmb_responses);
3591
3592         return 0;
3593 }
3594
3595 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3596                                    struct ipmi_smi_msg *msg)
3597 {
3598         struct cmd_rcvr          *rcvr;
3599         int                      rv = 0;
3600         unsigned char            netfn;
3601         unsigned char            cmd;
3602         unsigned char            chan;
3603         struct ipmi_user         *user = NULL;
3604         struct ipmi_ipmb_addr    *ipmb_addr;
3605         struct ipmi_recv_msg     *recv_msg;
3606
3607         if (msg->rsp_size < 10) {
3608                 /* Message not big enough, just ignore it. */
3609                 ipmi_inc_stat(intf, invalid_commands);
3610                 return 0;
3611         }
3612
3613         if (msg->rsp[2] != 0) {
3614                 /* An error getting the response, just ignore it. */
3615                 return 0;
3616         }
3617
3618         netfn = msg->rsp[4] >> 2;
3619         cmd = msg->rsp[8];
3620         chan = msg->rsp[3] & 0xf;
3621
3622         rcu_read_lock();
3623         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3624         if (rcvr) {
3625                 user = rcvr->user;
3626                 kref_get(&user->refcount);
3627         } else
3628                 user = NULL;
3629         rcu_read_unlock();
3630
3631         if (user == NULL) {
3632                 /* We didn't find a user, deliver an error response. */
3633                 ipmi_inc_stat(intf, unhandled_commands);
3634
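                /*
                 * Build a Send Message command that carries an IPMB
                 * response with an "invalid command" completion code back
                 * to the requester, echoing its rqseq/lun and command.
                 */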
3635                 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3636                 msg->data[1] = IPMI_SEND_MSG_CMD;
3637                 msg->data[2] = msg->rsp[3];
3638                 msg->data[3] = msg->rsp[6];
3639                 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3640                 msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3641                 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3642                 /* rqseq/lun */
3643                 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3644                 msg->data[8] = msg->rsp[8]; /* cmd */
3645                 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3646                 msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3647                 msg->data_size = 11;
3648
3649                 ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
3650
3651                 rcu_read_lock();
3652                 if (!intf->in_shutdown) {
3653                         smi_send(intf, intf->handlers, msg, 0);
3654                         /*
3655                          * We used the message, so return the value
3656                          * that causes it to not be freed or
3657                          * queued.
3658                          */
3659                         rv = -1;
3660                 }
3661                 rcu_read_unlock();
3662         } else {
3663                 recv_msg = ipmi_alloc_recv_msg();
3664                 if (!recv_msg) {
3665                         /*
3666                          * We couldn't allocate memory for the
3667                          * message, so requeue it for handling
3668                          * later.
3669                          */
3670                         rv = 1;
3671                         kref_put(&user->refcount, free_user);
3672                 } else {
3673                         /* Extract the source address from the data. */
3674                         ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3675                         ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3676                         ipmb_addr->slave_addr = msg->rsp[6];
3677                         ipmb_addr->lun = msg->rsp[7] & 3;
3678                         ipmb_addr->channel = msg->rsp[3] & 0xf;
3679
3680                         /*
3681                          * Extract the rest of the message information
3682                          * from the IPMB header.
3683                          */
3684                         recv_msg->user = user;
3685                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3686                         recv_msg->msgid = msg->rsp[7] >> 2;
3687                         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3688                         recv_msg->msg.cmd = msg->rsp[8];
3689                         recv_msg->msg.data = recv_msg->msg_data;
3690
3691                         /*
3692                          * We chop off 10, not 9 bytes because the checksum
3693                          * at the end also needs to be removed.
3694                          */
3695                         recv_msg->msg.data_len = msg->rsp_size - 10;
3696                         memcpy(recv_msg->msg_data, &msg->rsp[9],
3697                                msg->rsp_size - 10);
3698                         if (deliver_response(intf, recv_msg))
3699                                 ipmi_inc_stat(intf, unhandled_commands);
3700                         else
3701                                 ipmi_inc_stat(intf, handled_commands);
3702                 }
3703         }
3704
3705         return rv;
3706 }
3707
3708 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3709                                   struct ipmi_smi_msg *msg)
3710 {
3711         struct ipmi_lan_addr  lan_addr;
3712         struct ipmi_recv_msg  *recv_msg;
3713
3714
3715         /*
3716          * This is 13, not 12, because the response must contain a
3717          * completion code.
3718          */
3719         if (msg->rsp_size < 13) {
3720                 /* Message not big enough, just ignore it. */
3721                 ipmi_inc_stat(intf, invalid_lan_responses);
3722                 return 0;
3723         }
3724
3725         if (msg->rsp[2] != 0) {
3726                 /* An error getting the response, just ignore it. */
3727                 return 0;
3728         }
3729
3730         lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3731         lan_addr.session_handle = msg->rsp[4];
3732         lan_addr.remote_SWID = msg->rsp[8];
3733         lan_addr.local_SWID = msg->rsp[5];
3734         lan_addr.channel = msg->rsp[3] & 0x0f;
3735         lan_addr.privilege = msg->rsp[3] >> 4;
3736         lan_addr.lun = msg->rsp[9] & 3;
3737
3738         /*
3739          * It's a response from a remote entity.  Look up the sequence
3740          * number and handle the response.
3741          */
3742         if (intf_find_seq(intf,
3743                           msg->rsp[9] >> 2,
3744                           msg->rsp[3] & 0x0f,
3745                           msg->rsp[10],
3746                           (msg->rsp[6] >> 2) & (~1),
3747                           (struct ipmi_addr *) &lan_addr,
3748                           &recv_msg)) {
3749                 /*
3750                  * We were unable to find the sequence number,
3751                  * so just nuke the message.
3752                  */
3753                 ipmi_inc_stat(intf, unhandled_lan_responses);
3754                 return 0;
3755         }
3756
3757         memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3758         /*
3759          * The other fields matched, so no need to set them, except
3760          * for netfn, which needs to be the response that was
3761          * returned, not the request value.
3762          */
3763         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3764         recv_msg->msg.data = recv_msg->msg_data;
3765         recv_msg->msg.data_len = msg->rsp_size - 12;
3766         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3767         if (deliver_response(intf, recv_msg))
3768                 ipmi_inc_stat(intf, unhandled_lan_responses);
3769         else
3770                 ipmi_inc_stat(intf, handled_lan_responses);
3771
3772         return 0;
3773 }
3774
3775 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3776                                   struct ipmi_smi_msg *msg)
3777 {
3778         struct cmd_rcvr          *rcvr;
3779         int                      rv = 0;
3780         unsigned char            netfn;
3781         unsigned char            cmd;
3782         unsigned char            chan;
3783         struct ipmi_user         *user = NULL;
3784         struct ipmi_lan_addr     *lan_addr;
3785         struct ipmi_recv_msg     *recv_msg;
3786
3787         if (msg->rsp_size < 12) {
3788                 /* Message not big enough, just ignore it. */
3789                 ipmi_inc_stat(intf, invalid_commands);
3790                 return 0;
3791         }
3792
3793         if (msg->rsp[2] != 0) {
3794                 /* An error getting the response, just ignore it. */
3795                 return 0;
3796         }
3797
3798         netfn = msg->rsp[6] >> 2;
3799         cmd = msg->rsp[10];
3800         chan = msg->rsp[3] & 0xf;
3801
3802         rcu_read_lock();
3803         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3804         if (rcvr) {
3805                 user = rcvr->user;
3806                 kref_get(&user->refcount);
3807         } else
3808                 user = NULL;
3809         rcu_read_unlock();
3810
3811         if (user == NULL) {
3812                 /* We didn't find a user, just give up. */
3813                 ipmi_inc_stat(intf, unhandled_commands);
3814
3815                 /*
3816                  * Don't do anything with these messages, just allow
3817                  * them to be freed.
3818                  */
3819                 rv = 0;
3820         } else {
3821                 recv_msg = ipmi_alloc_recv_msg();
3822                 if (!recv_msg) {
3823                         /*
3824                          * We couldn't allocate memory for the
3825                          * message, so requeue it for handling later.
3826                          */
3827                         rv = 1;
3828                         kref_put(&user->refcount, free_user);
3829                 } else {
3830                         /* Extract the source address from the data. */
3831                         lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3832                         lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3833                         lan_addr->session_handle = msg->rsp[4];
3834                         lan_addr->remote_SWID = msg->rsp[8];
3835                         lan_addr->local_SWID = msg->rsp[5];
3836                         lan_addr->lun = msg->rsp[9] & 3;
3837                         lan_addr->channel = msg->rsp[3] & 0xf;
3838                         lan_addr->privilege = msg->rsp[3] >> 4;
3839
3840                         /*
3841                          * Extract the rest of the message information
3842                          * from the IPMB header.
3843                          */
3844                         recv_msg->user = user;
3845                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3846                         recv_msg->msgid = msg->rsp[9] >> 2;
3847                         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3848                         recv_msg->msg.cmd = msg->rsp[10];
3849                         recv_msg->msg.data = recv_msg->msg_data;
3850
3851                         /*
3852                          * We chop off 12, not 11 bytes because the checksum
3853                          * at the end also needs to be removed.
3854                          */
3855                         recv_msg->msg.data_len = msg->rsp_size - 12;
3856                         memcpy(recv_msg->msg_data, &msg->rsp[11],
3857                                msg->rsp_size - 12);
3858                         if (deliver_response(intf, recv_msg))
3859                                 ipmi_inc_stat(intf, unhandled_commands);
3860                         else
3861                                 ipmi_inc_stat(intf, handled_commands);
3862                 }
3863         }
3864
3865         return rv;
3866 }
3867
3868 /*
3869  * This routine will handle "Get Message" command responses with
3870  * channels that use an OEM Medium. The message format belongs to
3871  * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3872  * Chapter 22, sections 22.6 and 22.24 for more details.
3873  */
3874 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3875                                   struct ipmi_smi_msg *msg)
3876 {
3877         struct cmd_rcvr       *rcvr;
3878         int                   rv = 0;
3879         unsigned char         netfn;
3880         unsigned char         cmd;
3881         unsigned char         chan;
3882         struct ipmi_user *user = NULL;
3883         struct ipmi_system_interface_addr *smi_addr;
3884         struct ipmi_recv_msg  *recv_msg;
3885
3886         /*
3887          * We expect the OEM SW to perform error checking,
3888          * so we just do some basic sanity checks.
3889          */
3890         if (msg->rsp_size < 4) {
3891                 /* Message not big enough, just ignore it. */
3892                 ipmi_inc_stat(intf, invalid_commands);
3893                 return 0;
3894         }
3895
3896         if (msg->rsp[2] != 0) {
3897                 /* An error getting the response, just ignore it. */
3898                 return 0;
3899         }
3900
3901         /*
3902          * This is an OEM Message, so the OEM needs to know how to
3903          * handle the message. We do no interpretation.
3904          */
3905         netfn = msg->rsp[0] >> 2;
3906         cmd = msg->rsp[1];
3907         chan = msg->rsp[3] & 0xf;
3908
3909         rcu_read_lock();
3910         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3911         if (rcvr) {
3912                 user = rcvr->user;
3913                 kref_get(&user->refcount);
3914         } else
3915                 user = NULL;
3916         rcu_read_unlock();
3917
3918         if (user == NULL) {
3919                 /* We didn't find a user, just give up. */
3920                 ipmi_inc_stat(intf, unhandled_commands);
3921
3922                 /*
3923                  * Don't do anything with these messages, just allow
3924                  * them to be freed.
3925                  */
3926
3927                 rv = 0;
3928         } else {
3929                 recv_msg = ipmi_alloc_recv_msg();
3930                 if (!recv_msg) {
3931                         /*
3932                          * We couldn't allocate memory for the
3933                          * message, so requeue it for handling
3934                          * later.
3935                          */
3936                         rv = 1;
3937                         kref_put(&user->refcount, free_user);
3938                 } else {
3939                         /*
3940                          * OEM Messages are expected to be delivered via
3941                          * the system interface to SMS software.  We might
3942                          * need to revisit this depending on OEM
3943                          * requirements.
3944                          */
3945                         smi_addr = ((struct ipmi_system_interface_addr *)
3946                                     &recv_msg->addr);
3947                         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3948                         smi_addr->channel = IPMI_BMC_CHANNEL;
3949                         smi_addr->lun = msg->rsp[0] & 3;
3950
3951                         recv_msg->user = user;
3952                         recv_msg->user_msg_data = NULL;
3953                         recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
3954                         recv_msg->msg.netfn = msg->rsp[0] >> 2;
3955                         recv_msg->msg.cmd = msg->rsp[1];
3956                         recv_msg->msg.data = recv_msg->msg_data;
3957
3958                         /*
3959                          * The message starts at byte 4, which follows
3960                          * the Channel Byte in the "GET MESSAGE" command.
3961                          */
3962                         recv_msg->msg.data_len = msg->rsp_size - 4;
3963                         memcpy(recv_msg->msg_data, &msg->rsp[4],
3964                                msg->rsp_size - 4);
3965                         if (deliver_response(intf, recv_msg))
3966                                 ipmi_inc_stat(intf, unhandled_commands);
3967                         else
3968                                 ipmi_inc_stat(intf, handled_commands);
3969                 }
3970         }
3971
3972         return rv;
3973 }
3974
3975 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3976                                      struct ipmi_smi_msg  *msg)
3977 {
3978         struct ipmi_system_interface_addr *smi_addr;
3979
3980         recv_msg->msgid = 0;
3981         smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
3982         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3983         smi_addr->channel = IPMI_BMC_CHANNEL;
3984         smi_addr->lun = msg->rsp[0] & 3;
3985         recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3986         recv_msg->msg.netfn = msg->rsp[0] >> 2;
3987         recv_msg->msg.cmd = msg->rsp[1];
3988         memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
3989         recv_msg->msg.data = recv_msg->msg_data;
3990         recv_msg->msg.data_len = msg->rsp_size - 3;
3991 }
3992
3993 static int handle_read_event_rsp(struct ipmi_smi *intf,
3994                                  struct ipmi_smi_msg *msg)
3995 {
3996         struct ipmi_recv_msg *recv_msg, *recv_msg2;
3997         struct list_head     msgs;
3998         struct ipmi_user     *user;
3999         int rv = 0, deliver_count = 0, index;
4000         unsigned long        flags;
4001
4002         if (msg->rsp_size < 19) {
4003                 /* Message is too small to be an IPMB event. */
4004                 ipmi_inc_stat(intf, invalid_events);
4005                 return 0;
4006         }
4007
4008         if (msg->rsp[2] != 0) {
4009                 /* An error getting the event, just ignore it. */
4010                 return 0;
4011         }
4012
4013         INIT_LIST_HEAD(&msgs);
4014
4015         spin_lock_irqsave(&intf->events_lock, flags);
4016
4017         ipmi_inc_stat(intf, events);
4018
4019         /*
4020          * Allocate and fill in one message for every user that is
4021          * getting events.
4022          */
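        /*
         * Each listener gets its own copy so it can free its message
         * independently; if any allocation fails, everything queued so
         * far is freed and the event is requeued for later.
         */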
4023         index = srcu_read_lock(&intf->users_srcu);
4024         list_for_each_entry_rcu(user, &intf->users, link) {
4025                 if (!user->gets_events)
4026                         continue;
4027
4028                 recv_msg = ipmi_alloc_recv_msg();
4029                 if (!recv_msg) {
4030                         srcu_read_unlock(&intf->users_srcu, index);
4031                         list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4032                                                  link) {
4033                                 list_del(&recv_msg->link);
4034                                 ipmi_free_recv_msg(recv_msg);
4035                         }
4036                         /*
4037                          * We couldn't allocate memory for the
4038                          * message, so requeue it for handling
4039                          * later.
4040                          */
4041                         rv = 1;
4042                         goto out;
4043                 }
4044
4045                 deliver_count++;
4046
4047                 copy_event_into_recv_msg(recv_msg, msg);
4048                 recv_msg->user = user;
4049                 kref_get(&user->refcount);
4050                 list_add_tail(&recv_msg->link, &msgs);
4051         }
4052         srcu_read_unlock(&intf->users_srcu, index);
4053
4054         if (deliver_count) {
4055                 /* Now deliver all the messages. */
4056                 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4057                         list_del(&recv_msg->link);
4058                         deliver_local_response(intf, recv_msg);
4059                 }
4060         } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4061                 /*
4062                  * No one to receive the message, so put it in the queue
4063                  * if there aren't already too many things in the queue.
4064                  */
4065                 recv_msg = ipmi_alloc_recv_msg();
4066                 if (!recv_msg) {
4067                         /*
4068                          * We couldn't allocate memory for the
4069                          * message, so requeue it for handling
4070                          * later.
4071                          */
4072                         rv = 1;
4073                         goto out;
4074                 }
4075
4076                 copy_event_into_recv_msg(recv_msg, msg);
4077                 list_add_tail(&recv_msg->link, &intf->waiting_events);
4078                 intf->waiting_events_count++;
4079         } else if (!intf->event_msg_printed) {
4080                 /*
4081                  * There are too many things in the queue, discard this
4082                  * message.
4083                  */
4084                 dev_warn(intf->si_dev,
4085                          "Event queue full, discarding incoming events\n");
4086                 intf->event_msg_printed = 1;
4087         }
4088
4089  out:
4090         spin_unlock_irqrestore(&intf->events_lock, flags);
4091
4092         return rv;
4093 }
4094
4095 static int handle_bmc_rsp(struct ipmi_smi *intf,
4096                           struct ipmi_smi_msg *msg)
4097 {
4098         struct ipmi_recv_msg *recv_msg;
4099         struct ipmi_system_interface_addr *smi_addr;
4100
4101         recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4102         if (recv_msg == NULL) {
4103                 dev_warn(intf->si_dev,
4104                          "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4105                 return 0;
4106         }
4107
4108         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4109         recv_msg->msgid = msg->msgid;
4110         smi_addr = ((struct ipmi_system_interface_addr *)
4111                     &recv_msg->addr);
4112         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4113         smi_addr->channel = IPMI_BMC_CHANNEL;
4114         smi_addr->lun = msg->rsp[0] & 3;
4115         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4116         recv_msg->msg.cmd = msg->rsp[1];
4117         memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4118         recv_msg->msg.data = recv_msg->msg_data;
4119         recv_msg->msg.data_len = msg->rsp_size - 2;
4120         deliver_local_response(intf, recv_msg);
4121
4122         return 0;
4123 }
4124
4125 /*
4126  * Handle a received message.  Return 1 if the message should be requeued,
4127  * 0 if the message should be freed, or -1 if the message should not
4128  * be freed or requeued.
4129  */
4130 static int handle_one_recv_msg(struct ipmi_smi *intf,
4131                                struct ipmi_smi_msg *msg)
4132 {
4133         int requeue;
4134         int chan;
4135
4136         ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
4137         if (msg->rsp_size < 2) {
4138                 /* Message is too small to be correct. */
4139                 dev_warn(intf->si_dev,
4140                          "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4141                          (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4142
4143                 /* Generate an error response for the message. */
4144                 msg->rsp[0] = msg->data[0] | (1 << 2);
4145                 msg->rsp[1] = msg->data[1];
4146                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4147                 msg->rsp_size = 3;
4148         } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4149                    || (msg->rsp[1] != msg->data[1])) {
4150                 /*
4151                  * The NetFN and Command in the response are not even
4152                  * marginally correct.
4153                  */
4154                 dev_warn(intf->si_dev,
4155                          "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4156                          (msg->data[0] >> 2) | 1, msg->data[1],
4157                          msg->rsp[0] >> 2, msg->rsp[1]);
4158
4159                 /* Generate an error response for the message. */
4160                 msg->rsp[0] = msg->data[0] | (1 << 2);
4161                 msg->rsp[1] = msg->data[1];
4162                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4163                 msg->rsp_size = 3;
4164         }
4165
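        /*
         * Dispatch: a Send Message response with user_data set means a
         * response we sent has completed, and the originating user gets a
         * send-response acknowledgment; Get Message responses are routed
         * by channel medium; Read Event Message Buffer responses are
         * asynchronous events; anything else is a response from the local
         * BMC.
         */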
4166         if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4167             && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4168             && (msg->user_data != NULL)) {
4169                 /*
4170                  * It's a response to a response we sent.  For this we
4171                  * deliver a send message response to the user.
4172                  */
4173                 struct ipmi_recv_msg *recv_msg = msg->user_data;
4174
4175                 requeue = 0;
4176                 if (msg->rsp_size < 2)
4177                         /* Message is too small to be correct. */
4178                         goto out;
4179
4180                 chan = msg->data[2] & 0x0f;
4181                 if (chan >= IPMI_MAX_CHANNELS)
4182                         /* Invalid channel number */
4183                         goto out;
4184
4185                 if (!recv_msg)
4186                         goto out;
4187
4188                 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4189                 recv_msg->msg.data = recv_msg->msg_data;
4190                 recv_msg->msg.data_len = 1;
4191                 recv_msg->msg_data[0] = msg->rsp[2];
4192                 deliver_local_response(intf, recv_msg);
4193         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4194                    && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4195                 struct ipmi_channel   *chans;
4196
4197                 /* It's from the receive queue. */
4198                 chan = msg->rsp[3] & 0xf;
4199                 if (chan >= IPMI_MAX_CHANNELS) {
4200                         /* Invalid channel number */
4201                         requeue = 0;
4202                         goto out;
4203                 }
4204
4205                 /*
4206                  * We need to make sure the channels have been initialized.
4207                  * The channel_handler routine will set the "curr_channel"
4208                  * equal to or greater than IPMI_MAX_CHANNELS when all the
4209                  * channels for this interface have been initialized.
4210                  */
4211                 if (!intf->channels_ready) {
4212                         requeue = 0; /* Throw the message away */
4213                         goto out;
4214                 }
4215
4216                 chans = READ_ONCE(intf->channel_list)->c;
4217
4218                 switch (chans[chan].medium) {
4219                 case IPMI_CHANNEL_MEDIUM_IPMB:
4220                         if (msg->rsp[4] & 0x04) {
4221                                 /*
4222                                  * It's a response, so find the
4223                                  * requesting message and send it up.
4224                                  */
4225                                 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4226                         } else {
4227                                 /*
4228                                  * It's a command to the SMS from some other
4229                                  * entity.  Handle that.
4230                                  */
4231                                 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4232                         }
4233                         break;
4234
4235                 case IPMI_CHANNEL_MEDIUM_8023LAN:
4236                 case IPMI_CHANNEL_MEDIUM_ASYNC:
4237                         if (msg->rsp[6] & 0x04) {
4238                                 /*
4239                                  * It's a response, so find the
4240                                  * requesting message and send it up.
4241                                  */
4242                                 requeue = handle_lan_get_msg_rsp(intf, msg);
4243                         } else {
4244                                 /*
4245                                  * It's a command to the SMS from some other
4246                                  * entity.  Handle that.
4247                                  */
4248                                 requeue = handle_lan_get_msg_cmd(intf, msg);
4249                         }
4250                         break;
4251
4252                 default:
4253                         /* Check for OEM Channels.  Clients had better
4254                            register for these commands. */
4255                         if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4256                             && (chans[chan].medium
4257                                 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4258                                 requeue = handle_oem_get_msg_cmd(intf, msg);
4259                         } else {
4260                                 /*
4261                                  * We don't handle the channel type, so just
4262                                  * free the message.
4263                                  */
4264                                 requeue = 0;
4265                         }
4266                 }
4267
4268         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4269                    && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4270                 /* It's an asynchronous event. */
4271                 requeue = handle_read_event_rsp(intf, msg);
4272         } else {
4273                 /* It's a response from the local BMC. */
4274                 requeue = handle_bmc_rsp(intf, msg);
4275         }
4276
4277  out:
4278         return requeue;
4279 }
4280
4281 /*
4282  * If there are messages in the queue or pretimeouts, handle them.
4283  */
4284 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4285 {
4286         struct ipmi_smi_msg  *smi_msg;
4287         unsigned long        flags = 0;
4288         int                  rv;
4289         int                  run_to_completion = intf->run_to_completion;
4290
4291         /* See if any waiting messages need to be processed. */
4292         if (!run_to_completion)
4293                 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4294         while (!list_empty(&intf->waiting_rcv_msgs)) {
4295                 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4296                                      struct ipmi_smi_msg, link);
4297                 list_del(&smi_msg->link);
4298                 if (!run_to_completion)
4299                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4300                                                flags);
4301                 rv = handle_one_recv_msg(intf, smi_msg);
4302                 if (!run_to_completion)
4303                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4304                 if (rv > 0) {
4305                         /*
4306                          * To preserve message order, quit if we
4307                          * can't handle a message.  Add the message
4308                          * back at the head, this is safe because this
4309                          * back at the head; this is safe because this
4310                          * messages.
4311                          */
4312                         list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4313                         break;
4314                 } else {
4315                         if (rv == 0)
4316                                 /* Message handled */
4317                                 ipmi_free_smi_msg(smi_msg);
4318                         /* If rv < 0, fatal error, del but don't free. */
4319                 }
4320         }
4321         if (!run_to_completion)
4322                 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4323
4324         /*
4325          * If the pretimeout count is non-zero, decrement one from it and
4326          * deliver pretimeouts to all the users.
4327          */
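        /*
         * atomic_add_unless(v, -1, 0) only decrements when the counter is
         * non-zero and returns non-zero when it did, so at most one
         * pretimeout is delivered per pass through here.
         */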
4328         if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4329                 struct ipmi_user *user;
4330                 int index;
4331
4332                 index = srcu_read_lock(&intf->users_srcu);
4333                 list_for_each_entry_rcu(user, &intf->users, link) {
4334                         if (user->handler->ipmi_watchdog_pretimeout)
4335                                 user->handler->ipmi_watchdog_pretimeout(
4336                                         user->handler_data);
4337                 }
4338                 srcu_read_unlock(&intf->users_srcu, index);
4339         }
4340 }
4341
4342 static void smi_recv_tasklet(unsigned long val)
4343 {
4344         unsigned long flags = 0; /* keep us warning-free. */
4345         struct ipmi_smi *intf = (struct ipmi_smi *) val;
4346         int run_to_completion = intf->run_to_completion;
4347         struct ipmi_smi_msg *newmsg = NULL;
4348
4349         /*
4350          * Start the next message if available.
4351          *
4352          * Do this here, not in the actual receiver, because we may
4353          * deadlock: the lower layer is allowed to hold locks while
4354          * calling message delivery.
4355          */
4356
4357         rcu_read_lock();
4358
4359         if (!run_to_completion)
4360                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4361         if (intf->curr_msg == NULL && !intf->in_shutdown) {
4362                 struct list_head *entry = NULL;
4363
4364                 /* Pick the high priority queue first. */
4365                 if (!list_empty(&intf->hp_xmit_msgs))
4366                         entry = intf->hp_xmit_msgs.next;
4367                 else if (!list_empty(&intf->xmit_msgs))
4368                         entry = intf->xmit_msgs.next;
4369
4370                 if (entry) {
4371                         list_del(entry);
4372                         newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4373                         intf->curr_msg = newmsg;
4374                 }
4375         }
4376         if (!run_to_completion)
4377                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4378         if (newmsg)
4379                 intf->handlers->sender(intf->send_info, newmsg);
4380
4381         rcu_read_unlock();
4382
4383         handle_new_recv_msgs(intf);
4384 }
4385
4386 /* Handle a new message from the lower layer. */
4387 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4388                            struct ipmi_smi_msg *msg)
4389 {
4390         unsigned long flags = 0; /* keep us warning-free. */
4391         int run_to_completion = intf->run_to_completion;
4392
4393         if ((msg->data_size >= 2)
4394             && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4395             && (msg->data[1] == IPMI_SEND_MSG_CMD)
4396             && (msg->user_data == NULL)) {
4397
4398                 if (intf->in_shutdown)
4399                         goto free_msg;
4400
4401                 /*
4402                  * This is the local response to a command send; start
4403                  * the timer for these.  The user_data will not be
4404                  * NULL if this is a response send, and we will let
4405                  * response sends just go through.
4406                  */
4407
4408                 /*
4409                  * Check for errors.  If we get certain errors (ones
4410                  * that basically mean we can try again later), we
4411                  * ignore them and start the timer.  Otherwise we
4412                  * report the error immediately.
4413                  */
4414                 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4415                     && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4416                     && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4417                     && (msg->rsp[2] != IPMI_BUS_ERR)
4418                     && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4419                         int ch = msg->rsp[3] & 0xf;
4420                         struct ipmi_channel *chans;
4421
4422                         /* Got an error sending the message, handle it. */
4423
4424                         chans = READ_ONCE(intf->channel_list)->c;
4425                         if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4426                             || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4427                                 ipmi_inc_stat(intf, sent_lan_command_errs);
4428                         else
4429                                 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4430                         intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4431                 } else
4432                         /* The message was sent; start the timer. */
4433                         intf_start_seq_timer(intf, msg->msgid);
4434
4435 free_msg:
4436                 ipmi_free_smi_msg(msg);
4437         } else {
4438                 /*
4439                  * To preserve message order, we keep a queue and deliver from
4440                  * a tasklet.
4441                  */
4442                 if (!run_to_completion)
4443                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4444                 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4445                 if (!run_to_completion)
4446                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4447                                                flags);
4448         }
4449
4450         if (!run_to_completion)
4451                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4452         /*
4453          * We can get an asynchronous event or receive message in addition
4454          * to commands we send.
4455          */
4456         if (msg == intf->curr_msg)
4457                 intf->curr_msg = NULL;
4458         if (!run_to_completion)
4459                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4460
4461         if (run_to_completion)
4462                 smi_recv_tasklet((unsigned long) intf);
4463         else
4464                 tasklet_schedule(&intf->recv_tasklet);
4465 }
4466 EXPORT_SYMBOL(ipmi_smi_msg_received);
4467
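/*
 * Called by the lower layer when the watchdog pretimeout goes off.  Just
 * note that a pretimeout needs to be delivered and let the receive
 * tasklet notify the users.
 */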
4468 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4469 {
4470         if (intf->in_shutdown)
4471                 return;
4472
4473         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4474         tasklet_schedule(&intf->recv_tasklet);
4475 }
4476 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4477
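/*
 * Rebuild an SMI message from the data saved in a receive message so a
 * timed-out request can be retransmitted with its original sequence
 * number and sequence id.
 */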
4478 static struct ipmi_smi_msg *
4479 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4480                   unsigned char seq, long seqid)
4481 {
4482         struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4483         if (!smi_msg)
4484                 /*
4485                  * If we can't allocate the message, just return; we
4486                  * get 4 retries, so this should be ok.
4487                  */
4488                 return NULL;
4489
4490         memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4491         smi_msg->data_size = recv_msg->msg.data_len;
4492         smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4493
4494         ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
4495
4496         return smi_msg;
4497 }
4498
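/*
 * Check one sequence-table entry for expiration.  If its timeout has not
 * elapsed, just decrement it.  If it has and retries remain, retransmit
 * the message; otherwise move it to the timeouts list so an error
 * response can be delivered.  Called with the seq_lock held; the lock is
 * dropped around the retransmit.
 */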
4499 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4500                               struct list_head *timeouts,
4501                               unsigned long timeout_period,
4502                               int slot, unsigned long *flags,
4503                               unsigned int *waiting_msgs)
4504 {
4505         struct ipmi_recv_msg *msg;
4506
4507         if (intf->in_shutdown)
4508                 return;
4509
4510         if (!ent->inuse)
4511                 return;
4512
4513         if (timeout_period < ent->timeout) {
4514                 ent->timeout -= timeout_period;
4515                 (*waiting_msgs)++;
4516                 return;
4517         }
4518
4519         if (ent->retries_left == 0) {
4520                 /* The message has used all its retries. */
4521                 ent->inuse = 0;
4522                 msg = ent->recv_msg;
4523                 list_add_tail(&msg->link, timeouts);
4524                 if (ent->broadcast)
4525                         ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4526                 else if (is_lan_addr(&ent->recv_msg->addr))
4527                         ipmi_inc_stat(intf, timed_out_lan_commands);
4528                 else
4529                         ipmi_inc_stat(intf, timed_out_ipmb_commands);
4530         } else {
4531                 struct ipmi_smi_msg *smi_msg;
4532                 /* More retries, send again. */
4533
4534                 (*waiting_msgs)++;
4535
4536                 /*
4537                  * Start with the max timeout; it is set back to the normal
4538                  * timeout after the message is sent.
4539                  */
4540                 ent->timeout = MAX_MSG_TIMEOUT;
4541                 ent->retries_left--;
4542                 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4543                                             ent->seqid);
4544                 if (!smi_msg) {
4545                         if (is_lan_addr(&ent->recv_msg->addr))
4546                                 ipmi_inc_stat(intf,
4547                                               dropped_rexmit_lan_commands);
4548                         else
4549                                 ipmi_inc_stat(intf,
4550                                               dropped_rexmit_ipmb_commands);
4551                         return;
4552                 }
4553
4554                 spin_unlock_irqrestore(&intf->seq_lock, *flags);
4555
4556                 /*
4557                  * Send the new message.  We send with a zero
4558                  * priority.  It timed out, so I doubt time is that
4559                  * critical now, and high priority messages are really
4560                  * only for messages to the local MC, which don't get
4561                  * resent.
4562                  */
4563                 if (intf->handlers) {
4564                         if (is_lan_addr(&ent->recv_msg->addr))
4565                                 ipmi_inc_stat(intf,
4566                                               retransmitted_lan_commands);
4567                         else
4568                                 ipmi_inc_stat(intf,
4569                                               retransmitted_ipmb_commands);
4570
4571                         smi_send(intf, intf->handlers, smi_msg, 0);
4572                 } else
4573                         ipmi_free_smi_msg(smi_msg);
4574
4575                 spin_lock_irqsave(&intf->seq_lock, *flags);
4576         }
4577 }
4578
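/*
 * Per-interface timeout processing: age the entries in the sequence
 * table, deliver error responses for requests that have timed out, and
 * run down the IPMB and automatic maintenance mode timers.  Returns the
 * number of messages still waiting so the caller knows whether the
 * periodic timer must keep running.
 */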
4579 static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
4580                                          unsigned long timeout_period)
4581 {
4582         struct list_head     timeouts;
4583         struct ipmi_recv_msg *msg, *msg2;
4584         unsigned long        flags;
4585         int                  i;
4586         unsigned int         waiting_msgs = 0;
4587
4588         if (!intf->bmc_registered) {
4589                 kref_get(&intf->refcount);
4590                 if (!schedule_work(&intf->bmc_reg_work)) {
4591                         kref_put(&intf->refcount, intf_free);
4592                         waiting_msgs++;
4593                 }
4594         }
4595
4596         /*
4597          * Go through the seq table and find any messages that
4598          * have timed out, putting them in the timeouts
4599          * list.
4600          */
4601         INIT_LIST_HEAD(&timeouts);
4602         spin_lock_irqsave(&intf->seq_lock, flags);
4603         if (intf->ipmb_maintenance_mode_timeout) {
4604                 if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4605                         intf->ipmb_maintenance_mode_timeout = 0;
4606                 else
4607                         intf->ipmb_maintenance_mode_timeout -= timeout_period;
4608         }
4609         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4610                 check_msg_timeout(intf, &intf->seq_table[i],
4611                                   &timeouts, timeout_period, i,
4612                                   &flags, &waiting_msgs);
4613         spin_unlock_irqrestore(&intf->seq_lock, flags);
4614
4615         list_for_each_entry_safe(msg, msg2, &timeouts, link)
4616                 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4617
4618         /*
4619          * Maintenance mode handling.  Check the timeout
4620          * optimistically before we claim the lock.  It may
4621          * mean a timeout gets missed occasionally, but that
4622          * only extends the timeout by one period.  No big deal,
4623          * and it avoids taking the lock
4624          * most of the time.
4625          */
4626         if (intf->auto_maintenance_timeout > 0) {
4627                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4628                 if (intf->auto_maintenance_timeout > 0) {
4629                         intf->auto_maintenance_timeout
4630                                 -= timeout_period;
4631                         if (!intf->maintenance_mode
4632                             && (intf->auto_maintenance_timeout <= 0)) {
4633                                 intf->maintenance_mode_enable = false;
4634                                 maintenance_mode_update(intf);
4635                         }
4636                 }
4637                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4638                                        flags);
4639         }
4640
4641         tasklet_schedule(&intf->recv_tasklet);
4642
4643         return waiting_msgs;
4644 }
4645
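/*
 * Ask the lower layer to check for pending events.  Called from the
 * periodic timer on behalf of users that are waiting for events.
 */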
4646 static void ipmi_request_event(struct ipmi_smi *intf)
4647 {
4648         /* No event requests when in maintenance mode. */
4649         if (intf->maintenance_mode_enable)
4650                 return;
4651
4652         if (!intf->in_shutdown)
4653                 intf->handlers->request_events(intf->send_info);
4654 }
4655
4656 static struct timer_list ipmi_timer;
4657
4658 static atomic_t stop_operation;
4659
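/*
 * The periodic timer.  Request events for interfaces that have waiters,
 * run timeout handling on every interface, tell the lower layer whether
 * it still needs its timers, and rearm only while some interface has
 * pending work.
 */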
4660 static void ipmi_timeout(struct timer_list *unused)
4661 {
4662         struct ipmi_smi *intf;
4663         int nt = 0, index;
4664
4665         if (atomic_read(&stop_operation))
4666                 return;
4667
4668         index = srcu_read_lock(&ipmi_interfaces_srcu);
4669         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4670                 int lnt = 0;
4671
4672                 if (atomic_read(&intf->event_waiters)) {
4673                         intf->ticks_to_req_ev--;
4674                         if (intf->ticks_to_req_ev == 0) {
4675                                 ipmi_request_event(intf);
4676                                 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4677                         }
4678                         lnt++;
4679                 }
4680
4681                 lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4682
4683                 lnt = !!lnt;
4684                 if (lnt != intf->last_needs_timer &&
4685                                         intf->handlers->set_need_watch)
4686                         intf->handlers->set_need_watch(intf->send_info, lnt);
4687                 intf->last_needs_timer = lnt;
4688
4689                 nt += lnt;
4690         }
4691         srcu_read_unlock(&ipmi_interfaces_srcu, index);
4692
4693         if (nt)
4694                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4695 }
4696
4697 static void need_waiter(struct ipmi_smi *intf)
4698 {
4699         /* Racy, but worst case we start the timer twice. */
4700         if (!timer_pending(&ipmi_timer))
4701                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4702 }
4703
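/*
 * Allocation counters for the two message types, checked at module
 * removal to warn about leaked messages.
 */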
4704 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4705 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4706
4707 static void free_smi_msg(struct ipmi_smi_msg *msg)
4708 {
4709         atomic_dec(&smi_msg_inuse_count);
4710         kfree(msg);
4711 }
4712
4713 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4714 {
4715         struct ipmi_smi_msg *rv;
4716         rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4717         if (rv) {
4718                 rv->done = free_smi_msg;
4719                 rv->user_data = NULL;
4720                 atomic_inc(&smi_msg_inuse_count);
4721         }
4722         return rv;
4723 }
4724 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4725
4726 static void free_recv_msg(struct ipmi_recv_msg *msg)
4727 {
4728         atomic_dec(&recv_msg_inuse_count);
4729         kfree(msg);
4730 }
4731
4732 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4733 {
4734         struct ipmi_recv_msg *rv;
4735
4736         rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4737         if (rv) {
4738                 rv->user = NULL;
4739                 rv->done = free_recv_msg;
4740                 atomic_inc(&recv_msg_inuse_count);
4741         }
4742         return rv;
4743 }
4744
4745 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4746 {
4747         if (msg->user)
4748                 kref_put(&msg->user->refcount, free_user);
4749         msg->done(msg);
4750 }
4751 EXPORT_SYMBOL(ipmi_free_recv_msg);
4752
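/*
 * Count of panic-time messages still outstanding; the panic code polls
 * the interface until this drops back to zero.
 */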
4753 static atomic_t panic_done_count = ATOMIC_INIT(0);
4754
4755 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4756 {
4757         atomic_dec(&panic_done_count);
4758 }
4759
4760 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4761 {
4762         atomic_dec(&panic_done_count);
4763 }
4764
4765 /*
4766  * Inside a panic, send a message and wait for a response.
4767  */
4768 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4769                                         struct ipmi_addr *addr,
4770                                         struct kernel_ipmi_msg *msg)
4771 {
4772         struct ipmi_smi_msg  smi_msg;
4773         struct ipmi_recv_msg recv_msg;
4774         int rv;
4775
4776         smi_msg.done = dummy_smi_done_handler;
4777         recv_msg.done = dummy_recv_done_handler;
4778         atomic_add(2, &panic_done_count);
4779         rv = i_ipmi_request(NULL,
4780                             intf,
4781                             addr,
4782                             0,
4783                             msg,
4784                             intf,
4785                             &smi_msg,
4786                             &recv_msg,
4787                             0,
4788                             intf->addrinfo[0].address,
4789                             intf->addrinfo[0].lun,
4790                             0, 1); /* Don't retry, and don't wait. */
4791         if (rv)
4792                 atomic_sub(2, &panic_done_count);
4793         else if (intf->handlers->flush_messages)
4794                 intf->handlers->flush_messages(intf->send_info);
4795
4796         while (atomic_read(&panic_done_count) != 0)
4797                 ipmi_poll(intf);
4798 }
4799
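/*
 * The two fetchers below are installed as intf->null_user_handler while
 * sending panic events; they pull the interesting fields out of the Get
 * Event Receiver and Get Device ID responses.
 */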
4800 static void event_receiver_fetcher(struct ipmi_smi *intf,
4801                                    struct ipmi_recv_msg *msg)
4802 {
4803         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4804             && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4805             && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4806             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4807                 /* A get event receiver command, save it. */
4808                 intf->event_receiver = msg->msg.data[1];
4809                 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4810         }
4811 }
4812
4813 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4814 {
4815         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4816             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4817             && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4818             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4819                 /*
4820                  * A get device id command; save whether we are an event
4821                  * receiver or generator.
4822                  */
4823                 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4824                 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4825         }
4826 }
4827
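/*
 * Send an "OS Critical Stop" event for the panic and, if configured for
 * it, push the panic string out as a series of OEM records, either to
 * the event receiver or to the local SEL.  Everything is done with
 * polled, unretried requests since we are panicking.
 */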
4828 static void send_panic_events(struct ipmi_smi *intf, char *str)
4829 {
4830         struct kernel_ipmi_msg msg;
4831         unsigned char data[16];
4832         struct ipmi_system_interface_addr *si;
4833         struct ipmi_addr addr;
4834         char *p = str;
4835         struct ipmi_ipmb_addr *ipmb;
4836         int j;
4837
4838         if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4839                 return;
4840
4841         si = (struct ipmi_system_interface_addr *) &addr;
4842         si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4843         si->channel = IPMI_BMC_CHANNEL;
4844         si->lun = 0;
4845
4846         /* Fill in an event saying that we have failed. */
4847         msg.netfn = 0x04; /* Sensor or Event. */
4848         msg.cmd = 2; /* Platform event command. */
4849         msg.data = data;
4850         msg.data_len = 8;
4851         data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4852         data[1] = 0x03; /* This is for IPMI 1.0. */
4853         data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4854         data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4855         data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4856
4857         /*
4858          * Put a few breadcrumbs in.  Hopefully later we can add more things
4859          * to make the panic events more useful.
4860          */
4861         if (str) {
4862                 data[3] = str[0];
4863                 data[6] = str[1];
4864                 data[7] = str[2];
4865         }
4866
4867         /* Send the event announcing the panic. */
4868         ipmi_panic_request_and_wait(intf, &addr, &msg);
4869
4870         /*
4871          * On every interface, dump a bunch of OEM events holding the
4872          * string.
4873          */
4874         if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
4875                 return;
4876
4877         /*
4878          * intf_num is used as a marker to tell if the
4879          * interface is valid.  Thus we need a read barrier to
4880          * make sure data fetched before checking intf_num
4881          * won't be used.
4882          */
4883         smp_rmb();
4884
4885         /*
4886          * First job here is to figure out where to send the
4887          * OEM events.  There's no way in IPMI to send OEM
4888          * events using an event send command, so we have to
4889          * find the SEL to put them in and stick them in
4890          * there.
4891          */
4892
4893         /* Get capabilities from the get device id. */
4894         intf->local_sel_device = 0;
4895         intf->local_event_generator = 0;
4896         intf->event_receiver = 0;
4897
4898         /* Request the device info from the local MC. */
4899         msg.netfn = IPMI_NETFN_APP_REQUEST;
4900         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4901         msg.data = NULL;
4902         msg.data_len = 0;
4903         intf->null_user_handler = device_id_fetcher;
4904         ipmi_panic_request_and_wait(intf, &addr, &msg);
4905
4906         if (intf->local_event_generator) {
4907                 /* Request the event receiver from the local MC. */
4908                 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4909                 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4910                 msg.data = NULL;
4911                 msg.data_len = 0;
4912                 intf->null_user_handler = event_receiver_fetcher;
4913                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4914         }
4915         intf->null_user_handler = NULL;
4916
4917         /*
4918          * Validate the event receiver.  The low bit must not
4919          * be 1 (it must be a valid IPMB address), it cannot
4920          * be zero, and it must not be my address.
4921          */
4922         if (((intf->event_receiver & 1) == 0)
4923             && (intf->event_receiver != 0)
4924             && (intf->event_receiver != intf->addrinfo[0].address)) {
4925                 /*
4926                  * The event receiver is valid, send an IPMB
4927                  * message.
4928                  */
4929                 ipmb = (struct ipmi_ipmb_addr *) &addr;
4930                 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4931                 ipmb->channel = 0; /* FIXME - is this right? */
4932                 ipmb->lun = intf->event_receiver_lun;
4933                 ipmb->slave_addr = intf->event_receiver;
4934         } else if (intf->local_sel_device) {
4935                 /*
4936                  * The event receiver was not valid (or was
4937                  * me), but I am an SEL device, so just dump it
4938                  * in my SEL.
4939                  */
4940                 si = (struct ipmi_system_interface_addr *) &addr;
4941                 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4942                 si->channel = IPMI_BMC_CHANNEL;
4943                 si->lun = 0;
4944         } else
4945                 return; /* Nowhere to send the event. */
4946
4947         msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4948         msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4949         msg.data = data;
4950         msg.data_len = 16;
4951
4952         j = 0;
4953         while (*p) {
4954                 int size = strlen(p);
4955
4956                 if (size > 11)
4957                         size = 11;
4958                 data[0] = 0;
4959                 data[1] = 0;
4960                 data[2] = 0xf0; /* OEM event without timestamp. */
4961                 data[3] = intf->addrinfo[0].address;
4962                 data[4] = j++; /* sequence # */
4963                 /*
4964                  * Always give 11 bytes, so strncpy will fill
4965                  * it with zeroes for me.
4966                  */
4967                 strncpy(data+5, p, 11);
4968                 p += size;
4969
4970                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4971         }
4972 }
4973
4974 static int has_panicked;
4975
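/*
 * Panic notifier.  Switch every ready interface to run-to-completion
 * (polled) mode, recover the message lists if the panic interrupted
 * someone holding their locks, notify users that registered a panic
 * handler, and then send the panic events.
 */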
4976 static int panic_event(struct notifier_block *this,
4977                        unsigned long         event,
4978                        void                  *ptr)
4979 {
4980         struct ipmi_smi *intf;
4981         struct ipmi_user *user;
4982
4983         if (has_panicked)
4984                 return NOTIFY_DONE;
4985         has_panicked = 1;
4986
4987         /* For every registered interface, set it to run to completion. */
4988         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4989                 if (!intf->handlers || intf->intf_num == -1)
4990                         /* Interface is not ready. */
4991                         continue;
4992
4993                 if (!intf->handlers->poll)
4994                         continue;
4995
4996                 /*
4997                  * If we were interrupted while locking xmit_msgs_lock or
4998                  * waiting_rcv_msgs_lock, the corresponding list may be
4999                  * corrupted.  In this case, drop items on the list for
5000                  * safety.
5001                  */
5002                 if (!spin_trylock(&intf->xmit_msgs_lock)) {
5003                         INIT_LIST_HEAD(&intf->xmit_msgs);
5004                         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5005                 } else
5006                         spin_unlock(&intf->xmit_msgs_lock);
5007
5008                 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5009                         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5010                 else
5011                         spin_unlock(&intf->waiting_rcv_msgs_lock);
5012
5013                 intf->run_to_completion = 1;
5014                 if (intf->handlers->set_run_to_completion)
5015                         intf->handlers->set_run_to_completion(intf->send_info,
5016                                                               1);
5017
5018                 list_for_each_entry_rcu(user, &intf->users, link) {
5019                         if (user->handler->ipmi_panic_handler)
5020                                 user->handler->ipmi_panic_handler(
5021                                         user->handler_data);
5022                 }
5023
5024                 send_panic_events(intf, ptr);
5025         }
5026
5027         return NOTIFY_DONE;
5028 }
5029
5030 static struct notifier_block panic_block = {
5031         .notifier_call  = panic_event,
5032         .next           = NULL,
5033         .priority       = 200   /* priority: INT_MAX >= x >= 0 */
5034 };
5035
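/*
 * One-time initialization of the message handler: register the driver,
 * start the periodic timer, and hook the panic notifier.  Subsequent
 * calls are no-ops.
 */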
5036 static int ipmi_init_msghandler(void)
5037 {
5038         int rv;
5039
5040         if (initialized)
5041                 return 0;
5042
5043         rv = driver_register(&ipmidriver.driver);
5044         if (rv) {
5045                 pr_err("Could not register IPMI driver\n");
5046                 return rv;
5047         }
5048
5049         pr_info("version " IPMI_DRIVER_VERSION "\n");
5050
5051         timer_setup(&ipmi_timer, ipmi_timeout, 0);
5052         mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5053
5054         atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5055
5056         initialized = 1;
5057
5058         return 0;
5059 }
5060
5061 static int __init ipmi_init_msghandler_mod(void)
5062 {
5063         ipmi_init_msghandler();
5064         return 0;
5065 }
5066
5067 static void __exit cleanup_ipmi(void)
5068 {
5069         int count;
5070
5071         if (!initialized)
5072                 return;
5073
5074         atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
5075
5076         /*
5077          * This can't be called if any interfaces exist, so no worry
5078          * about shutting down the interfaces.
5079          */
5080
5081         /*
5082          * Tell the timer to stop, then wait for it to stop.  This
5083          * avoids problems with race conditions removing the timer
5084          * here.
5085          */
5086         atomic_inc(&stop_operation);
5087         del_timer_sync(&ipmi_timer);
5088
5089         driver_unregister(&ipmidriver.driver);
5090
5091         initialized = 0;
5092
5093         /* Check for buffer leaks. */
5094         count = atomic_read(&smi_msg_inuse_count);
5095         if (count != 0)
5096                 pr_warn("SMI message count %d at exit\n", count);
5097         count = atomic_read(&recv_msg_inuse_count);
5098         if (count != 0)
5099                 pr_warn("recv message count %d at exit\n", count);
5100 }
5101 module_exit(cleanup_ipmi);
5102
5103 module_init(ipmi_init_msghandler_mod);
5104 MODULE_LICENSE("GPL");
5105 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5106 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5107                    " interface.");
5108 MODULE_VERSION(IPMI_DRIVER_VERSION);
5109 MODULE_SOFTDEP("post: ipmi_devintf");