1 /*
2 * ipmi_msghandler.c
3 *
4 * Incoming and outgoing message routing for an IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/poll.h>
37 #include <linux/sched.h>
38 #include <linux/seq_file.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
48 #include <linux/interrupt.h>
49
/* Prefix for all log messages from this file. */
#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.2"

/* Forward declarations for routines defined later in this file. */
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(ipmi_smi_t intf);

/* Nonzero once ipmi_init_msghandler() has completed successfully. */
static int initialized;

#ifdef CONFIG_PROC_FS
/* Root of the /proc hierarchy created for the IPMI driver. */
static struct proc_dir_entry *proc_ipmi_root;
#endif /* CONFIG_PROC_FS */

/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000

/* Maximum number of events held for users that are not fetching them. */
#define MAX_EVENTS_IN_QUEUE 25

/*
 * Don't let a message sit in a queue forever, always time it with at least
 * the max message timer. This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT 60000
75
/*
 * The main "user" data structure.  One of these exists per registered
 * upper-layer client of an interface (see ipmi_create_user()).
 */
struct ipmi_user {
	/* Entry in the owning interface's ->users list. */
	struct list_head link;

	/* Set to "0" when the user is destroyed. */
	int valid;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this user want to receive IPMI events? */
	int gets_events;
};
97
/*
 * Registration of a user for a particular (netfn, cmd, channels)
 * combination; incoming commands matching it are routed to that user.
 */
struct cmd_rcvr {
	struct list_head link;

	ipmi_user_t user;
	unsigned char netfn;
	unsigned char cmd;
	/* Bitmask of channels this registration applies to. */
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes. So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};
116
/*
 * One entry of the per-interface sequence table used to match IPMB
 * command responses back to the message that was sent.
 */
struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	/* Remaining / original timeouts, in ms (counted down by a timer). */
	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};
139
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 *
 * The msgid packs a 6-bit sequence table index (IPMI_IPMB_NUM_SEQ is
 * 64, so 6 bits suffice) into bits 26-31 and a 22-bit sequence id
 * into bits 0-21.  The seq is widened to long before shifting so the
 * arithmetic cannot overflow a 32-bit int.
 *
 * Note: the masks here were previously inconsistent (store used
 * seq & 0xff and seqid & 0x3ffffff while extraction and NEXT_SEQID
 * used 6 and 22 bits); they are now aligned.  Behavior is unchanged
 * for all reachable values, since NEXT_SEQID is the only producer of
 * seqids and already confines them to 22 bits.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((long) (seq) & 0x3f) << 26) | ((seqid) & 0x3fffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3fffff);			\
	} while (0)

/* Sequence ids wrap at 22 bits, matching the msgid masks above. */
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
153
/* Per-channel configuration and addressing information. */
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;

	/*
	 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN. This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

#ifdef CONFIG_PROC_FS
/* One entry in an interface's list of procfs files it created. */
struct ipmi_proc_entry {
	char *name;
	struct ipmi_proc_entry *next;
};
#endif
177
/*
 * Driver-model representation of a BMC, shared by all interfaces that
 * talk to the same management controller (refcounted).
 */
struct bmc_device {
	struct platform_device *dev;
	/* Result of the Get Device ID command. */
	struct ipmi_device_id id;
	unsigned char guid[16];
	/* Nonzero when guid[] above holds a valid GUID. */
	int guid_set;

	struct kref refcount;

	/* bmc device attributes */
	struct device_attribute device_id_attr;
	struct device_attribute provides_dev_sdrs_attr;
	struct device_attribute revision_attr;
	struct device_attribute firmware_rev_attr;
	struct device_attribute version_attr;
	struct device_attribute add_dev_support_attr;
	struct device_attribute manufacturer_id_attr;
	struct device_attribute product_id_attr;
	struct device_attribute guid_attr;
	struct device_attribute aux_firmware_rev_attr;
};
198
/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};
301
302
#define IPMI_IPMB_NUM_SEQ 64
#define IPMI_MAX_CHANNELS 16
/*
 * Per-interface state for one SMI (system management interface).
 * Created at interface registration time and freed by intf_free()
 * when the last kref is dropped.
 */
struct ipmi_smi {
	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock
	 * protects this.
	 */
	struct list_head users;

	/* Information to supply to users. */
	unsigned char ipmi_version_major;
	unsigned char ipmi_version_minor;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	struct bmc_device *bmc;
	char *my_dev_name;
	char *sysfs_name;

	/*
	 * This is the lower-layer's sender routine.  Note that you
	 * must either be holding the ipmi_interfaces_mutex or be in
	 * an unpreemptible region to use this.  You must fetch the
	 * value into a local variable and make sure it is not NULL.
	 */
	struct ipmi_smi_handlers *handlers;
	void *send_info;

#ifdef CONFIG_PROC_FS
	/* A list of proc entries for this interface. */
	struct mutex proc_entry_lock;
	struct ipmi_proc_entry *proc_entries;
#endif

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), They will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t waiting_msgs_lock;
	struct list_head waiting_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	int maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 */
	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel channels[IPMI_MAX_CHANNELS];

	/* Proc FS stuff. */
	struct proc_dir_entry *proc_dir;
	char proc_dir_name[10];

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures. Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
/*
 * NOTE(review): struct ipmi_smi has no 'dev' member, so this macro
 * cannot expand to valid code; it looks stale/unused -- confirm
 * before using it anywhere.
 */
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
433
/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
static DEFINE_MUTEX(ipmidriver_mutex);

/* All registered interfaces, protected by ipmi_interfaces_mutex. */
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);


/* Bump / read one of the per-interface counters in intf->stats[]. */
#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
459
is_lan_addr(struct ipmi_addr * addr)460 static int is_lan_addr(struct ipmi_addr *addr)
461 {
462 return addr->addr_type == IPMI_LAN_ADDR_TYPE;
463 }
464
is_ipmb_addr(struct ipmi_addr * addr)465 static int is_ipmb_addr(struct ipmi_addr *addr)
466 {
467 return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
468 }
469
is_ipmb_bcast_addr(struct ipmi_addr * addr)470 static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
471 {
472 return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
473 }
474
free_recv_msg_list(struct list_head * q)475 static void free_recv_msg_list(struct list_head *q)
476 {
477 struct ipmi_recv_msg *msg, *msg2;
478
479 list_for_each_entry_safe(msg, msg2, q, link) {
480 list_del(&msg->link);
481 ipmi_free_recv_msg(msg);
482 }
483 }
484
free_smi_msg_list(struct list_head * q)485 static void free_smi_msg_list(struct list_head *q)
486 {
487 struct ipmi_smi_msg *msg, *msg2;
488
489 list_for_each_entry_safe(msg, msg2, q, link) {
490 list_del(&msg->link);
491 ipmi_free_smi_msg(msg);
492 }
493 }
494
/*
 * Free all per-interface state held by @intf: pending messages,
 * queued events, registered command receivers, and any receive
 * messages still parked in the sequence table.  Called from
 * intf_free() when the last reference to the interface drops.
 */
static void clean_up_interface_data(ipmi_smi_t intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	/* Ensure the receive tasklet is finished and cannot rearm. */
	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	/* Grace period has passed; the receivers can be freed safely. */
	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	/* Release any receive message still owned by a sequence entry. */
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}
524
intf_free(struct kref * ref)525 static void intf_free(struct kref *ref)
526 {
527 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
528
529 clean_up_interface_data(intf);
530 kfree(intf);
531 }
532
/* Deferred new_smi() notification built by ipmi_smi_watcher_register(). */
struct watcher_entry {
	int intf_num;
	ipmi_smi_t intf;
	struct list_head link;
};
538
/*
 * Register @watcher for notification when interfaces are added or
 * removed.  Before returning, new_smi() is invoked (without
 * ipmi_interfaces_mutex held) once for every interface that already
 * exists.  Returns 0 on success, -ENOMEM if the notification list
 * could not be built.
 */
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	ipmi_smi_t intf;
	LIST_HEAD(to_deliver);
	struct watcher_entry *e, *e2;

	mutex_lock(&smi_watchers_mutex);

	mutex_lock(&ipmi_interfaces_mutex);

	/* Build a list of things to deliver. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		/* Skip interfaces that have not been given a number yet. */
		if (intf->intf_num == -1)
			continue;
		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out_err;
		/* Hold a ref so the interface outlives the callback below. */
		kref_get(&intf->refcount);
		e->intf = intf;
		e->intf_num = intf->intf_num;
		list_add_tail(&e->link, &to_deliver);
	}

	/* We will succeed, so add it to the list. */
	list_add(&watcher->link, &smi_watchers);

	mutex_unlock(&ipmi_interfaces_mutex);

	/* Deliver callbacks without holding ipmi_interfaces_mutex. */
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		watcher->new_smi(e->intf_num, e->intf->si_dev);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}

	mutex_unlock(&smi_watchers_mutex);

	return 0;

 out_err:
	/* Undo the partial notification list and drop the references. */
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}
	return -ENOMEM;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
589
ipmi_smi_watcher_unregister(struct ipmi_smi_watcher * watcher)590 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
591 {
592 mutex_lock(&smi_watchers_mutex);
593 list_del(&(watcher->link));
594 mutex_unlock(&smi_watchers_mutex);
595 return 0;
596 }
597 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
598
599 /*
600 * Must be called with smi_watchers_mutex held.
601 */
602 static void
call_smi_watchers(int i,struct device * dev)603 call_smi_watchers(int i, struct device *dev)
604 {
605 struct ipmi_smi_watcher *w;
606
607 list_for_each_entry(w, &smi_watchers, link) {
608 if (try_module_get(w->owner)) {
609 w->new_smi(i, dev);
610 module_put(w->owner);
611 }
612 }
613 }
614
615 static int
ipmi_addr_equal(struct ipmi_addr * addr1,struct ipmi_addr * addr2)616 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
617 {
618 if (addr1->addr_type != addr2->addr_type)
619 return 0;
620
621 if (addr1->channel != addr2->channel)
622 return 0;
623
624 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
625 struct ipmi_system_interface_addr *smi_addr1
626 = (struct ipmi_system_interface_addr *) addr1;
627 struct ipmi_system_interface_addr *smi_addr2
628 = (struct ipmi_system_interface_addr *) addr2;
629 return (smi_addr1->lun == smi_addr2->lun);
630 }
631
632 if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
633 struct ipmi_ipmb_addr *ipmb_addr1
634 = (struct ipmi_ipmb_addr *) addr1;
635 struct ipmi_ipmb_addr *ipmb_addr2
636 = (struct ipmi_ipmb_addr *) addr2;
637
638 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
639 && (ipmb_addr1->lun == ipmb_addr2->lun));
640 }
641
642 if (is_lan_addr(addr1)) {
643 struct ipmi_lan_addr *lan_addr1
644 = (struct ipmi_lan_addr *) addr1;
645 struct ipmi_lan_addr *lan_addr2
646 = (struct ipmi_lan_addr *) addr2;
647
648 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
649 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
650 && (lan_addr1->session_handle
651 == lan_addr2->session_handle)
652 && (lan_addr1->lun == lan_addr2->lun));
653 }
654
655 return 1;
656 }
657
ipmi_validate_addr(struct ipmi_addr * addr,int len)658 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
659 {
660 if (len < sizeof(struct ipmi_system_interface_addr))
661 return -EINVAL;
662
663 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
664 if (addr->channel != IPMI_BMC_CHANNEL)
665 return -EINVAL;
666 return 0;
667 }
668
669 if ((addr->channel == IPMI_BMC_CHANNEL)
670 || (addr->channel >= IPMI_MAX_CHANNELS)
671 || (addr->channel < 0))
672 return -EINVAL;
673
674 if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
675 if (len < sizeof(struct ipmi_ipmb_addr))
676 return -EINVAL;
677 return 0;
678 }
679
680 if (is_lan_addr(addr)) {
681 if (len < sizeof(struct ipmi_lan_addr))
682 return -EINVAL;
683 return 0;
684 }
685
686 return -EINVAL;
687 }
688 EXPORT_SYMBOL(ipmi_validate_addr);
689
ipmi_addr_length(int addr_type)690 unsigned int ipmi_addr_length(int addr_type)
691 {
692 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
693 return sizeof(struct ipmi_system_interface_addr);
694
695 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
696 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
697 return sizeof(struct ipmi_ipmb_addr);
698
699 if (addr_type == IPMI_LAN_ADDR_TYPE)
700 return sizeof(struct ipmi_lan_addr);
701
702 return 0;
703 }
704 EXPORT_SYMBOL(ipmi_addr_length);
705
/*
 * Hand a completed receive message to its owner.  For messages with
 * a NULL user, user_msg_data holds the owning ipmi_smi_t: the message
 * goes to the interface's null_user_handler (if any) and is freed
 * here.  For messages with a user, the user's receive handler takes
 * over ownership of the message.
 */
static void deliver_response(struct ipmi_recv_msg *msg)
{
	if (!msg->user) {
		ipmi_smi_t intf = msg->user_msg_data;

		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
			ipmi_inc_stat(intf, handled_local_responses);
		} else {
			/* No handler, so give up. */
			ipmi_inc_stat(intf, unhandled_local_responses);
		}
		ipmi_free_recv_msg(msg);
	} else {
		ipmi_user_t user = msg->user;
		user->handler->ipmi_recv_hndl(msg, user->handler_data);
	}
}
725
726 static void
deliver_err_response(struct ipmi_recv_msg * msg,int err)727 deliver_err_response(struct ipmi_recv_msg *msg, int err)
728 {
729 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
730 msg->msg_data[0] = err;
731 msg->msg.netfn |= 1; /* Convert to a response. */
732 msg->msg.data_len = 1;
733 msg->msg.data = msg->msg_data;
734 deliver_response(msg);
735 }
736
/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 *
 * On success, *seq receives the chosen table index and *seqid the
 * entry's new sequence id; the table entry holds @recv_msg until it
 * is claimed or times out.  Returns -EAGAIN when every entry is in
 * use.
 */
static int intf_next_seq(ipmi_smi_t intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	/* Scan forward (with wraparound) from curr_seq for a free slot. */
	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
781
/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 *
 * Returns 0 (storing the message in *recv_msg) on a match, -EINVAL
 * for an out-of-range @seq, or -ENODEV when no matching in-use entry
 * exists.
 */
static int intf_find_seq(ipmi_smi_t intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&(intf->seq_lock), flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		/* Entry must also match on channel, cmd, netfn and address. */
		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &(msg->addr)))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	return rv;
}
819
820
/*
 * Start the timer for a specific sequence table entry: reset the
 * entry's countdown back to its original timeout.  Returns 0 on
 * success or -ENODEV when the entry is no longer in use or its seqid
 * does not match @msgid.
 */
static int intf_start_seq_timer(ipmi_smi_t intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&(intf->seq_lock), flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &(intf->seq_table[seq]);
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	return rv;
}
848
/*
 * Got an error for the send message for a specific sequence number.
 * Release the entry and, if it held a receive message, deliver that
 * message to its owner as an error response with completion code
 * @err.  Returns 0 on success or -ENODEV when the entry no longer
 * matches @msgid.
 */
static int intf_err_seq(ipmi_smi_t intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&(intf->seq_lock), flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &(intf->seq_table[seq]);

		ent->inuse = 0;
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	/* Deliver outside the spinlock; delivery may call user code. */
	if (msg)
		deliver_err_response(msg, err);

	return rv;
}
883
884
/*
 * Create a new user of interface @if_num.  The new user holds a
 * reference on the interface and a module reference on the lower
 * layer for its lifetime; both are released by ipmi_destroy_user().
 * On success, 0 is returned and the user is stored in *user.
 * Errors: -EINVAL (NULL handler or unknown interface number),
 * -ENOMEM, -ENODEV (driver not initialized or lower layer going
 * away), or the lower layer's inc_usecount() error.
 */
int ipmi_create_user(unsigned int if_num,
		     struct ipmi_user_hndl *handler,
		     void *handler_data,
		     ipmi_user_t *user)
{
	unsigned long flags;
	ipmi_user_t new_user;
	int rv = 0;
	ipmi_smi_t intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;

		/*
		 * The init code doesn't return an error if it was turned
		 * off, but it won't initialize.  Check that.
		 */
		if (!initialized)
			return -ENODEV;
	}

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	if (!new_user)
		return -ENOMEM;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = 0;

	/* Pin the lower-layer module so it cannot unload under us. */
	if (!try_module_get(intf->handlers->owner)) {
		rv = -ENODEV;
		goto out_kref;
	}

	if (intf->handlers->inc_usecount) {
		rv = intf->handlers->inc_usecount(intf->send_info);
		if (rv) {
			module_put(intf->handlers->owner);
			goto out_kref;
		}
	}

	/*
	 * Hold the lock so intf->handlers is guaranteed to be good
	 * until now
	 */
	mutex_unlock(&ipmi_interfaces_mutex);

	/* Publish the user; seq_lock protects the interface user list. */
	new_user->valid = 1;
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	*user = new_user;
	return 0;

 out_kref:
	kref_put(&intf->refcount, intf_free);
 out_kfree:
	mutex_unlock(&ipmi_interfaces_mutex);
	kfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
980
ipmi_get_smi_info(int if_num,struct ipmi_smi_info * data)981 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
982 {
983 int rv = 0;
984 ipmi_smi_t intf;
985 struct ipmi_smi_handlers *handlers;
986
987 mutex_lock(&ipmi_interfaces_mutex);
988 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
989 if (intf->intf_num == if_num)
990 goto found;
991 }
992 /* Not found, return an error */
993 rv = -EINVAL;
994 mutex_unlock(&ipmi_interfaces_mutex);
995 return rv;
996
997 found:
998 handlers = intf->handlers;
999 rv = -ENOSYS;
1000 if (handlers->get_smi_info)
1001 rv = handlers->get_smi_info(intf->send_info, data);
1002 mutex_unlock(&ipmi_interfaces_mutex);
1003
1004 return rv;
1005 }
1006 EXPORT_SYMBOL(ipmi_get_smi_info);
1007
free_user(struct kref * ref)1008 static void free_user(struct kref *ref)
1009 {
1010 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
1011 kfree(user);
1012 }
1013
/*
 * Tear down @user: mark it invalid, reclaim any sequence-table
 * messages it owns, unregister its command receivers, and release
 * the module and interface references taken in ipmi_create_user().
 * The user structure itself is freed when its last kref drops.
 * Always returns 0.
 */
int ipmi_destroy_user(ipmi_user_t user)
{
	ipmi_smi_t intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	/* Mark dead first so delivery paths stop using this user. */
	user->valid = 0;

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	/* Grace period over; the unlinked receivers can be freed. */
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	/* Drop the lower-layer module reference taken at create time. */
	mutex_lock(&ipmi_interfaces_mutex);
	if (intf->handlers) {
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	kref_put(&intf->refcount, intf_free);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
1074
/* Report the IPMI version of the interface @user is attached to. */
void ipmi_get_version(ipmi_user_t user,
		      unsigned char *major,
		      unsigned char *minor)
{
	ipmi_smi_t intf = user->intf;

	*major = intf->ipmi_version_major;
	*minor = intf->ipmi_version_minor;
}
EXPORT_SYMBOL(ipmi_get_version);
1083
/*
 * Set the slave address used on @channel.  Returns -EINVAL for an
 * out-of-range channel, 0 otherwise.
 */
int ipmi_set_my_address(ipmi_user_t user,
			unsigned int channel,
			unsigned char address)
{
	ipmi_smi_t intf = user->intf;

	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;

	intf->channels[channel].address = address;
	return 0;
}
EXPORT_SYMBOL(ipmi_set_my_address);
1094
/*
 * Fetch the slave address used on @channel into *address.  Returns
 * -EINVAL for an out-of-range channel, 0 otherwise.
 */
int ipmi_get_my_address(ipmi_user_t user,
			unsigned int channel,
			unsigned char *address)
{
	ipmi_smi_t intf = user->intf;

	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;

	*address = intf->channels[channel].address;
	return 0;
}
EXPORT_SYMBOL(ipmi_get_my_address);
1105
/* Set the source LUN used on the given channel. */
int ipmi_set_my_LUN(ipmi_user_t user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int rv = -EINVAL;

	if (channel < IPMI_MAX_CHANNELS) {
		/* Only the low two bits of a LUN are significant. */
		user->intf->channels[channel].lun = LUN & 0x3;
		rv = 0;
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);
1116
/* Fetch the source LUN used on the given channel. */
int ipmi_get_my_LUN(ipmi_user_t user,
		    unsigned int channel,
		    unsigned char *address)
{
	int rv = -EINVAL;

	if (channel < IPMI_MAX_CHANNELS) {
		*address = user->intf->channels[channel].lun;
		rv = 0;
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);
1127
/* Read the current maintenance mode setting, under its lock. */
int ipmi_get_maintenance_mode(ipmi_user_t user)
{
	ipmi_smi_t intf = user->intf;
	unsigned long flags;
	int mode;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	mode = intf->maintenance_mode;
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1140
/* Push the current maintenance-mode enable state down to the SMI,
   if the lower layer cares about it. */
static void maintenance_mode_update(ipmi_smi_t intf)
{
	struct ipmi_smi_handlers *handlers = intf->handlers;

	if (handlers->set_maintenance_mode)
		handlers->set_maintenance_mode(intf->send_info,
					       intf->maintenance_mode_enable);
}
1147
/*
 * Change the maintenance mode (off/on/auto).  Returns -EINVAL for an
 * unknown mode.  The lower layer is notified when the setting changes.
 */
int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
{
	ipmi_smi_t intf = user->intf;
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		int enable;

		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			/* In auto mode, enabled only while the timer runs. */
			enable = (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			enable = 0;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			enable = 1;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}

		intf->maintenance_mode = mode;
		intf->maintenance_mode_enable = enable;
		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1186
/*
 * Turn event reception on or off for a user.  When enabling, any events
 * queued on the interface are delivered immediately.  Delivery happens
 * with events_lock dropped, so delivering_events serializes concurrent
 * callers and the loop re-checks the queue after each batch.
 */
int ipmi_set_gets_events(ipmi_user_t user, int val)
{
	unsigned long flags;
	ipmi_smi_t intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	user->gets_events = val;

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		/* Snapshot the whole queue onto a private list. */
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			printk(KERN_WARNING PFX "Event queue no longer"
			       " full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		/* Deliver outside the lock; each message pins the user. */
		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_response(msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
1236
/*
 * Look up the receiver registered for a netfn/cmd on a channel.
 * Caller must hold either the RCU read lock or cmd_rcvrs_mutex.
 */
static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->netfn != netfn)
			continue;
		if (rcvr->cmd != cmd)
			continue;
		if (rcvr->chans & (1 << chan))
			return rcvr;
	}
	return NULL;
}
1251
/*
 * Return 1 if no registered receiver for netfn/cmd overlaps any of the
 * channels in the mask, 0 otherwise.  Caller holds cmd_rcvrs_mutex.
 */
static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->netfn != netfn || rcvr->cmd != cmd)
			continue;
		if (rcvr->chans & chans)
			return 0;
	}
	return 1;
}
1266
/*
 * Register a user to receive a given netfn/cmd on a set of channels.
 * Fails with -EBUSY if any of those channels already has a receiver
 * for that command.
 */
int ipmi_register_for_cmd(ipmi_user_t user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr)
		return -ENOMEM;
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (is_cmd_rcvr_exclusive(intf, netfn, cmd, chans))
		list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
	else
		rv = -EBUSY;
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	if (rv)
		kfree(rcvr);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
1302
/*
 * Remove this user's registration for netfn/cmd on the given channel
 * mask.  Receivers whose channel mask becomes empty are unlinked under
 * the mutex but only freed after synchronize_rcu(), since readers may
 * still be traversing the list.  Returns -ENOENT if nothing matched.
 */
int ipmi_unregister_for_cmd(ipmi_user_t user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;	/* chain of receivers to free */
	int i, rv = -ENOENT;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				/* No channels left; unlink and queue for free. */
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1340
/*
 * IPMB checksum: two's complement of the 8-bit sum of the bytes, so
 * that adding the data bytes and the checksum gives zero (mod 256).
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < size; i++)
		sum += data[i];

	return -sum;
}
1351
/*
 * Build a Send Message request carrying an IPMB message.  "i" (0 or 1)
 * is the extra offset a broadcast introduces: a broadcast is the same
 * message with a single zero byte prepended to the IPMB portion.
 */
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	/* Header checksum covers destination address and netfn/lun. */
	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[i+9]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[i+6]),
				smi_msg->data_size-6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
1395
/*
 * Build a Send Message request carrying a LAN-bridged message.  Layout
 * parallels format_ipmb_msg() but with session handle and SWIDs in
 * place of slave addresses, and no broadcast variant.
 */
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	/* Header checksum covers remote SWID and netfn/lun. */
	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[10]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[7]),
				smi_msg->data_size-7);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
1434
1435 /*
1436 * Separate from ipmi_request so that the user does not have to be
1437 * supplied in certain circumstances (mainly at panic time). If
1438 * messages are supplied, they will be freed, even if an error
1439 * occurs.
1440 */
static int i_ipmi_request(ipmi_user_t user,
			  ipmi_smi_t intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  void *user_msg_data,
			  void *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int priority,
			  unsigned char source_address,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	int rv = 0;
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;
	struct ipmi_smi_handlers *handlers;


	/*
	 * Use caller-supplied message buffers when given, otherwise
	 * allocate.  Both are freed on every error path (see out_err),
	 * matching the "messages are freed even on error" contract.
	 */
	if (supplied_recv)
		recv_msg = supplied_recv;
	else {
		recv_msg = ipmi_alloc_recv_msg();
		if (recv_msg == NULL)
			return -ENOMEM;
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			ipmi_free_recv_msg(recv_msg);
			return -ENOMEM;
		}
	}

	/* RCU protects intf->handlers against a module unregistering. */
	rcu_read_lock();
	handlers = intf->handlers;
	if (!handlers) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->user = user;
	if (user)
		kref_get(&user->refcount);
	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	/* Dispatch on destination: local SMI, IPMB, or LAN bridge. */
	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr;

		if (msg->netfn & 1) {
			/* Responses are not allowed to the SMI. */
			rv = -EINVAL;
			goto out_err;
		}

		smi_addr = (struct ipmi_system_interface_addr *) addr;
		if (smi_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
			|| (msg->cmd == IPMI_GET_MSG_CMD)
			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
			/*
			 * We don't let the user do these, since we manage
			 * the sequence numbers.
			 */
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		/* Reset/firmware commands arm the maintenance-mode timer. */
		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
			intf->auto_maintenance_timeout
				= IPMI_MAINTENANCE_MODE_TIMEOUT;
			if (!intf->maintenance_mode
			    && !intf->maintenance_mode_enable) {
				intf->maintenance_mode_enable = 1;
				maintenance_mode_update(intf);
			}
			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
					       flags);
		}

		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
		smi_msg->data[1] = msg->cmd;
		smi_msg->msgid = msgid;
		smi_msg->user_data = recv_msg;
		if (msg->data_len > 0)
			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
		smi_msg->data_size = msg->data_len + 2;
		ipmi_inc_stat(intf, sent_local_commands);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		struct ipmi_ipmb_addr *ipmb_addr;
		unsigned char ipmb_seq;
		long seqid;
		int broadcast = 0;

		if (addr->channel >= IPMI_MAX_CHANNELS) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		if (intf->channels[addr->channel].medium
					!= IPMI_CHANNEL_MEDIUM_IPMB) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		if (retries < 0) {
			if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
				retries = 0; /* Don't retry broadcasts. */
			else
				retries = 4;
		}
		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
			/*
			 * Broadcasts add a zero at the beginning of the
			 * message, but otherwise is the same as an IPMB
			 * address.
			 */
			addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			broadcast = 1;
		}


		/* Default to 1 second retries. */
		if (retry_time_ms == 0)
			retry_time_ms = 1000;

		/*
		 * 9 for the header and 1 for the checksum, plus
		 * possibly one for the broadcast.
		 */
		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
		if (ipmb_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

		if (recv_msg->msg.netfn & 0x1) {
			/*
			 * It's a response, so use the user's sequence
			 * from msgid.
			 */
			ipmi_inc_stat(intf, sent_ipmb_responses);
			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
					msgid, broadcast,
					source_address, source_lun);

			/*
			 * Save the receive message so we can use it
			 * to deliver the response.
			 */
			smi_msg->user_data = recv_msg;
		} else {
			/* It's a command, so get a sequence for it. */

			spin_lock_irqsave(&(intf->seq_lock), flags);

			/*
			 * Create a sequence number with a 1 second
			 * timeout and 4 retries.
			 */
			rv = intf_next_seq(intf,
					   recv_msg,
					   retry_time_ms,
					   retries,
					   broadcast,
					   &ipmb_seq,
					   &seqid);
			if (rv) {
				/*
				 * We have used up all the sequence numbers,
				 * probably, so abort.
				 */
				spin_unlock_irqrestore(&(intf->seq_lock),
						       flags);
				goto out_err;
			}

			ipmi_inc_stat(intf, sent_ipmb_commands);

			/*
			 * Store the sequence number in the message,
			 * so that when the send message response
			 * comes back we can start the timer.
			 */
			format_ipmb_msg(smi_msg, msg, ipmb_addr,
					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
					ipmb_seq, broadcast,
					source_address, source_lun);

			/*
			 * Copy the message into the recv message data, so we
			 * can retransmit it later if necessary.
			 */
			memcpy(recv_msg->msg_data, smi_msg->data,
			       smi_msg->data_size);
			recv_msg->msg.data = recv_msg->msg_data;
			recv_msg->msg.data_len = smi_msg->data_size;

			/*
			 * We don't unlock until here, because we need
			 * to copy the completed message into the
			 * recv_msg before we release the lock.
			 * Otherwise, race conditions may bite us. I
			 * know that's pretty paranoid, but I prefer
			 * to be correct.
			 */
			spin_unlock_irqrestore(&(intf->seq_lock), flags);
		}
	} else if (is_lan_addr(addr)) {
		struct ipmi_lan_addr *lan_addr;
		unsigned char ipmb_seq;
		long seqid;

		if (addr->channel >= IPMI_MAX_CHANNELS) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		if ((intf->channels[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_8023LAN)
		    && (intf->channels[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		retries = 4;

		/* Default to 1 second retries. */
		if (retry_time_ms == 0)
			retry_time_ms = 1000;

		/* 11 for the header and 1 for the checksum. */
		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		lan_addr = (struct ipmi_lan_addr *) addr;
		if (lan_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

		if (recv_msg->msg.netfn & 0x1) {
			/*
			 * It's a response, so use the user's sequence
			 * from msgid.
			 */
			ipmi_inc_stat(intf, sent_lan_responses);
			format_lan_msg(smi_msg, msg, lan_addr, msgid,
				       msgid, source_lun);

			/*
			 * Save the receive message so we can use it
			 * to deliver the response.
			 */
			smi_msg->user_data = recv_msg;
		} else {
			/* It's a command, so get a sequence for it. */

			spin_lock_irqsave(&(intf->seq_lock), flags);

			/*
			 * Create a sequence number with a 1 second
			 * timeout and 4 retries.
			 */
			rv = intf_next_seq(intf,
					   recv_msg,
					   retry_time_ms,
					   retries,
					   0,
					   &ipmb_seq,
					   &seqid);
			if (rv) {
				/*
				 * We have used up all the sequence numbers,
				 * probably, so abort.
				 */
				spin_unlock_irqrestore(&(intf->seq_lock),
						       flags);
				goto out_err;
			}

			ipmi_inc_stat(intf, sent_lan_commands);

			/*
			 * Store the sequence number in the message,
			 * so that when the send message response
			 * comes back we can start the timer.
			 */
			format_lan_msg(smi_msg, msg, lan_addr,
				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				       ipmb_seq, source_lun);

			/*
			 * Copy the message into the recv message data, so we
			 * can retransmit it later if necessary.
			 */
			memcpy(recv_msg->msg_data, smi_msg->data,
			       smi_msg->data_size);
			recv_msg->msg.data = recv_msg->msg_data;
			recv_msg->msg.data_len = smi_msg->data_size;

			/*
			 * We don't unlock until here, because we need
			 * to copy the completed message into the
			 * recv_msg before we release the lock.
			 * Otherwise, race conditions may bite us. I
			 * know that's pretty paranoid, but I prefer
			 * to be correct.
			 */
			spin_unlock_irqrestore(&(intf->seq_lock), flags);
		}
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
		goto out_err;
	}

#ifdef DEBUG_MSGING
	{
		int m;
		for (m = 0; m < smi_msg->data_size; m++)
			printk(" %2.2x", smi_msg->data[m]);
		printk("\n");
	}
#endif

	/* Hand the formatted message to the lower layer for transmit. */
	handlers->sender(intf->send_info, smi_msg, priority);
	rcu_read_unlock();

	return 0;

 out_err:
	rcu_read_unlock();
	ipmi_free_smi_msg(smi_msg);
	ipmi_free_recv_msg(recv_msg);
	return rv;
}
1829
/*
 * Validate the channel in a user-supplied address and fetch the source
 * slave address and LUN configured for it.  Returns -EINVAL on a bad
 * channel, 0 on success.
 */
static int check_addr(ipmi_smi_t intf,
		      struct ipmi_addr *addr,
		      unsigned char *saddr,
		      unsigned char *lun)
{
	/*
	 * addr->channel is a signed short; without the < 0 check a
	 * negative channel would pass the upper-bound test and index
	 * the channels array out of bounds.
	 */
	if (addr->channel < 0 || addr->channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	*lun = intf->channels[addr->channel].lun;
	*saddr = intf->channels[addr->channel].address;
	return 0;
}
1841
/*
 * Send a message with caller-specified retry count and retry interval.
 * Thin wrapper: validates the user and address, then defers to
 * i_ipmi_request() with freshly allocated message buffers.
 */
int ipmi_request_settime(ipmi_user_t user,
			 struct ipmi_addr *addr,
			 long msgid,
			 struct kernel_ipmi_msg *msg,
			 void *user_msg_data,
			 int priority,
			 int retries,
			 unsigned int retry_time_ms)
{
	unsigned char saddr, lun;
	int rv;

	if (!user)
		return -EINVAL;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (rv)
		return rv;

	return i_ipmi_request(user, user->intf, addr, msgid, msg,
			      user_msg_data, NULL, NULL, priority,
			      saddr, lun, retries, retry_time_ms);
}
EXPORT_SYMBOL(ipmi_request_settime);
1873
/*
 * Send a message using caller-supplied message buffers (useful at panic
 * time when allocation is not possible).  Default retry policy applies.
 */
int ipmi_request_supply_msgs(ipmi_user_t user,
			     struct ipmi_addr *addr,
			     long msgid,
			     struct kernel_ipmi_msg *msg,
			     void *user_msg_data,
			     void *supplied_smi,
			     struct ipmi_recv_msg *supplied_recv,
			     int priority)
{
	unsigned char saddr, lun;
	int rv;

	if (!user)
		return -EINVAL;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (rv)
		return rv;

	return i_ipmi_request(user, user->intf, addr, msgid, msg,
			      user_msg_data, supplied_smi, supplied_recv,
			      priority, saddr, lun, -1, 0);
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
1905
1906 #ifdef CONFIG_PROC_FS
smi_ipmb_proc_show(struct seq_file * m,void * v)1907 static int smi_ipmb_proc_show(struct seq_file *m, void *v)
1908 {
1909 ipmi_smi_t intf = m->private;
1910 int i;
1911
1912 seq_printf(m, "%x", intf->channels[0].address);
1913 for (i = 1; i < IPMI_MAX_CHANNELS; i++)
1914 seq_printf(m, " %x", intf->channels[i].address);
1915 return seq_putc(m, '\n');
1916 }
1917
/* Bind the ipmb show routine to its /proc file (single-shot seq_file). */
static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_ipmb_proc_show, PDE(inode)->data);
}
1922
/* File operations for /proc/ipmi/<n>/ipmb. */
static const struct file_operations smi_ipmb_proc_ops = {
	.open = smi_ipmb_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1929
smi_version_proc_show(struct seq_file * m,void * v)1930 static int smi_version_proc_show(struct seq_file *m, void *v)
1931 {
1932 ipmi_smi_t intf = m->private;
1933
1934 return seq_printf(m, "%u.%u\n",
1935 ipmi_version_major(&intf->bmc->id),
1936 ipmi_version_minor(&intf->bmc->id));
1937 }
1938
/* Bind the version show routine to its /proc file (single-shot seq_file). */
static int smi_version_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_version_proc_show, PDE(inode)->data);
}
1943
/* File operations for /proc/ipmi/<n>/version. */
static const struct file_operations smi_version_proc_ops = {
	.open = smi_version_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1950
/*
 * /proc/ipmi/<n>/stats: dump every per-interface message counter, one
 * "name: value" pair per line.  Counters are read via ipmi_get_stat().
 */
static int smi_stats_proc_show(struct seq_file *m, void *v)
{
	ipmi_smi_t intf = m->private;

	seq_printf(m, "sent_invalid_commands: %u\n",
		   ipmi_get_stat(intf, sent_invalid_commands));
	seq_printf(m, "sent_local_commands: %u\n",
		   ipmi_get_stat(intf, sent_local_commands));
	seq_printf(m, "handled_local_responses: %u\n",
		   ipmi_get_stat(intf, handled_local_responses));
	seq_printf(m, "unhandled_local_responses: %u\n",
		   ipmi_get_stat(intf, unhandled_local_responses));
	seq_printf(m, "sent_ipmb_commands: %u\n",
		   ipmi_get_stat(intf, sent_ipmb_commands));
	seq_printf(m, "sent_ipmb_command_errs: %u\n",
		   ipmi_get_stat(intf, sent_ipmb_command_errs));
	seq_printf(m, "retransmitted_ipmb_commands: %u\n",
		   ipmi_get_stat(intf, retransmitted_ipmb_commands));
	seq_printf(m, "timed_out_ipmb_commands: %u\n",
		   ipmi_get_stat(intf, timed_out_ipmb_commands));
	seq_printf(m, "timed_out_ipmb_broadcasts: %u\n",
		   ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
	seq_printf(m, "sent_ipmb_responses: %u\n",
		   ipmi_get_stat(intf, sent_ipmb_responses));
	seq_printf(m, "handled_ipmb_responses: %u\n",
		   ipmi_get_stat(intf, handled_ipmb_responses));
	seq_printf(m, "invalid_ipmb_responses: %u\n",
		   ipmi_get_stat(intf, invalid_ipmb_responses));
	seq_printf(m, "unhandled_ipmb_responses: %u\n",
		   ipmi_get_stat(intf, unhandled_ipmb_responses));
	seq_printf(m, "sent_lan_commands: %u\n",
		   ipmi_get_stat(intf, sent_lan_commands));
	seq_printf(m, "sent_lan_command_errs: %u\n",
		   ipmi_get_stat(intf, sent_lan_command_errs));
	seq_printf(m, "retransmitted_lan_commands: %u\n",
		   ipmi_get_stat(intf, retransmitted_lan_commands));
	seq_printf(m, "timed_out_lan_commands: %u\n",
		   ipmi_get_stat(intf, timed_out_lan_commands));
	seq_printf(m, "sent_lan_responses: %u\n",
		   ipmi_get_stat(intf, sent_lan_responses));
	seq_printf(m, "handled_lan_responses: %u\n",
		   ipmi_get_stat(intf, handled_lan_responses));
	seq_printf(m, "invalid_lan_responses: %u\n",
		   ipmi_get_stat(intf, invalid_lan_responses));
	seq_printf(m, "unhandled_lan_responses: %u\n",
		   ipmi_get_stat(intf, unhandled_lan_responses));
	seq_printf(m, "handled_commands: %u\n",
		   ipmi_get_stat(intf, handled_commands));
	seq_printf(m, "invalid_commands: %u\n",
		   ipmi_get_stat(intf, invalid_commands));
	seq_printf(m, "unhandled_commands: %u\n",
		   ipmi_get_stat(intf, unhandled_commands));
	seq_printf(m, "invalid_events: %u\n",
		   ipmi_get_stat(intf, invalid_events));
	seq_printf(m, "events: %u\n",
		   ipmi_get_stat(intf, events));
	seq_printf(m, "failed rexmit LAN msgs: %u\n",
		   ipmi_get_stat(intf, dropped_rexmit_lan_commands));
	seq_printf(m, "failed rexmit IPMB msgs: %u\n",
		   ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
	return 0;
}
2013
/* Bind the stats show routine to its /proc file (single-shot seq_file). */
static int smi_stats_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_stats_proc_show, PDE(inode)->data);
}
2018
/* File operations for /proc/ipmi/<n>/stats. */
static const struct file_operations smi_stats_proc_ops = {
	.open = smi_stats_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2025 #endif /* CONFIG_PROC_FS */
2026
/*
 * Create a file under this interface's /proc directory and record it on
 * smi->proc_entries so remove_proc_entries() can tear it down later.
 * Returns 0 on success or -ENOMEM.  A no-op (returning 0) when
 * CONFIG_PROC_FS is disabled.
 */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
			    const struct file_operations *proc_ops,
			    void *data)
{
	int rv = 0;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *file;
	struct ipmi_proc_entry *entry;

	/* Create a list element. */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	/* kstrdup replaces the hand-rolled kmalloc(strlen+1)+strcpy. */
	entry->name = kstrdup(name, GFP_KERNEL);
	if (!entry->name) {
		kfree(entry);
		return -ENOMEM;
	}

	file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
	if (!file) {
		kfree(entry->name);
		kfree(entry);
		rv = -ENOMEM;
	} else {
		mutex_lock(&smi->proc_entry_lock);
		/* Stick it on the list. */
		entry->next = smi->proc_entries;
		smi->proc_entries = entry;
		mutex_unlock(&smi->proc_entry_lock);
	}
#endif /* CONFIG_PROC_FS */

	return rv;
}
EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
2064
/*
 * Create /proc/ipmi/<num>/ and populate it with the stats, ipmb and
 * version files.  Returns 0 or -ENOMEM.
 */
static int add_proc_entries(ipmi_smi_t smi, int num)
{
	int rv = 0;

#ifdef CONFIG_PROC_FS
	sprintf(smi->proc_dir_name, "%d", num);
	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
	if (!smi->proc_dir)
		return -ENOMEM;

	rv = ipmi_smi_add_proc_entry(smi, "stats",
				     &smi_stats_proc_ops, smi);
	if (!rv)
		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
					     &smi_ipmb_proc_ops, smi);
	if (!rv)
		rv = ipmi_smi_add_proc_entry(smi, "version",
					     &smi_version_proc_ops, smi);
#endif /* CONFIG_PROC_FS */

	return rv;
}
2093
/* Tear down every /proc file registered for this interface, then the
   interface's directory itself. */
static void remove_proc_entries(ipmi_smi_t smi)
{
#ifdef CONFIG_PROC_FS
	struct ipmi_proc_entry *entry, *next;

	mutex_lock(&smi->proc_entry_lock);
	for (entry = smi->proc_entries; entry; entry = next) {
		next = entry->next;
		remove_proc_entry(entry->name, smi->proc_dir);
		kfree(entry->name);
		kfree(entry);
	}
	smi->proc_entries = NULL;
	mutex_unlock(&smi->proc_entry_lock);
	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
}
2112
/* driver_find_device() match callback: compare a BMC's 16-byte GUID. */
static int __find_bmc_guid(struct device *dev, void *data)
{
	unsigned char *id = data;
	struct bmc_device *bmc = dev_get_drvdata(dev);
	return memcmp(bmc->guid, id, 16) == 0;
}
2119
ipmi_find_bmc_guid(struct device_driver * drv,unsigned char * guid)2120 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2121 unsigned char *guid)
2122 {
2123 struct device *dev;
2124
2125 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2126 if (dev)
2127 return dev_get_drvdata(dev);
2128 else
2129 return NULL;
2130 }
2131
/* Lookup key for finding a BMC by product and device ID. */
struct prod_dev_id {
	unsigned int product_id;
	unsigned char device_id;
};
2136
__find_bmc_prod_dev_id(struct device * dev,void * data)2137 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2138 {
2139 struct prod_dev_id *id = data;
2140 struct bmc_device *bmc = dev_get_drvdata(dev);
2141
2142 return (bmc->id.product_id == id->product_id
2143 && bmc->id.device_id == id->device_id);
2144 }
2145
ipmi_find_bmc_prod_dev_id(struct device_driver * drv,unsigned int product_id,unsigned char device_id)2146 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2147 struct device_driver *drv,
2148 unsigned int product_id, unsigned char device_id)
2149 {
2150 struct prod_dev_id id = {
2151 .product_id = product_id,
2152 .device_id = device_id,
2153 };
2154 struct device *dev;
2155
2156 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2157 if (dev)
2158 return dev_get_drvdata(dev);
2159 else
2160 return NULL;
2161 }
2162
device_id_show(struct device * dev,struct device_attribute * attr,char * buf)2163 static ssize_t device_id_show(struct device *dev,
2164 struct device_attribute *attr,
2165 char *buf)
2166 {
2167 struct bmc_device *bmc = dev_get_drvdata(dev);
2168
2169 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2170 }
2171
provides_dev_sdrs_show(struct device * dev,struct device_attribute * attr,char * buf)2172 static ssize_t provides_dev_sdrs_show(struct device *dev,
2173 struct device_attribute *attr,
2174 char *buf)
2175 {
2176 struct bmc_device *bmc = dev_get_drvdata(dev);
2177
2178 return snprintf(buf, 10, "%u\n",
2179 (bmc->id.device_revision & 0x80) >> 7);
2180 }
2181
revision_show(struct device * dev,struct device_attribute * attr,char * buf)2182 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2183 char *buf)
2184 {
2185 struct bmc_device *bmc = dev_get_drvdata(dev);
2186
2187 return snprintf(buf, 20, "%u\n",
2188 bmc->id.device_revision & 0x0F);
2189 }
2190
firmware_rev_show(struct device * dev,struct device_attribute * attr,char * buf)2191 static ssize_t firmware_rev_show(struct device *dev,
2192 struct device_attribute *attr,
2193 char *buf)
2194 {
2195 struct bmc_device *bmc = dev_get_drvdata(dev);
2196
2197 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2198 bmc->id.firmware_revision_2);
2199 }
2200
ipmi_version_show(struct device * dev,struct device_attribute * attr,char * buf)2201 static ssize_t ipmi_version_show(struct device *dev,
2202 struct device_attribute *attr,
2203 char *buf)
2204 {
2205 struct bmc_device *bmc = dev_get_drvdata(dev);
2206
2207 return snprintf(buf, 20, "%u.%u\n",
2208 ipmi_version_major(&bmc->id),
2209 ipmi_version_minor(&bmc->id));
2210 }
2211
add_dev_support_show(struct device * dev,struct device_attribute * attr,char * buf)2212 static ssize_t add_dev_support_show(struct device *dev,
2213 struct device_attribute *attr,
2214 char *buf)
2215 {
2216 struct bmc_device *bmc = dev_get_drvdata(dev);
2217
2218 return snprintf(buf, 10, "0x%02x\n",
2219 bmc->id.additional_device_support);
2220 }
2221
manufacturer_id_show(struct device * dev,struct device_attribute * attr,char * buf)2222 static ssize_t manufacturer_id_show(struct device *dev,
2223 struct device_attribute *attr,
2224 char *buf)
2225 {
2226 struct bmc_device *bmc = dev_get_drvdata(dev);
2227
2228 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2229 }
2230
product_id_show(struct device * dev,struct device_attribute * attr,char * buf)2231 static ssize_t product_id_show(struct device *dev,
2232 struct device_attribute *attr,
2233 char *buf)
2234 {
2235 struct bmc_device *bmc = dev_get_drvdata(dev);
2236
2237 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2238 }
2239
aux_firmware_rev_show(struct device * dev,struct device_attribute * attr,char * buf)2240 static ssize_t aux_firmware_rev_show(struct device *dev,
2241 struct device_attribute *attr,
2242 char *buf)
2243 {
2244 struct bmc_device *bmc = dev_get_drvdata(dev);
2245
2246 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2247 bmc->id.aux_firmware_revision[3],
2248 bmc->id.aux_firmware_revision[2],
2249 bmc->id.aux_firmware_revision[1],
2250 bmc->id.aux_firmware_revision[0]);
2251 }
2252
guid_show(struct device * dev,struct device_attribute * attr,char * buf)2253 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2254 char *buf)
2255 {
2256 struct bmc_device *bmc = dev_get_drvdata(dev);
2257
2258 return snprintf(buf, 100, "%Lx%Lx\n",
2259 (long long) bmc->guid[0],
2260 (long long) bmc->guid[8]);
2261 }
2262
remove_files(struct bmc_device * bmc)2263 static void remove_files(struct bmc_device *bmc)
2264 {
2265 if (!bmc->dev)
2266 return;
2267
2268 device_remove_file(&bmc->dev->dev,
2269 &bmc->device_id_attr);
2270 device_remove_file(&bmc->dev->dev,
2271 &bmc->provides_dev_sdrs_attr);
2272 device_remove_file(&bmc->dev->dev,
2273 &bmc->revision_attr);
2274 device_remove_file(&bmc->dev->dev,
2275 &bmc->firmware_rev_attr);
2276 device_remove_file(&bmc->dev->dev,
2277 &bmc->version_attr);
2278 device_remove_file(&bmc->dev->dev,
2279 &bmc->add_dev_support_attr);
2280 device_remove_file(&bmc->dev->dev,
2281 &bmc->manufacturer_id_attr);
2282 device_remove_file(&bmc->dev->dev,
2283 &bmc->product_id_attr);
2284
2285 if (bmc->id.aux_firmware_revision_set)
2286 device_remove_file(&bmc->dev->dev,
2287 &bmc->aux_firmware_rev_attr);
2288 if (bmc->guid_set)
2289 device_remove_file(&bmc->dev->dev,
2290 &bmc->guid_attr);
2291 }
2292
2293 static void
cleanup_bmc_device(struct kref * ref)2294 cleanup_bmc_device(struct kref *ref)
2295 {
2296 struct bmc_device *bmc;
2297
2298 bmc = container_of(ref, struct bmc_device, refcount);
2299
2300 remove_files(bmc);
2301 platform_device_unregister(bmc->dev);
2302 kfree(bmc);
2303 }
2304
/*
 * Detach an interface from its BMC: remove the sysfs symlinks in both
 * directions and drop the interface's reference on the bmc_device
 * (which frees it via cleanup_bmc_device() if this was the last user).
 */
static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
	struct bmc_device *bmc = intf->bmc;

	/* Remove the interface -> BMC symlink, if it was created. */
	if (intf->sysfs_name) {
		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
	}
	/* Remove the BMC -> interface symlink, if it was created. */
	if (intf->my_dev_name) {
		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
	}

	/* ipmidriver_mutex serializes BMC lookup/registration/teardown. */
	mutex_lock(&ipmidriver_mutex);
	kref_put(&bmc->refcount, cleanup_bmc_device);
	intf->bmc = NULL;
	mutex_unlock(&ipmidriver_mutex);
}
2325
create_files(struct bmc_device * bmc)2326 static int create_files(struct bmc_device *bmc)
2327 {
2328 int err;
2329
2330 bmc->device_id_attr.attr.name = "device_id";
2331 bmc->device_id_attr.attr.mode = S_IRUGO;
2332 bmc->device_id_attr.show = device_id_show;
2333 sysfs_attr_init(&bmc->device_id_attr.attr);
2334
2335 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2336 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2337 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2338 sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
2339
2340 bmc->revision_attr.attr.name = "revision";
2341 bmc->revision_attr.attr.mode = S_IRUGO;
2342 bmc->revision_attr.show = revision_show;
2343 sysfs_attr_init(&bmc->revision_attr.attr);
2344
2345 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2346 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2347 bmc->firmware_rev_attr.show = firmware_rev_show;
2348 sysfs_attr_init(&bmc->firmware_rev_attr.attr);
2349
2350 bmc->version_attr.attr.name = "ipmi_version";
2351 bmc->version_attr.attr.mode = S_IRUGO;
2352 bmc->version_attr.show = ipmi_version_show;
2353 sysfs_attr_init(&bmc->version_attr.attr);
2354
2355 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2356 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2357 bmc->add_dev_support_attr.show = add_dev_support_show;
2358 sysfs_attr_init(&bmc->add_dev_support_attr.attr);
2359
2360 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2361 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2362 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2363 sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
2364
2365 bmc->product_id_attr.attr.name = "product_id";
2366 bmc->product_id_attr.attr.mode = S_IRUGO;
2367 bmc->product_id_attr.show = product_id_show;
2368 sysfs_attr_init(&bmc->product_id_attr.attr);
2369
2370 bmc->guid_attr.attr.name = "guid";
2371 bmc->guid_attr.attr.mode = S_IRUGO;
2372 bmc->guid_attr.show = guid_show;
2373 sysfs_attr_init(&bmc->guid_attr.attr);
2374
2375 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2376 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2377 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2378 sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
2379
2380 err = device_create_file(&bmc->dev->dev,
2381 &bmc->device_id_attr);
2382 if (err)
2383 goto out;
2384 err = device_create_file(&bmc->dev->dev,
2385 &bmc->provides_dev_sdrs_attr);
2386 if (err)
2387 goto out_devid;
2388 err = device_create_file(&bmc->dev->dev,
2389 &bmc->revision_attr);
2390 if (err)
2391 goto out_sdrs;
2392 err = device_create_file(&bmc->dev->dev,
2393 &bmc->firmware_rev_attr);
2394 if (err)
2395 goto out_rev;
2396 err = device_create_file(&bmc->dev->dev,
2397 &bmc->version_attr);
2398 if (err)
2399 goto out_firm;
2400 err = device_create_file(&bmc->dev->dev,
2401 &bmc->add_dev_support_attr);
2402 if (err)
2403 goto out_version;
2404 err = device_create_file(&bmc->dev->dev,
2405 &bmc->manufacturer_id_attr);
2406 if (err)
2407 goto out_add_dev;
2408 err = device_create_file(&bmc->dev->dev,
2409 &bmc->product_id_attr);
2410 if (err)
2411 goto out_manu;
2412 if (bmc->id.aux_firmware_revision_set) {
2413 err = device_create_file(&bmc->dev->dev,
2414 &bmc->aux_firmware_rev_attr);
2415 if (err)
2416 goto out_prod_id;
2417 }
2418 if (bmc->guid_set) {
2419 err = device_create_file(&bmc->dev->dev,
2420 &bmc->guid_attr);
2421 if (err)
2422 goto out_aux_firm;
2423 }
2424
2425 return 0;
2426
2427 out_aux_firm:
2428 if (bmc->id.aux_firmware_revision_set)
2429 device_remove_file(&bmc->dev->dev,
2430 &bmc->aux_firmware_rev_attr);
2431 out_prod_id:
2432 device_remove_file(&bmc->dev->dev,
2433 &bmc->product_id_attr);
2434 out_manu:
2435 device_remove_file(&bmc->dev->dev,
2436 &bmc->manufacturer_id_attr);
2437 out_add_dev:
2438 device_remove_file(&bmc->dev->dev,
2439 &bmc->add_dev_support_attr);
2440 out_version:
2441 device_remove_file(&bmc->dev->dev,
2442 &bmc->version_attr);
2443 out_firm:
2444 device_remove_file(&bmc->dev->dev,
2445 &bmc->firmware_rev_attr);
2446 out_rev:
2447 device_remove_file(&bmc->dev->dev,
2448 &bmc->revision_attr);
2449 out_sdrs:
2450 device_remove_file(&bmc->dev->dev,
2451 &bmc->provides_dev_sdrs_attr);
2452 out_devid:
2453 device_remove_file(&bmc->dev->dev,
2454 &bmc->device_id_attr);
2455 out:
2456 return err;
2457 }
2458
/*
 * Associate an interface with its BMC.  If a bmc_device matching this
 * BMC's GUID (or, lacking a GUID, its product/device id pair) is
 * already registered, reuse it and take a reference; otherwise
 * register a new platform device for it and populate its sysfs files.
 * Finally, create the cross symlinks between the system-interface
 * device and the BMC device.  Returns 0 on success or a negative
 * errno; on failure after device registration the association is
 * unwound via ipmi_bmc_unregister().
 */
static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
			     const char *sysfs_name)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;
	struct bmc_device *old_bmc;
	int size;
	char dummy[1];

	mutex_lock(&ipmidriver_mutex);

	/*
	 * Try to find if there is an bmc_device struct
	 * representing the interfaced BMC already
	 */
	if (bmc->guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    bmc->id.product_id,
						    bmc->id.device_id);

	/*
	 * If there is already an bmc_device, free the new one,
	 * otherwise register the new BMC device
	 */
	if (old_bmc) {
		kfree(bmc);
		intf->bmc = old_bmc;
		bmc = old_bmc;

		kref_get(&bmc->refcount);
		mutex_unlock(&ipmidriver_mutex);

		printk(KERN_INFO
		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);
	} else {
		char name[14];
		unsigned char orig_dev_id = bmc->id.device_id;
		int warn_printed = 0;

		snprintf(name, sizeof(name),
			 "ipmi_bmc.%4.4x", bmc->id.product_id);

		/*
		 * Platform device (name, id) pairs must be unique; if
		 * another BMC already uses this product/device id,
		 * bump the device id until a free one is found or we
		 * wrap back to the original value.
		 */
		while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						 bmc->id.product_id,
						 bmc->id.device_id)) {
			if (!warn_printed) {
				printk(KERN_WARNING PFX
				       "This machine has two different BMCs"
				       " with the same product id and device"
				       " id. This is an error in the"
				       " firmware, but incrementing the"
				       " device id to work around the problem."
				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
				       bmc->id.product_id, bmc->id.device_id);
				warn_printed = 1;
			}
			bmc->id.device_id++; /* Wraps at 255 */
			if (bmc->id.device_id == orig_dev_id) {
				printk(KERN_ERR PFX
				       "Out of device ids!\n");
				break;
			}
		}

		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
		if (!bmc->dev) {
			mutex_unlock(&ipmidriver_mutex);
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to allocate platform device\n");
			return -ENOMEM;
		}
		bmc->dev->dev.driver = &ipmidriver.driver;
		dev_set_drvdata(&bmc->dev->dev, bmc);
		kref_init(&bmc->refcount);

		rv = platform_device_add(bmc->dev);
		mutex_unlock(&ipmidriver_mutex);
		if (rv) {
			platform_device_put(bmc->dev);
			bmc->dev = NULL;
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to register bmc device: %d\n",
			       rv);
			/*
			 * Don't go to out_err, you can only do that if
			 * the device is registered already.
			 */
			return rv;
		}

		rv = create_files(bmc);
		if (rv) {
			mutex_lock(&ipmidriver_mutex);
			platform_device_unregister(bmc->dev);
			mutex_unlock(&ipmidriver_mutex);

			return rv;
		}

		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	}

	/*
	 * create symlink from system interface device to bmc device
	 * and back.
	 */
	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
	if (!intf->sysfs_name) {
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link to BMC: %d\n",
		       rv);
		goto out_err;
	}

	rv = sysfs_create_link(&intf->si_dev->kobj,
			       &bmc->dev->dev.kobj, intf->sysfs_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
		       rv);
		goto out_err;
	}

	/* Size the "ipmiN" name with a measuring snprintf, then build it. */
	size = snprintf(dummy, 0, "ipmi%d", ifnum);
	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
	if (!intf->my_dev_name) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link from BMC: %d\n",
		       rv);
		goto out_err;
	}
	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);

	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler:"
		       " Unable to create symlink to bmc: %d\n",
		       rv);
		goto out_err;
	}

	return 0;

out_err:
	ipmi_bmc_unregister(intf);
	return rv;
}
2630
2631 static int
send_guid_cmd(ipmi_smi_t intf,int chan)2632 send_guid_cmd(ipmi_smi_t intf, int chan)
2633 {
2634 struct kernel_ipmi_msg msg;
2635 struct ipmi_system_interface_addr si;
2636
2637 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2638 si.channel = IPMI_BMC_CHANNEL;
2639 si.lun = 0;
2640
2641 msg.netfn = IPMI_NETFN_APP_REQUEST;
2642 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2643 msg.data = NULL;
2644 msg.data_len = 0;
2645 return i_ipmi_request(NULL,
2646 intf,
2647 (struct ipmi_addr *) &si,
2648 0,
2649 &msg,
2650 intf,
2651 NULL,
2652 NULL,
2653 0,
2654 intf->channels[0].address,
2655 intf->channels[0].lun,
2656 -1, 0);
2657 }
2658
2659 static void
guid_handler(ipmi_smi_t intf,struct ipmi_recv_msg * msg)2660 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2661 {
2662 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2663 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2664 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2665 /* Not for me */
2666 return;
2667
2668 if (msg->msg.data[0] != 0) {
2669 /* Error from getting the GUID, the BMC doesn't have one. */
2670 intf->bmc->guid_set = 0;
2671 goto out;
2672 }
2673
2674 if (msg->msg.data_len < 17) {
2675 intf->bmc->guid_set = 0;
2676 printk(KERN_WARNING PFX
2677 "guid_handler: The GUID response from the BMC was too"
2678 " short, it was %d but should have been 17. Assuming"
2679 " GUID is not available.\n",
2680 msg->msg.data_len);
2681 goto out;
2682 }
2683
2684 memcpy(intf->bmc->guid, msg->msg.data, 16);
2685 intf->bmc->guid_set = 1;
2686 out:
2687 wake_up(&intf->waitq);
2688 }
2689
2690 static void
get_guid(ipmi_smi_t intf)2691 get_guid(ipmi_smi_t intf)
2692 {
2693 int rv;
2694
2695 intf->bmc->guid_set = 0x2;
2696 intf->null_user_handler = guid_handler;
2697 rv = send_guid_cmd(intf, 0);
2698 if (rv)
2699 /* Send failed, no GUID available. */
2700 intf->bmc->guid_set = 0;
2701 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2702 intf->null_user_handler = NULL;
2703 }
2704
2705 static int
send_channel_info_cmd(ipmi_smi_t intf,int chan)2706 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2707 {
2708 struct kernel_ipmi_msg msg;
2709 unsigned char data[1];
2710 struct ipmi_system_interface_addr si;
2711
2712 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2713 si.channel = IPMI_BMC_CHANNEL;
2714 si.lun = 0;
2715
2716 msg.netfn = IPMI_NETFN_APP_REQUEST;
2717 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2718 msg.data = data;
2719 msg.data_len = 1;
2720 data[0] = chan;
2721 return i_ipmi_request(NULL,
2722 intf,
2723 (struct ipmi_addr *) &si,
2724 0,
2725 &msg,
2726 intf,
2727 NULL,
2728 NULL,
2729 0,
2730 intf->channels[0].address,
2731 intf->channels[0].lun,
2732 -1, 0);
2733 }
2734
2735 static void
channel_handler(ipmi_smi_t intf,struct ipmi_recv_msg * msg)2736 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2737 {
2738 int rv = 0;
2739 int chan;
2740
2741 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2742 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2743 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
2744 /* It's the one we want */
2745 if (msg->msg.data[0] != 0) {
2746 /* Got an error from the channel, just go on. */
2747
2748 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2749 /*
2750 * If the MC does not support this
2751 * command, that is legal. We just
2752 * assume it has one IPMB at channel
2753 * zero.
2754 */
2755 intf->channels[0].medium
2756 = IPMI_CHANNEL_MEDIUM_IPMB;
2757 intf->channels[0].protocol
2758 = IPMI_CHANNEL_PROTOCOL_IPMB;
2759 rv = -ENOSYS;
2760
2761 intf->curr_channel = IPMI_MAX_CHANNELS;
2762 wake_up(&intf->waitq);
2763 goto out;
2764 }
2765 goto next_channel;
2766 }
2767 if (msg->msg.data_len < 4) {
2768 /* Message not big enough, just go on. */
2769 goto next_channel;
2770 }
2771 chan = intf->curr_channel;
2772 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2773 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2774
2775 next_channel:
2776 intf->curr_channel++;
2777 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2778 wake_up(&intf->waitq);
2779 else
2780 rv = send_channel_info_cmd(intf, intf->curr_channel);
2781
2782 if (rv) {
2783 /* Got an error somehow, just give up. */
2784 intf->curr_channel = IPMI_MAX_CHANNELS;
2785 wake_up(&intf->waitq);
2786
2787 printk(KERN_WARNING PFX
2788 "Error sending channel information: %d\n",
2789 rv);
2790 }
2791 }
2792 out:
2793 return;
2794 }
2795
/*
 * Ask the low-level driver to check for messages, then drain anything
 * that arrived while we were at it.
 */
static void ipmi_poll(ipmi_smi_t intf)
{
	struct ipmi_smi_handlers *handlers = intf->handlers;

	if (handlers->poll)
		handlers->poll(intf->send_info);

	/* In case something came in */
	handle_new_recv_msgs(intf);
}
2803
/*
 * Public entry point: poll the interface backing this user for any
 * pending messages.
 */
void ipmi_poll_interface(ipmi_user_t user)
{
	ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);
2809
/*
 * Register a new low-level IPMI interface with the message handler.
 *
 * @handlers:   operations the low-level driver supplies (sender,
 *              start_processing, poll, ...).
 * @send_info:  opaque cookie passed back to the handlers.
 * @device_id:  the BMC's device id data; copied into the bmc_device.
 * @si_dev:     the system interface's device, used for sysfs links.
 * @sysfs_name: name of the interface -> BMC sysfs symlink.
 * @slave_addr: our address on channel 0, or 0 for the default.
 *
 * Allocates and initializes the interface, inserts it into the global
 * list at the lowest free interface number, starts the low-level
 * driver, fetches the BMC GUID, scans channels (IPMI 1.5+), creates
 * proc entries and the BMC device, then notifies the watchers.
 * Returns 0 on success or a negative errno; on error everything is
 * unwound.
 */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
		      void *send_info,
		      struct ipmi_device_id *device_id,
		      struct device *si_dev,
		      const char *sysfs_name,
		      unsigned char slave_addr)
{
	int i, j;
	int rv;
	ipmi_smi_t intf;
	ipmi_smi_t tintf;
	struct list_head *link;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;
		/*
		 * The init code doesn't return an error if it was turned
		 * off, but it won't initialize. Check that.
		 */
		if (!initialized)
			return -ENODEV;
	}

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	intf->ipmi_version_major = ipmi_version_major(device_id);
	intf->ipmi_version_minor = ipmi_version_minor(device_id);

	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
	if (!intf->bmc) {
		kfree(intf);
		return -ENOMEM;
	}
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	intf->bmc->id = *device_id;
	intf->si_dev = si_dev;
	/* Default every channel's address/LUN before any override. */
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->channels[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->channels[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
#ifdef CONFIG_PROC_FS
	mutex_init(&intf->proc_entry_lock);
#endif
	spin_lock_init(&intf->waiting_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_msgs);
	tasklet_init(&intf->recv_tasklet,
		     smi_recv_tasklet,
		     (unsigned long) intf);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->events_lock);
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

	intf->proc_dir = NULL;

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out;

	get_guid(intf);

	if ((intf->ipmi_version_major > 1)
	    || ((intf->ipmi_version_major == 1)
		&& (intf->ipmi_version_minor >= 5))) {
		/*
		 * Start scanning the channels to see what is
		 * available.
		 */
		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv)
			goto out;

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq,
			   intf->curr_channel >= IPMI_MAX_CHANNELS);
		intf->null_user_handler = NULL;
	} else {
		/* Assume a single IPMB channel at zero. */
		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
		intf->curr_channel = IPMI_MAX_CHANNELS;
	}

	if (rv == 0)
		rv = add_proc_entries(intf, i);

	rv = ipmi_bmc_register(intf, i, sysfs_name);

 out:
	if (rv) {
		if (intf->proc_dir)
			remove_proc_entries(intf);
		intf->handlers = NULL;
		list_del_rcu(&intf->link);
		mutex_unlock(&ipmi_interfaces_mutex);
		mutex_unlock(&smi_watchers_mutex);
		/* Let RCU readers of the list finish before freeing. */
		synchronize_rcu();
		kref_put(&intf->refcount, intf_free);
	} else {
		/*
		 * Keep memory order straight for RCU readers. Make
		 * sure everything else is committed to memory before
		 * setting intf_num to mark the interface valid.
		 */
		smp_wmb();
		intf->intf_num = i;
		mutex_unlock(&ipmi_interfaces_mutex);
		/* After this point the interface is legal to use. */
		call_smi_watchers(i, intf->si_dev);
		mutex_unlock(&smi_watchers_mutex);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_register_smi);
2971
/*
 * Fail every request still waiting for a response with
 * IPMI_ERR_UNSPECIFIED.  Called after the interface is down, so the
 * sequence table can be walked without locking.
 */
static void cleanup_smi_msgs(ipmi_smi_t intf)
{
	struct seq_table *ent;
	int seq;

	/* No need for locks, the interface is down. */
	for (seq = 0; seq < IPMI_IPMB_NUM_SEQ; seq++) {
		ent = &intf->seq_table[seq];
		if (ent->inuse)
			deliver_err_response(ent->recv_msg,
					     IPMI_ERR_UNSPECIFIED);
	}
}
2985
/*
 * Unregister a low-level interface: detach it from its BMC, pull it
 * out of the global list (with an RCU grace period so in-flight
 * readers finish), fail any outstanding requests, remove the proc
 * entries, notify the watchers, and drop the list's reference on the
 * interface.  Always returns 0.
 */
int ipmi_unregister_smi(ipmi_smi_t intf)
{
	struct ipmi_smi_watcher *w;
	int intf_num = intf->intf_num;

	ipmi_bmc_unregister(intf);

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	/* Clearing handlers stops further sends via this interface. */
	intf->handlers = NULL;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	/* Wait out RCU readers of the list and of intf->handlers. */
	synchronize_rcu();

	cleanup_smi_msgs(intf);

	remove_proc_entries(intf);

	/*
	 * Call all the watcher interfaces to tell them that
	 * an interface is gone.
	 */
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	kref_put(&intf->refcount, intf_free);
	return 0;
}
EXPORT_SYMBOL(ipmi_unregister_smi);
3017
/*
 * Handle a Get Message payload that is an IPMB *response* to a request
 * we previously sent.  The remote responder's address is rebuilt from
 * the IPMB header, matched against the sequence table, and the
 * response is delivered to the originating user.  Returns 0 in all
 * cases (the SMI message is never requeued).
 */
static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * This is 11, not 10, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_ipmb_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/* Rebuild the remote responder's address from the IPMB header. */
	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/*
	 * It's a response from a remote entity. Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,		/* sequence number */
			  msg->rsp[3] & 0x0f,		/* channel */
			  msg->rsp[8],			/* command */
			  (msg->rsp[4] >> 2) & (~1),	/* request netfn */
			  (struct ipmi_addr *) &(ipmb_addr),
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
		return 0;
	}

	/*
	 * NOTE(review): this copies rsp_size - 9 bytes while data_len
	 * below is rsp_size - 10; the trailing checksum byte ends up in
	 * msg_data but is excluded from the reported length.
	 */
	memcpy(recv_msg->msg_data,
	       &(msg->rsp[9]),
	       msg->rsp_size - 9);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	ipmi_inc_stat(intf, handled_ipmb_responses);
	deliver_response(recv_msg);

	return 0;
}
3080
/*
 * Handle a Get Message payload that is an IPMB *command* addressed to
 * us.  If a user has registered for this (netfn, cmd, channel), the
 * command is delivered to it; otherwise an "invalid command" response
 * is built in place over the SMI message and sent back on the bus.
 *
 * Returns 0 if the SMI message may be freed, -1 if it was consumed by
 * the sender (do not free or requeue), or 1 to requeue the message for
 * later handling (allocation failure).
 */
static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_ipmb_addr *ipmb_addr;
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_smi_handlers *handlers;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/* Decode the embedded command's routing from the IPMB header. */
	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	/* Look up (and pin) the registered receiver, if any. */
	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Build a Send Message request carrying an "invalid
		 * command" completion back to the requester, reusing
		 * the SMI message's data buffer.
		 */
		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;

#ifdef DEBUG_MSGING
		{
			int m;
			printk("Invalid command:");
			for (m = 0; m < msg->data_size; m++)
				printk(" %2.2x", msg->data[m]);
			printk("\n");
		}
#endif
		rcu_read_lock();
		handlers = intf->handlers;
		if (handlers) {
			handlers->sender(intf->send_info, msg, 0);
			/*
			 * We used the message, so return the value
			 * that causes it to not be freed or
			 * queued.
			 */
			rv = -1;
		}
		rcu_read_unlock();
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 10, not 9 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[9]),
			       msg->rsp_size - 10);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
3203
/*
 * Handle a Get Message payload that is a LAN-routed *response* to a
 * request we previously sent.  The remote LAN address is rebuilt from
 * the header, matched against the sequence table, and the response is
 * delivered to the originating user.  Returns 0 in all cases (the SMI
 * message is never requeued).
 */
static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr lan_addr;
	struct ipmi_recv_msg *recv_msg;


	/*
	 * This is 13, not 12, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_lan_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/* Rebuild the remote responder's LAN address from the header. */
	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/*
	 * It's a response from a remote entity. Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,		/* sequence number */
			  msg->rsp[3] & 0x0f,		/* channel */
			  msg->rsp[10],			/* command */
			  (msg->rsp[6] >> 2) & (~1),	/* request netfn */
			  (struct ipmi_addr *) &(lan_addr),
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	/*
	 * NOTE(review): copies rsp_size - 11 bytes while data_len below
	 * is rsp_size - 12; the trailing checksum byte lands in
	 * msg_data but is excluded from the reported length.
	 */
	memcpy(recv_msg->msg_data,
	       &(msg->rsp[11]),
	       msg->rsp_size - 11);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	ipmi_inc_stat(intf, handled_lan_responses);
	deliver_response(recv_msg);

	return 0;
}
3270
/*
 * Handle a "Get Message" response that carries a command sent to us by
 * a remote entity over a LAN channel.  Looks up a registered command
 * receiver for the (netfn, cmd, channel) tuple and delivers the
 * command to that user.  Returns 1 if the SMI message must be requeued
 * (receive-message allocation failed), otherwise 0.
 */
static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	/*
	 * Look up the registered receiver under RCU and take a
	 * reference on its user so it cannot go away while we deliver.
	 */
	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling later.
			 */
			rv = 1;
			/* Drop the reference taken above; nothing delivered. */
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[11]),
			       msg->rsp_size - 12);
			/* deliver_response() consumes the user reference. */
			deliver_response(recv_msg);
		}
	}

	return rv;
}
3364
3365 /*
3366 * This routine will handle "Get Message" command responses with
3367 * channels that use an OEM Medium. The message format belongs to
3368 * the OEM. See IPMI 2.0 specification, Chapter 6 and
3369 * Chapter 22, sections 22.6 and 22.24 for more details.
3370 */
/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium. The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 *
 * Returns 1 if the SMI message must be requeued (receive-message
 * allocation failed), otherwise 0.
 */
static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking
	 * so we just do some basic sanity checks
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM Message so the OEM needs to know how to
	 * handle the message. We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	/*
	 * Look up the registered receiver under RCU and take a
	 * reference on its user so it cannot go away while we deliver.
	 */
	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */

		rv = 0;
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			/* Drop the reference taken above; nothing delivered. */
			kref_put(&user->refcount, free_user);
		} else {
			/*
			 * OEM Messages are expected to be delivered via
			 * the system interface to SMS software.  We might
			 * need to visit this again depending on OEM
			 * requirements
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &(recv_msg->addr));
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4 which follows the
			 * the Channel Byte in the "GET MESSAGE" command
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[4]),
			       msg->rsp_size - 4);
			/* deliver_response() consumes the user reference. */
			deliver_response(recv_msg);
		}
	}

	return rv;
}
3472
copy_event_into_recv_msg(struct ipmi_recv_msg * recv_msg,struct ipmi_smi_msg * msg)3473 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3474 struct ipmi_smi_msg *msg)
3475 {
3476 struct ipmi_system_interface_addr *smi_addr;
3477
3478 recv_msg->msgid = 0;
3479 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3480 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3481 smi_addr->channel = IPMI_BMC_CHANNEL;
3482 smi_addr->lun = msg->rsp[0] & 3;
3483 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3484 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3485 recv_msg->msg.cmd = msg->rsp[1];
3486 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3487 recv_msg->msg.data = recv_msg->msg_data;
3488 recv_msg->msg.data_len = msg->rsp_size - 3;
3489 }
3490
/*
 * Handle a "Read Event Message Buffer" response: an asynchronous event
 * from the BMC.  A private copy is delivered to every user that has
 * events enabled; if no user wants events, the event is stored on the
 * interface's waiting_events queue (bounded by MAX_EVENTS_IN_QUEUE).
 * Returns 1 if the SMI message must be requeued because allocation
 * failed, otherwise 0.
 */
static int handle_read_event_rsp(ipmi_smi_t intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	ipmi_user_t user;
	int rv = 0;
	int deliver_count = 0;
	unsigned long flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	/* Held until "out"; covers the user scan and the event queue. */
	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			rcu_read_unlock();
			/* Undo: free every copy built so far. */
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		/* Reference released when the recv_msg is freed. */
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);
	}
	rcu_read_unlock();

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in queue if there's
		 * not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There's too many things in the queue, discard this
		 * message.
		 */
		printk(KERN_WARNING PFX "Event queue full, discarding"
		       " incoming events\n");
		/* Only warn once until the queue drains. */
		intf->event_msg_printed = 1;
	}

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
}
3593
/*
 * Handle a response from the local BMC (system interface).  The
 * originating request is recovered from msg->user_data and delivered
 * back to the owning user.  Always returns 0: the SMI message can be
 * freed afterward.
 *
 * Fix: the warning message misspelled "vendor" as "vender".
 */
static int handle_bmc_rsp(ipmi_smi_t intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_user *user;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		printk(KERN_WARNING
		       "IPMI message received with no owner. This\n"
		       "could be because of a malformed message, or\n"
		       "because of a hardware error.  Contact your\n"
		       "hardware vendor for assistance\n");
		return 0;
	}

	user = recv_msg->user;
	/* Make sure the user still exists. */
	if (user && !user->valid) {
		/* The user for the message went away, so give up. */
		ipmi_inc_stat(intf, unhandled_local_responses);
		ipmi_free_recv_msg(recv_msg);
	} else {
		struct ipmi_system_interface_addr *smi_addr;

		ipmi_inc_stat(intf, handled_local_responses);
		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv_msg->msgid = msg->msgid;
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &(recv_msg->addr));
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		/* Strip netfn/LUN and cmd; keep completion code + data. */
		memcpy(recv_msg->msg_data,
		       &(msg->rsp[2]),
		       msg->rsp_size - 2);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = msg->rsp_size - 2;
		deliver_response(recv_msg);
	}

	return 0;
}
3639
3640 /*
3641 * Handle a received message. Return 1 if the message should be requeued,
3642 * 0 if the message should be freed, or -1 if the message should not
3643 * be freed or requeued.
3644 */
/*
 * Handle a single received SMI message, dispatching on the command in
 * the response: send-message confirmations, "Get Message" (receive
 * queue) data, async events, and local BMC responses.  Returns 1 if
 * the message should be requeued, 0 if it should be freed, or -1 if
 * it should be neither freed nor requeued (per the contract above).
 *
 * Fix: the "too small" warning message read "to small".
 */
static int handle_one_recv_msg(ipmi_smi_t intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

#ifdef DEBUG_MSGING
	int m;
	printk("Recv:");
	for (m = 0; m < msg->rsp_size; m++)
		printk(" %2.2x", msg->rsp[m]);
	printk("\n");
#endif
	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		printk(KERN_WARNING PFX "BMC returned too small a message"
		       " for netfn %x cmd %x, got %d bytes\n",
		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response is not even
		 * marginally correct.
		 */
		printk(KERN_WARNING PFX "BMC returned incorrect response,"
		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
		       (msg->data[0] >> 2) | 1, msg->data[1],
		       msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		/*
		 * NOTE(review): msg->rsp[2] is read below but only
		 * rsp_size >= 2 is guaranteed here; confirm whether
		 * this check should be "< 3".
		 */
		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		/* Make sure the user still exists. */
		if (!recv_msg->user || !recv_msg->user->valid)
			goto out;

		/* The completion code of the Send Message is the payload. */
		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_response(recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (intf->curr_channel < IPMI_MAX_CHANNELS) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		/* Dispatch on the medium of the channel it arrived on. */
		switch (intf->channels[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/* Check for OEM Channels.  Clients had better
			   register for these commands. */
			if ((intf->channels[chan].medium
			     >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (intf->channels[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

 out:
	return requeue;
}
3802
3803 /*
3804 * If there are messages in the queue or pretimeouts, handle them.
3805 */
/*
 * If there are messages in the queue or pretimeouts, handle them.
 *
 * Runs from tasklet context (or inline in run-to-completion mode, in
 * which case the spinlock is skipped).  Processes waiting messages in
 * order, stopping at the first one that asks to be requeued, then
 * delivers any pending watchdog pretimeout to all users.
 */
static void handle_new_recv_msgs(ipmi_smi_t intf)
{
	struct ipmi_smi_msg *smi_msg;
	unsigned long flags = 0;
	int rv;
	int run_to_completion = intf->run_to_completion;

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
	while (!list_empty(&intf->waiting_msgs)) {
		smi_msg = list_entry(intf->waiting_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		/* Drop the lock while the handler runs; it may sleep-free. */
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
		if (rv == 0) {
			/* Message handled */
			ipmi_free_smi_msg(smi_msg);
		} else if (rv < 0) {
			/* Fatal error on the message, del but don't free. */
		} else {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.
			 */
			list_add(&smi_msg->link, &intf->waiting_msgs);
			break;
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

	/*
	 * If the pretimout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		ipmi_user_t user;

		rcu_read_lock();
		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		rcu_read_unlock();
	}
}
3858
smi_recv_tasklet(unsigned long val)3859 static void smi_recv_tasklet(unsigned long val)
3860 {
3861 handle_new_recv_msgs((ipmi_smi_t) val);
3862 }
3863
3864 /* Handle a new message from the lower layer. */
/*
 * Entry point for the lower (SMI) layer to hand a received message to
 * the message handler.  Local confirmations of Send Message commands
 * are consumed here (starting or erroring the sequence timer); all
 * other messages are queued and processed by the receive tasklet.
 */
void ipmi_smi_msg_received(ipmi_smi_t intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion;


	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL)) {
		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int chan = msg->rsp[3] & 0xf;

			/* Got an error sending the message, handle it. */
			if (chan >= IPMI_MAX_CHANNELS)
				; /* This shouldn't happen */
			else if ((intf->channels[chan].medium
				  == IPMI_CHANNEL_MEDIUM_8023LAN)
				 || (intf->channels[chan].medium
				     == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			/* Fail the pending sequence entry immediately. */
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);

		ipmi_free_smi_msg(msg);
		goto out;
	}

	/*
	 * To preserve message order, if the list is not empty, we
	 * tack this message onto the end of the list.
	 */
	run_to_completion = intf->run_to_completion;
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

	/* Actual processing happens in smi_recv_tasklet(). */
	tasklet_schedule(&intf->recv_tasklet);
 out:
	return;
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
3931
/*
 * Called by the lower layer when a watchdog pretimeout occurs.  Flags
 * that one pretimeout needs delivering and kicks the receive tasklet,
 * which delivers it to all users from tasklet context.
 */
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3938
3939 static struct ipmi_smi_msg *
smi_from_recv_msg(ipmi_smi_t intf,struct ipmi_recv_msg * recv_msg,unsigned char seq,long seqid)3940 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3941 unsigned char seq, long seqid)
3942 {
3943 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3944 if (!smi_msg)
3945 /*
3946 * If we can't allocate the message, then just return, we
3947 * get 4 retries, so this should be ok.
3948 */
3949 return NULL;
3950
3951 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3952 smi_msg->data_size = recv_msg->msg.data_len;
3953 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3954
3955 #ifdef DEBUG_MSGING
3956 {
3957 int m;
3958 printk("Resend: ");
3959 for (m = 0; m < smi_msg->data_size; m++)
3960 printk(" %2.2x", smi_msg->data[m]);
3961 printk("\n");
3962 }
3963 #endif
3964 return smi_msg;
3965 }
3966
/*
 * Age one sequence-table entry by timeout_period.  If it has expired
 * with no retries left, move its receive message onto the caller's
 * timeouts list; otherwise retransmit it.
 *
 * Called with intf->seq_lock held (flags points at the caller's saved
 * irq flags); the lock is dropped around the retransmit send and
 * reacquired before returning.
 */
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
			      struct list_head *timeouts, long timeout_period,
			      int slot, unsigned long *flags)
{
	struct ipmi_recv_msg *msg;
	struct ipmi_smi_handlers *handlers;

	/* Interface not fully registered yet. */
	if (intf->intf_num == -1)
		return;

	if (!ent->inuse)
		return;

	ent->timeout -= timeout_period;
	if (ent->timeout > 0)
		return;

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			/* Couldn't build the retransmit; count it dropped. */
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		/* The sender may not be called with seq_lock held. */
		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		handlers = intf->handlers;
		if (handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			intf->handlers->sender(intf->send_info,
					       smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}
4043
/*
 * Periodic timeout processing for every registered interface: kick the
 * receive tasklet, age the sequence table (erroring out messages that
 * have exhausted their retries), and wind down maintenance mode when
 * its auto-timeout expires.  timeout_period is in the same units as
 * the sequence timeouts (milliseconds here).
 */
static void ipmi_timeout_handler(long timeout_period)
{
	ipmi_smi_t intf;
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long flags;
	int i;

	rcu_read_lock();
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		tasklet_schedule(&intf->recv_tasklet);

		/*
		 * Go through the seq table and find any messages that
		 * have timed out, putting them in the timeouts
		 * list.
		 */
		INIT_LIST_HEAD(&timeouts);
		spin_lock_irqsave(&intf->seq_lock, flags);
		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
			check_msg_timeout(intf, &(intf->seq_table[i]),
					  &timeouts, timeout_period, i,
					  &flags);
		spin_unlock_irqrestore(&intf->seq_lock, flags);

		/* Deliver the errors outside the seq_lock. */
		list_for_each_entry_safe(msg, msg2, &timeouts, link)
			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);

		/*
		 * Maintenance mode handling.  Check the timeout
		 * optimistically before we claim the lock.  It may
		 * mean a timeout gets missed occasionally, but that
		 * only means the timeout gets extended by one period
		 * in that case.  No big deal, and it avoids the lock
		 * most of the time.
		 */
		if (intf->auto_maintenance_timeout > 0) {
			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
			if (intf->auto_maintenance_timeout > 0) {
				intf->auto_maintenance_timeout
					-= timeout_period;
				if (!intf->maintenance_mode
				    && (intf->auto_maintenance_timeout <= 0)) {
					intf->maintenance_mode_enable = 0;
					maintenance_mode_update(intf);
				}
			}
			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
					       flags);
		}
	}
	rcu_read_unlock();
}
4097
ipmi_request_event(void)4098 static void ipmi_request_event(void)
4099 {
4100 ipmi_smi_t intf;
4101 struct ipmi_smi_handlers *handlers;
4102
4103 rcu_read_lock();
4104 /*
4105 * Called from the timer, no need to check if handlers is
4106 * valid.
4107 */
4108 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4109 /* No event requests when in maintenance mode. */
4110 if (intf->maintenance_mode_enable)
4111 continue;
4112
4113 handlers = intf->handlers;
4114 if (handlers)
4115 handlers->request_events(intf->send_info);
4116 }
4117 rcu_read_unlock();
4118 }
4119
/* Periodic housekeeping timer, armed by ipmi_timeout(). */
static struct timer_list ipmi_timer;

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* Set at module shutdown; stops the timer from rescheduling itself. */
static atomic_t stop_operation;
static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4138
ipmi_timeout(unsigned long data)4139 static void ipmi_timeout(unsigned long data)
4140 {
4141 if (atomic_read(&stop_operation))
4142 return;
4143
4144 ticks_to_req_ev--;
4145 if (ticks_to_req_ev == 0) {
4146 ipmi_request_event();
4147 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4148 }
4149
4150 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
4151
4152 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4153 }
4154
4155
/* Outstanding-allocation counters, reported at module unload. */
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

/* FIXME - convert these to slabs. */
/* Default "done" handler for messages from ipmi_alloc_smi_msg(). */
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}
4165
ipmi_alloc_smi_msg(void)4166 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4167 {
4168 struct ipmi_smi_msg *rv;
4169 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4170 if (rv) {
4171 rv->done = free_smi_msg;
4172 rv->user_data = NULL;
4173 atomic_inc(&smi_msg_inuse_count);
4174 }
4175 return rv;
4176 }
4177 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4178
/* Default "done" handler for messages from ipmi_alloc_recv_msg(). */
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}
4184
ipmi_alloc_recv_msg(void)4185 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4186 {
4187 struct ipmi_recv_msg *rv;
4188
4189 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4190 if (rv) {
4191 rv->user = NULL;
4192 rv->done = free_recv_msg;
4193 atomic_inc(&recv_msg_inuse_count);
4194 }
4195 return rv;
4196 }
4197
ipmi_free_recv_msg(struct ipmi_recv_msg * msg)4198 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4199 {
4200 if (msg->user)
4201 kref_put(&msg->user->refcount, free_user);
4202 msg->done(msg);
4203 }
4204 EXPORT_SYMBOL(ipmi_free_recv_msg);
4205
#ifdef CONFIG_IPMI_PANIC_EVENT

/* Number of panic-path message completions still outstanding. */
static atomic_t panic_done_count = ATOMIC_INIT(0);

/*
 * Panic-path "done" handler for the SMI message: the message lives on
 * the stack, so only record the completion, never free.
 */
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}
4214
/*
 * Panic-path "done" handler for the receive message: the message lives
 * on the stack, so only record the completion, never free.
 */
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}
4219
4220 /*
4221 * Inside a panic, send a message and wait for a response.
4222 */
/*
 * Inside a panic, send a message and wait for a response.
 *
 * Uses on-stack message buffers with the dummy "done" handlers above,
 * then busy-polls the interface until both completions arrive.  Safe
 * only in panic context, where no other CPU is running this code.
 */
static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	/* One completion each for the SMI and receive messages. */
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->channels[0].address,
			    intf->channels[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		/* The request never went out; nothing will complete. */
		atomic_sub(2, &panic_done_count);
	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}
4251
4252 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * If this is a successful Get Event Receiver response from the system
 * interface, record the event receiver address and LUN on the
 * interface; any other message is ignored.
 */
static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	if (msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return;
	if (msg->msg.netfn != IPMI_NETFN_SENSOR_EVENT_RESPONSE)
		return;
	if (msg->msg.cmd != IPMI_GET_EVENT_RECEIVER_CMD)
		return;
	if (msg->msg.data[0] != IPMI_CC_NO_ERROR)
		return;

	/* A get event receiver command, save it. */
	intf->event_receiver = msg->msg.data[1];
	intf->event_receiver_lun = msg->msg.data[2] & 0x3;
}
4264
/*
 * If this is a successful Get Device ID response from the system
 * interface, record whether the local device is an SEL device and an
 * event generator; any other message is ignored.
 */
static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	if (msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return;
	if (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
		return;
	if (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)
		return;
	if (msg->msg.data[0] != IPMI_CC_NO_ERROR)
		return;

	/*
	 * A get device id command, save if we are an event
	 * receiver or generator.
	 */
	intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
	intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
}
4279 #endif
4280
send_panic_events(char * str)4281 static void send_panic_events(char *str)
4282 {
4283 struct kernel_ipmi_msg msg;
4284 ipmi_smi_t intf;
4285 unsigned char data[16];
4286 struct ipmi_system_interface_addr *si;
4287 struct ipmi_addr addr;
4288
4289 si = (struct ipmi_system_interface_addr *) &addr;
4290 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4291 si->channel = IPMI_BMC_CHANNEL;
4292 si->lun = 0;
4293
4294 /* Fill in an event telling that we have failed. */
4295 msg.netfn = 0x04; /* Sensor or Event. */
4296 msg.cmd = 2; /* Platform event command. */
4297 msg.data = data;
4298 msg.data_len = 8;
4299 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4300 data[1] = 0x03; /* This is for IPMI 1.0. */
4301 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4302 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4303 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4304
4305 /*
4306 * Put a few breadcrumbs in. Hopefully later we can add more things
4307 * to make the panic events more useful.
4308 */
4309 if (str) {
4310 data[3] = str[0];
4311 data[6] = str[1];
4312 data[7] = str[2];
4313 }
4314
4315 /* For every registered interface, send the event. */
4316 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4317 if (!intf->handlers)
4318 /* Interface is not ready. */
4319 continue;
4320
4321 intf->run_to_completion = 1;
4322 /* Send the event announcing the panic. */
4323 intf->handlers->set_run_to_completion(intf->send_info, 1);
4324 ipmi_panic_request_and_wait(intf, &addr, &msg);
4325 }
4326
4327 #ifdef CONFIG_IPMI_PANIC_STRING
4328 /*
4329 * On every interface, dump a bunch of OEM event holding the
4330 * string.
4331 */
4332 if (!str)
4333 return;
4334
4335 /* For every registered interface, send the event. */
4336 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4337 char *p = str;
4338 struct ipmi_ipmb_addr *ipmb;
4339 int j;
4340
4341 if (intf->intf_num == -1)
4342 /* Interface was not ready yet. */
4343 continue;
4344
4345 /*
4346 * intf_num is used as an marker to tell if the
4347 * interface is valid. Thus we need a read barrier to
4348 * make sure data fetched before checking intf_num
4349 * won't be used.
4350 */
4351 smp_rmb();
4352
4353 /*
4354 * First job here is to figure out where to send the
4355 * OEM events. There's no way in IPMI to send OEM
4356 * events using an event send command, so we have to
4357 * find the SEL to put them in and stick them in
4358 * there.
4359 */
4360
4361 /* Get capabilities from the get device id. */
4362 intf->local_sel_device = 0;
4363 intf->local_event_generator = 0;
4364 intf->event_receiver = 0;
4365
4366 /* Request the device info from the local MC. */
4367 msg.netfn = IPMI_NETFN_APP_REQUEST;
4368 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4369 msg.data = NULL;
4370 msg.data_len = 0;
4371 intf->null_user_handler = device_id_fetcher;
4372 ipmi_panic_request_and_wait(intf, &addr, &msg);
4373
4374 if (intf->local_event_generator) {
4375 /* Request the event receiver from the local MC. */
4376 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4377 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4378 msg.data = NULL;
4379 msg.data_len = 0;
4380 intf->null_user_handler = event_receiver_fetcher;
4381 ipmi_panic_request_and_wait(intf, &addr, &msg);
4382 }
4383 intf->null_user_handler = NULL;
4384
4385 /*
4386 * Validate the event receiver. The low bit must not
4387 * be 1 (it must be a valid IPMB address), it cannot
4388 * be zero, and it must not be my address.
4389 */
4390 if (((intf->event_receiver & 1) == 0)
4391 && (intf->event_receiver != 0)
4392 && (intf->event_receiver != intf->channels[0].address)) {
4393 /*
4394 * The event receiver is valid, send an IPMB
4395 * message.
4396 */
4397 ipmb = (struct ipmi_ipmb_addr *) &addr;
4398 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4399 ipmb->channel = 0; /* FIXME - is this right? */
4400 ipmb->lun = intf->event_receiver_lun;
4401 ipmb->slave_addr = intf->event_receiver;
4402 } else if (intf->local_sel_device) {
4403 /*
4404 * The event receiver was not valid (or was
4405 * me), but I am an SEL device, just dump it
4406 * in my SEL.
4407 */
4408 si = (struct ipmi_system_interface_addr *) &addr;
4409 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4410 si->channel = IPMI_BMC_CHANNEL;
4411 si->lun = 0;
4412 } else
4413 continue; /* No where to send the event. */
4414
4415 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4416 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4417 msg.data = data;
4418 msg.data_len = 16;
4419
4420 j = 0;
4421 while (*p) {
4422 int size = strlen(p);
4423
4424 if (size > 11)
4425 size = 11;
4426 data[0] = 0;
4427 data[1] = 0;
4428 data[2] = 0xf0; /* OEM event without timestamp. */
4429 data[3] = intf->channels[0].address;
4430 data[4] = j++; /* sequence # */
4431 /*
4432 * Always give 11 bytes, so strncpy will fill
4433 * it with zeroes for me.
4434 */
4435 strncpy(data+5, p, 11);
4436 p += size;
4437
4438 ipmi_panic_request_and_wait(intf, &addr, &msg);
4439 }
4440 }
4441 #endif /* CONFIG_IPMI_PANIC_STRING */
4442 }
4443 #endif /* CONFIG_IPMI_PANIC_EVENT */
4444
/* Set once the panic notifier has run; guards against recursive panics. */
static int has_panicked;

/*
 * Panic notifier callback.  Switches every ready interface into
 * run-to-completion (polled, lock-free) mode so messages can still be
 * delivered from panic context, then (if configured) sends the panic
 * events.
 *
 * @this:  the notifier block (unused).
 * @event: notifier event code (unused).
 * @ptr:   the string passed to panic().
 *
 * Always returns NOTIFY_DONE so other panic notifiers still run.
 */
static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	ipmi_smi_t intf;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers)
			/* Interface is not ready. */
			continue;

		intf->run_to_completion = 1;
		intf->handlers->set_run_to_completion(intf->send_info, 1);
	}

#ifdef CONFIG_IPMI_PANIC_EVENT
	send_panic_events(ptr);
#endif

	return NOTIFY_DONE;
}
4473
/*
 * Hooked onto panic_notifier_list at init time; high priority so the
 * IPMI panic handling runs before most other panic notifiers.
 */
static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};
4479
ipmi_init_msghandler(void)4480 static int ipmi_init_msghandler(void)
4481 {
4482 int rv;
4483
4484 if (initialized)
4485 return 0;
4486
4487 rv = driver_register(&ipmidriver.driver);
4488 if (rv) {
4489 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4490 return rv;
4491 }
4492
4493 printk(KERN_INFO "ipmi message handler version "
4494 IPMI_DRIVER_VERSION "\n");
4495
4496 #ifdef CONFIG_PROC_FS
4497 proc_ipmi_root = proc_mkdir("ipmi", NULL);
4498 if (!proc_ipmi_root) {
4499 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
4500 return -ENOMEM;
4501 }
4502
4503 #endif /* CONFIG_PROC_FS */
4504
4505 setup_timer(&ipmi_timer, ipmi_timeout, 0);
4506 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4507
4508 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
4509
4510 initialized = 1;
4511
4512 return 0;
4513 }
4514
ipmi_init_msghandler_mod(void)4515 static int __init ipmi_init_msghandler_mod(void)
4516 {
4517 ipmi_init_msghandler();
4518 return 0;
4519 }
4520
/*
 * Module exit: undo everything ipmi_init_msghandler() set up.  The
 * teardown order matters: unhook the panic notifier first, stop the
 * timer before tearing down /proc and the bus driver, and only then
 * report any leaked message buffers.
 */
static void __exit cleanup_ipmi(void)
{
	int count;

	if (!initialized)
		return;

	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);

	/*
	 * This can't be called if any interfaces exist, so no worry
	 * about shutting down the interfaces.
	 */

	/*
	 * Tell the timer to stop, then wait for it to stop.  This
	 * avoids problems with race conditions removing the timer
	 * here.
	 */
	atomic_inc(&stop_operation);
	del_timer_sync(&ipmi_timer);

#ifdef CONFIG_PROC_FS
	remove_proc_entry(proc_ipmi_root->name, NULL);
#endif /* CONFIG_PROC_FS */

	driver_unregister(&ipmidriver.driver);

	initialized = 0;

	/* Check for buffer leaks. */
	count = atomic_read(&smi_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
		       count);
	count = atomic_read(&recv_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "recv message count %d at exit\n",
		       count);
}
/* Module entry/exit registration and metadata. */
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
		   " interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
4569