1 /*
2 * ipmi_msghandler.c
3 *
4 * Incoming and outgoing message routing for an IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38 #include <linux/sched.h>
39 #include <linux/poll.h>
40 #include <linux/spinlock.h>
41 #include <linux/rwsem.h>
42 #include <linux/slab.h>
43 #include <linux/ipmi.h>
44 #include <linux/ipmi_smi.h>
45 #include <linux/notifier.h>
46 #include <linux/init.h>
47
48 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
49 static int ipmi_init_msghandler(void);
50
51 static int initialized = 0;
52
53 #define MAX_EVENTS_IN_QUEUE 25
54
55 /* Don't let a message sit in a queue forever, always time it with at lest
56 the max message timer. */
57 #define MAX_MSG_TIMEOUT 60000
58
/* A registered user of an IPMI interface.  One of these is created by
   ipmi_create_user() for each upper-layer client; it links the client's
   receive callback to the interface it is bound to.  Lifetime is
   guarded by the interface's users_lock (see struct ipmi_smi). */
struct ipmi_user
{
	/* Node in the owning interface's ->users list. */
	struct list_head link;

	/* The upper layer that handles receive messages. */
	struct ipmi_user_hndl *handler;
	/* Opaque cookie passed back to the handler on every delivery. */
	void             *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this interface receive IPMI events?  Set through
	   ipmi_set_gets_events(); when set, queued events are drained
	   to this user. */
	int gets_events;
};
73
/* Registration of a user for a specific (netfn, cmd) pair; incoming
   commands matching the pair are routed to ->user.  Entries live on an
   interface's ->cmd_rcvrs list, protected by cmd_rcvr_lock. */
struct cmd_rcvr
{
	/* Node in the interface's ->cmd_rcvrs list. */
	struct list_head link;

	/* The user that receives matching commands. */
	ipmi_user_t   user;
	/* Network function and command code this entry matches. */
	unsigned char netfn;
	unsigned char cmd;
};
82
/* One slot of an interface's sequence-number table, used to match an
   outgoing IPMB request with its eventual response (or to time it
   out).  All fields are protected by the interface's seq_lock. */
struct seq_table
{
	/* Non-zero while this slot tracks an outstanding message. */
	int                  inuse : 1;

	/* Remaining time for this entry; counted down by the periodic
	   timer.  Reset to orig_timeout when the send-message response
	   arrives (see intf_start_seq_timer()). */
	unsigned long        timeout;
	unsigned long        orig_timeout;
	/* Retransmissions still allowed before giving up. */
	unsigned int         retries_left;

	/* To verify on an incoming send message response that this is
	   the message that the response is for, we keep a sequence id
	   and increment it every time we send a message. */
	long                 seqid;

	/* This is held so we can properly respond to the message on a
	   timeout, and it is used to hold the temporary data for
	   retransmission, too. */
	struct ipmi_recv_msg *recv_msg;
};
101
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  The table index (seq,
   0..IPMI_IPMB_NUM_SEQ-1, so 6 bits) goes in bits 26+ and the per-slot
   sequence id occupies the low 22 bits.  NEXT_SEQID bounds seqid to 22
   bits, so all three macros must agree on the masks.  (The masks used
   to be inconsistent: STORE used 0xff for seq and 0x3ffffff for seqid,
   wide enough for an over-range seqid to corrupt the seq field.) */
#define STORE_SEQ_IN_MSGID(seq, seqid) ((((seq) & 0x3f) << 26) | ((seqid) & 0x3fffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3fffff);				\
	} while(0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
113
114
/* Number of slots in the per-interface sequence table; must fit the
   6-bit seq field of STORE_SEQ_IN_MSGID. */
#define IPMI_IPMB_NUM_SEQ	64
/* Per-interface state.  One of these exists for each registered SMI
   (see ipmi_register_smi()); the ipmi_interfaces[] array holds the
   live instances. */
struct ipmi_smi
{
	/* The list of upper layers that are using me.  We read-lock
	   this when delivering messages to the upper layer to keep
	   the user from going away while we are processing the
	   message.  This means that you cannot add or delete a user
	   from the receive callback. */
	rwlock_t users_lock;
	struct list_head users;

	/* The IPMI version of the BMC on the other end. */
	unsigned char       version_major;
	unsigned char       version_minor;

	/* This is the lower-layer's sender routine. */
	struct ipmi_smi_handlers *handlers;
	void                     *send_info;

	/* A table of sequence numbers for this interface.  We use the
	   sequence numbers for IPMB messages that go out of the
	   interface to match them up with their responses.  A routine
	   is called periodically to time the items in this list. */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;		/* next slot to try in intf_next_seq() */

	/* Messages that were delayed for some reason (out of memory,
	   for instance), will go in here to be processed later in a
	   periodic timer interrupt. */
	spinlock_t       waiting_msgs_lock;
	struct list_head waiting_msgs;

	/* The list of command receivers that are registered for commands
	   on this interface. */
	rwlock_t	 cmd_rcvr_lock;
	struct list_head cmd_rcvrs;

	/* Events that were queues because no one was there to receive
	   them.  Bounded by MAX_EVENTS_IN_QUEUE. */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */

	/* This will be non-null if someone registers to receive all
	   IPMI commands (this is for interface emulation).  There
	   may not be any things in the cmd_rcvrs list above when
	   this is registered. */
	ipmi_user_t all_cmd_rcvr;

	/* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	   but may be changed by the user. */
	unsigned char my_address;

	/* My LUN.  This should generally stay the SMS LUN, but just in
	   case... */
	unsigned char my_lun;
};
173
174 int
ipmi_register_all_cmd_rcvr(ipmi_user_t user)175 ipmi_register_all_cmd_rcvr(ipmi_user_t user)
176 {
177 unsigned long flags;
178 int rv = -EBUSY;
179
180 write_lock_irqsave(&(user->intf->users_lock), flags);
181 write_lock(&(user->intf->cmd_rcvr_lock));
182 if ((user->intf->all_cmd_rcvr == NULL)
183 && (list_empty(&(user->intf->cmd_rcvrs))))
184 {
185 user->intf->all_cmd_rcvr = user;
186 rv = 0;
187 }
188 write_unlock(&(user->intf->cmd_rcvr_lock));
189 write_unlock_irqrestore(&(user->intf->users_lock), flags);
190 return rv;
191 }
192
193 int
ipmi_unregister_all_cmd_rcvr(ipmi_user_t user)194 ipmi_unregister_all_cmd_rcvr(ipmi_user_t user)
195 {
196 unsigned long flags;
197 int rv = -EINVAL;
198
199 write_lock_irqsave(&(user->intf->users_lock), flags);
200 write_lock(&(user->intf->cmd_rcvr_lock));
201 if (user->intf->all_cmd_rcvr == user)
202 {
203 user->intf->all_cmd_rcvr = NULL;
204 rv = 0;
205 }
206 write_unlock(&(user->intf->cmd_rcvr_lock));
207 write_unlock_irqrestore(&(user->intf->users_lock), flags);
208 return rv;
209 }
210
211
212 #define MAX_IPMI_INTERFACES 4
213 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
214
215 /* Used to keep interfaces from going away while operations are
216 operating on interfaces. Grab read if you are not modifying the
217 interfaces, write if you are. */
218 static DECLARE_RWSEM(interfaces_sem);
219
220 /* Directly protects the ipmi_interfaces data structure. This is
221 claimed in the timer interrupt. */
222 static spinlock_t interfaces_lock = SPIN_LOCK_UNLOCKED;
223
224 /* List of watchers that want to know when smi's are added and
225 deleted. */
226 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
227 static DECLARE_RWSEM(smi_watchers_sem);
228
ipmi_smi_watcher_register(struct ipmi_smi_watcher * watcher)229 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
230 {
231 int i;
232
233 down_read(&interfaces_sem);
234 down_write(&smi_watchers_sem);
235 list_add(&(watcher->link), &smi_watchers);
236 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
237 if (ipmi_interfaces[i] != NULL) {
238 watcher->new_smi(i);
239 }
240 }
241 up_write(&smi_watchers_sem);
242 up_read(&interfaces_sem);
243 return 0;
244 }
245
ipmi_smi_watcher_unregister(struct ipmi_smi_watcher * watcher)246 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
247 {
248 down_write(&smi_watchers_sem);
249 list_del(&(watcher->link));
250 up_write(&smi_watchers_sem);
251 return 0;
252 }
253
254 int
ipmi_addr_equal(struct ipmi_addr * addr1,struct ipmi_addr * addr2)255 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
256 {
257 if (addr1->addr_type != addr2->addr_type)
258 return 0;
259
260 if (addr1->channel != addr2->channel)
261 return 0;
262
263 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
264 struct ipmi_system_interface_addr *smi_addr1
265 = (struct ipmi_system_interface_addr *) addr1;
266 struct ipmi_system_interface_addr *smi_addr2
267 = (struct ipmi_system_interface_addr *) addr2;
268 return (smi_addr1->lun == smi_addr2->lun);
269 }
270
271 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
272 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
273 {
274 struct ipmi_ipmb_addr *ipmb_addr1
275 = (struct ipmi_ipmb_addr *) addr1;
276 struct ipmi_ipmb_addr *ipmb_addr2
277 = (struct ipmi_ipmb_addr *) addr2;
278
279 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
280 && (ipmb_addr1->lun == ipmb_addr2->lun));
281 }
282
283 return 1;
284 }
285
ipmi_validate_addr(struct ipmi_addr * addr,int len)286 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
287 {
288 if (len < sizeof(struct ipmi_system_interface_addr)) {
289 return -EINVAL;
290 }
291
292 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
293 if (addr->channel != IPMI_BMC_CHANNEL)
294 return -EINVAL;
295 return 0;
296 }
297
298 if ((addr->channel == IPMI_BMC_CHANNEL)
299 || (addr->channel >= IPMI_NUM_CHANNELS)
300 || (addr->channel < 0))
301 return -EINVAL;
302
303 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
304 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
305 {
306 if (len < sizeof(struct ipmi_ipmb_addr)) {
307 return -EINVAL;
308 }
309 return 0;
310 }
311
312 return -EINVAL;
313 }
314
ipmi_addr_length(int addr_type)315 unsigned int ipmi_addr_length(int addr_type)
316 {
317 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
318 return sizeof(struct ipmi_system_interface_addr);
319
320 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
321 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
322 {
323 return sizeof(struct ipmi_ipmb_addr);
324 }
325
326 return 0;
327 }
328
deliver_response(struct ipmi_recv_msg * msg)329 static void deliver_response(struct ipmi_recv_msg *msg)
330 {
331 msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data);
332 }
333
/* Find the next sequence number not being used and add the given
   message with the given timeout to the sequence table.  This must be
   called with the interface's seq_lock held.  On success *seq gets the
   table index and *seqid the per-slot sequence id; returns -EAGAIN
   when every slot is in use. */
static int intf_next_seq(ipmi_smi_t           intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

	/* Scan circularly starting at curr_seq.  The loop stops at the
	   first free slot or after nearly a full wrap; the slot just
	   before curr_seq is not tested inside the loop, so the check
	   below re-tests seq_table[i] to cover it. */
	for (i=intf->curr_seq;
	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i=(i+1)%IPMI_IPMB_NUM_SEQ)
	{
		if (! intf->seq_table[i].inuse)
			break;
	}

	if (! intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/* Start with the maximum timeout, when the send response
		   comes in we will start the real timer. */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].inuse = 1;
		/* Bump the slot's seqid so a stale response for a prior
		   use of this slot cannot match (see intf_find_seq). */
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
	} else {
		/* Every slot is occupied. */
		rv = -EAGAIN;
	}

	return rv;
}
374
/* Return the receive message for the given sequence number and
   release the sequence number so it can be reused.  Some other data
   is passed in to be sure the message matches up correctly (to help
   guard against message coming in after their timeout and the
   sequence number being reused).  Returns 0 and sets *recv_msg on a
   match, -EINVAL for an out-of-range seq, -ENODEV otherwise. */
static int intf_find_seq(ipmi_smi_t           intf,
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int           rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&(intf->seq_lock), flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		/* Only claim the slot when channel, command, netfn and
		   address all agree with the stored request. */
		if ((msg->addr.channel == channel)
		    && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &(msg->addr))))
		{
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	return rv;
}
412
413
414 /* Start the timer for a specific sequence table entry. */
intf_start_seq_timer(ipmi_smi_t intf,long msgid)415 static int intf_start_seq_timer(ipmi_smi_t intf,
416 long msgid)
417 {
418 int rv = -ENODEV;
419 unsigned long flags;
420 unsigned char seq;
421 unsigned long seqid;
422
423
424 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
425
426 spin_lock_irqsave(&(intf->seq_lock), flags);
427 /* We do this verification because the user can be deleted
428 while a message is outstanding. */
429 if ((intf->seq_table[seq].inuse)
430 && (intf->seq_table[seq].seqid == seqid))
431 {
432 struct seq_table *ent = &(intf->seq_table[seq]);
433 ent->timeout = ent->orig_timeout;
434 }
435 spin_unlock_irqrestore(&(intf->seq_lock), flags);
436
437 return rv;
438 }
439
440
ipmi_create_user(unsigned int if_num,struct ipmi_user_hndl * handler,void * handler_data,ipmi_user_t * user)441 int ipmi_create_user(unsigned int if_num,
442 struct ipmi_user_hndl *handler,
443 void *handler_data,
444 ipmi_user_t *user)
445 {
446 unsigned long flags;
447 ipmi_user_t new_user;
448 int rv = 0;
449
450 /* There is no module usecount here, because it's not
451 required. Since this can only be used by and called from
452 other modules, they will implicitly use this module, and
453 thus this can't be removed unless the other modules are
454 removed. */
455
456 if (handler == NULL)
457 return -EINVAL;
458
459 /* Make sure the driver is actually initialized, this handles
460 problems with initialization order. */
461 if (!initialized) {
462 rv = ipmi_init_msghandler();
463 if (rv)
464 return rv;
465
466 /* The init code doesn't return an error if it was turned
467 off, but it won't initialize. Check that. */
468 if (!initialized)
469 return -ENODEV;
470 }
471
472 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
473 if (! new_user)
474 return -ENOMEM;
475
476 down_read(&interfaces_sem);
477 if ((if_num > MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL)
478 {
479 rv = -EINVAL;
480 goto out_unlock;
481 }
482
483 new_user->handler = handler;
484 new_user->handler_data = handler_data;
485 new_user->intf = ipmi_interfaces[if_num];
486 new_user->gets_events = 0;
487
488 rv = new_user->intf->handlers->new_user(new_user->intf->send_info);
489 if (rv)
490 goto out_unlock;
491
492 write_lock_irqsave(&(new_user->intf->users_lock), flags);
493 list_add_tail(&(new_user->link), &(new_user->intf->users));
494 write_unlock_irqrestore(&(new_user->intf->users_lock), flags);
495
496 out_unlock:
497 if (rv) {
498 kfree(new_user);
499 } else {
500 *user = new_user;
501 }
502
503 up_read(&interfaces_sem);
504 return rv;
505 }
506
ipmi_destroy_user_nolock(ipmi_user_t user)507 static int ipmi_destroy_user_nolock(ipmi_user_t user)
508 {
509 int rv = -ENODEV;
510 ipmi_user_t t_user;
511 struct list_head *entry, *entry2;
512 int i;
513 unsigned long flags;
514
515 /* Find the user and delete them from the list. */
516 list_for_each(entry, &(user->intf->users)) {
517 t_user = list_entry(entry, struct ipmi_user, link);
518 if (t_user == user) {
519 list_del(entry);
520 rv = 0;
521 break;
522 }
523 }
524
525 if (rv) {
526 goto out_unlock;
527 }
528
529 /* Remove the user from the interfaces sequence table. */
530 spin_lock_irqsave(&(user->intf->seq_lock), flags);
531 for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
532 if (user->intf->seq_table[i].inuse
533 && (user->intf->seq_table[i].recv_msg->user == user))
534 {
535 user->intf->seq_table[i].inuse = 0;
536 }
537 }
538 spin_unlock_irqrestore(&(user->intf->seq_lock), flags);
539
540 /* Remove the user from the command receiver's table. */
541 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
542 list_for_each_safe(entry, entry2, &(user->intf->cmd_rcvrs)) {
543 struct cmd_rcvr *rcvr;
544 rcvr = list_entry(entry, struct cmd_rcvr, link);
545 if (rcvr->user == user) {
546 list_del(entry);
547 kfree(rcvr);
548 }
549 }
550 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
551
552 kfree(user);
553
554 out_unlock:
555
556 return rv;
557 }
558
ipmi_destroy_user(ipmi_user_t user)559 int ipmi_destroy_user(ipmi_user_t user)
560 {
561 int rv;
562 ipmi_smi_t intf = user->intf;
563 unsigned long flags;
564
565 down_read(&interfaces_sem);
566 write_lock_irqsave(&(intf->users_lock), flags);
567 rv = ipmi_destroy_user_nolock(user);
568 if (!rv)
569 intf->handlers->user_left(intf->send_info);
570
571 write_unlock_irqrestore(&(intf->users_lock), flags);
572 up_read(&interfaces_sem);
573 return rv;
574 }
575
ipmi_get_version(ipmi_user_t user,unsigned char * major,unsigned char * minor)576 void ipmi_get_version(ipmi_user_t user,
577 unsigned char *major,
578 unsigned char *minor)
579 {
580 *major = user->intf->version_major;
581 *minor = user->intf->version_minor;
582 }
583
ipmi_set_my_address(ipmi_user_t user,unsigned char address)584 void ipmi_set_my_address(ipmi_user_t user,
585 unsigned char address)
586 {
587 user->intf->my_address = address;
588 }
589
ipmi_get_my_address(ipmi_user_t user)590 unsigned char ipmi_get_my_address(ipmi_user_t user)
591 {
592 return user->intf->my_address;
593 }
594
ipmi_set_my_LUN(ipmi_user_t user,unsigned char LUN)595 void ipmi_set_my_LUN(ipmi_user_t user,
596 unsigned char LUN)
597 {
598 user->intf->my_lun = LUN & 0x3;
599 }
600
ipmi_get_my_LUN(ipmi_user_t user)601 unsigned char ipmi_get_my_LUN(ipmi_user_t user)
602 {
603 return user->intf->my_lun;
604 }
605
ipmi_set_gets_events(ipmi_user_t user,int val)606 int ipmi_set_gets_events(ipmi_user_t user, int val)
607 {
608 unsigned long flags;
609 struct list_head *e, *e2;
610 struct ipmi_recv_msg *msg;
611
612 read_lock(&(user->intf->users_lock));
613 spin_lock_irqsave(&(user->intf->events_lock), flags);
614 user->gets_events = val;
615
616 if (val) {
617 /* Deliver any queued events. */
618 list_for_each_safe(e, e2, &(user->intf->waiting_events)) {
619 msg = list_entry(e, struct ipmi_recv_msg, link);
620 list_del(e);
621 msg->user = user;
622 deliver_response(msg);
623 }
624 }
625
626 spin_unlock_irqrestore(&(user->intf->events_lock), flags);
627 read_unlock(&(user->intf->users_lock));
628
629 return 0;
630 }
631
ipmi_register_for_cmd(ipmi_user_t user,unsigned char netfn,unsigned char cmd)632 int ipmi_register_for_cmd(ipmi_user_t user,
633 unsigned char netfn,
634 unsigned char cmd)
635 {
636 struct list_head *entry;
637 unsigned long flags;
638 struct cmd_rcvr *rcvr;
639 int rv = 0;
640
641
642 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
643 if (! rcvr)
644 return -ENOMEM;
645
646 read_lock(&(user->intf->users_lock));
647 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
648 if (user->intf->all_cmd_rcvr != NULL) {
649 rv = -EBUSY;
650 goto out_unlock;
651 }
652
653 /* Make sure the command/netfn is not already registered. */
654 list_for_each(entry, &(user->intf->cmd_rcvrs)) {
655 struct cmd_rcvr *cmp;
656 cmp = list_entry(entry, struct cmd_rcvr, link);
657 if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
658 rv = -EBUSY;
659 break;
660 }
661 }
662
663 if (! rv) {
664 rcvr->cmd = cmd;
665 rcvr->netfn = netfn;
666 rcvr->user = user;
667 list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
668 }
669 out_unlock:
670 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
671 read_unlock(&(user->intf->users_lock));
672
673 if (rv)
674 kfree(rcvr);
675
676 return rv;
677 }
678
ipmi_unregister_for_cmd(ipmi_user_t user,unsigned char netfn,unsigned char cmd)679 int ipmi_unregister_for_cmd(ipmi_user_t user,
680 unsigned char netfn,
681 unsigned char cmd)
682 {
683 struct list_head *entry;
684 unsigned long flags;
685 struct cmd_rcvr *rcvr;
686 int rv = -ENOENT;
687
688 read_lock(&(user->intf->users_lock));
689 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
690 /* Make sure the command/netfn is not already registered. */
691 list_for_each(entry, &(user->intf->cmd_rcvrs)) {
692 rcvr = list_entry(entry, struct cmd_rcvr, link);
693 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
694 rv = 0;
695 list_del(entry);
696 kfree(rcvr);
697 break;
698 }
699 }
700 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
701 read_unlock(&(user->intf->users_lock));
702
703 return rv;
704 }
705
/* Compute the IPMB checksum of a byte range: the two's complement of
   the 8-bit sum, so that summing the bytes plus the checksum yields
   zero modulo 256. */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char sum = 0;
	int           i;

	for (i = 0; i < size; i++)
		sum += data[i];

	return (unsigned char) -sum;
}
716
format_ipmb_msg(struct ipmi_smi_msg * smi_msg,struct ipmi_msg * msg,struct ipmi_ipmb_addr * ipmb_addr,long msgid,unsigned char ipmb_seq,int broadcast,unsigned char source_address,unsigned char source_lun)717 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
718 struct ipmi_msg *msg,
719 struct ipmi_ipmb_addr *ipmb_addr,
720 long msgid,
721 unsigned char ipmb_seq,
722 int broadcast,
723 unsigned char source_address,
724 unsigned char source_lun)
725 {
726 int i = broadcast;
727
728 /* Format the IPMB header data. */
729 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
730 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
731 smi_msg->data[2] = ipmb_addr->channel;
732 if (broadcast)
733 smi_msg->data[3] = 0;
734 smi_msg->data[i+3] = ipmb_addr->slave_addr;
735 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
736 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
737 smi_msg->data[i+6] = source_address;
738 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
739 smi_msg->data[i+8] = msg->cmd;
740
741 /* Now tack on the data to the message. */
742 if (msg->data_len > 0)
743 memcpy(&(smi_msg->data[i+9]), msg->data,
744 msg->data_len);
745 smi_msg->data_size = msg->data_len + 9;
746
747 /* Now calculate the checksum and tack it on. */
748 smi_msg->data[i+smi_msg->data_size]
749 = ipmb_checksum(&(smi_msg->data[i+6]),
750 smi_msg->data_size-6);
751
752 /* Add on the checksum size and the offset from the
753 broadcast. */
754 smi_msg->data_size += 1 + i;
755
756 smi_msg->msgid = msgid;
757 }
758
759 /* Separate from ipmi_request so that the user does not have to be
760 supplied in certain circumstances (mainly at panic time). If
761 messages are supplied, they will be freed, even if an error
762 occurs. */
i_ipmi_request(ipmi_user_t user,ipmi_smi_t intf,struct ipmi_addr * addr,long msgid,struct ipmi_msg * msg,void * supplied_smi,struct ipmi_recv_msg * supplied_recv,int priority,unsigned char source_address,unsigned char source_lun)763 static inline int i_ipmi_request(ipmi_user_t user,
764 ipmi_smi_t intf,
765 struct ipmi_addr *addr,
766 long msgid,
767 struct ipmi_msg *msg,
768 void *supplied_smi,
769 struct ipmi_recv_msg *supplied_recv,
770 int priority,
771 unsigned char source_address,
772 unsigned char source_lun)
773 {
774 int rv = 0;
775 struct ipmi_smi_msg *smi_msg;
776 struct ipmi_recv_msg *recv_msg;
777 unsigned long flags;
778
779
780 if (supplied_recv) {
781 recv_msg = supplied_recv;
782 } else {
783 recv_msg = ipmi_alloc_recv_msg();
784 if (recv_msg == NULL) {
785 return -ENOMEM;
786 }
787 }
788
789 if (supplied_smi) {
790 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
791 } else {
792 smi_msg = ipmi_alloc_smi_msg();
793 if (smi_msg == NULL) {
794 ipmi_free_recv_msg(recv_msg);
795 return -ENOMEM;
796 }
797 }
798
799 if (addr->channel > IPMI_NUM_CHANNELS) {
800 rv = -EINVAL;
801 goto out_err;
802 }
803
804 recv_msg->user = user;
805 recv_msg->msgid = msgid;
806 /* Store the message to send in the receive message so timeout
807 responses can get the proper response data. */
808 recv_msg->msg = *msg;
809
810 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
811 struct ipmi_system_interface_addr *smi_addr;
812
813
814 smi_addr = (struct ipmi_system_interface_addr *) addr;
815 if (smi_addr->lun > 3)
816 return -EINVAL;
817
818 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
819
820 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
821 && ((msg->cmd == IPMI_SEND_MSG_CMD)
822 || (msg->cmd == IPMI_GET_MSG_CMD)
823 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
824 {
825 /* We don't let the user do these, since we manage
826 the sequence numbers. */
827 rv = -EINVAL;
828 goto out_err;
829 }
830
831 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
832 rv = -EMSGSIZE;
833 goto out_err;
834 }
835
836 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
837 smi_msg->data[1] = msg->cmd;
838 smi_msg->msgid = msgid;
839 smi_msg->user_data = recv_msg;
840 if (msg->data_len > 0)
841 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
842 smi_msg->data_size = msg->data_len + 2;
843 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
844 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
845 {
846 struct ipmi_ipmb_addr *ipmb_addr;
847 unsigned char ipmb_seq;
848 long seqid;
849 int broadcast;
850 int retries;
851
852 if (addr == NULL) {
853 rv = -EINVAL;
854 goto out_err;
855 }
856
857 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
858 /* Broadcasts add a zero at the beginning of the
859 message, but otherwise is the same as an IPMB
860 address. */
861 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
862 broadcast = 1;
863 retries = 0; /* Don't retry broadcasts. */
864 } else {
865 broadcast = 0;
866 retries = 4;
867 }
868
869 /* 9 for the header and 1 for the checksum, plus
870 possibly one for the broadcast. */
871 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
872 rv = -EMSGSIZE;
873 goto out_err;
874 }
875
876 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
877 if (ipmb_addr->lun > 3) {
878 rv = -EINVAL;
879 goto out_err;
880 }
881
882 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
883
884 if (recv_msg->msg.netfn & 0x1) {
885 /* It's a response, so use the user's sequence
886 from msgid. */
887 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
888 msgid, broadcast,
889 source_address, source_lun);
890 } else {
891 /* It's a command, so get a sequence for it. */
892
893 spin_lock_irqsave(&(intf->seq_lock), flags);
894
895 /* Create a sequence number with a 1 second
896 timeout and 4 retries. */
897 /* FIXME - magic number for the timeout. */
898 rv = intf_next_seq(intf,
899 recv_msg,
900 1000,
901 retries,
902 &ipmb_seq,
903 &seqid);
904 if (rv) {
905 /* We have used up all the sequence numbers,
906 probably, so abort. */
907 spin_unlock_irqrestore(&(intf->seq_lock),
908 flags);
909 goto out_err;
910 }
911
912 /* Store the sequence number in the message,
913 so that when the send message response
914 comes back we can start the timer. */
915 format_ipmb_msg(smi_msg, msg, ipmb_addr,
916 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
917 ipmb_seq, broadcast,
918 source_address, source_lun);
919
920 /* Copy the message into the recv message data, so we
921 can retransmit it later if necessary. */
922 memcpy(recv_msg->msg_data, smi_msg->data,
923 smi_msg->data_size);
924 recv_msg->msg.data = recv_msg->msg_data;
925 recv_msg->msg.data_len = smi_msg->data_size;
926
927 /* We don't unlock until here, because we need
928 to copy the completed message into the
929 recv_msg before we release the lock.
930 Otherwise, race conditions may bite us. I
931 know that's pretty paranoid, but I prefer
932 to be correct. */
933 spin_unlock_irqrestore(&(intf->seq_lock), flags);
934 }
935 } else {
936 /* Unknown address type. */
937 rv = -EINVAL;
938 goto out_err;
939 }
940
941 #if DEBUG_MSGING
942 {
943 int m;
944 for (m=0; m<smi_msg->data_size; m++)
945 printk(" %2.2x", smi_msg->data[m]);
946 printk("\n");
947 }
948 #endif
949 intf->handlers->sender(intf->send_info, smi_msg, priority);
950
951 return 0;
952
953 out_err:
954 ipmi_free_smi_msg(smi_msg);
955 ipmi_free_recv_msg(recv_msg);
956 return rv;
957 }
958
ipmi_request(ipmi_user_t user,struct ipmi_addr * addr,long msgid,struct ipmi_msg * msg,int priority)959 int ipmi_request(ipmi_user_t user,
960 struct ipmi_addr *addr,
961 long msgid,
962 struct ipmi_msg *msg,
963 int priority)
964 {
965 return i_ipmi_request(user,
966 user->intf,
967 addr,
968 msgid,
969 msg,
970 NULL, NULL,
971 priority,
972 user->intf->my_address,
973 user->intf->my_lun);
974 }
975
ipmi_request_supply_msgs(ipmi_user_t user,struct ipmi_addr * addr,long msgid,struct ipmi_msg * msg,void * supplied_smi,struct ipmi_recv_msg * supplied_recv,int priority)976 int ipmi_request_supply_msgs(ipmi_user_t user,
977 struct ipmi_addr *addr,
978 long msgid,
979 struct ipmi_msg *msg,
980 void *supplied_smi,
981 struct ipmi_recv_msg *supplied_recv,
982 int priority)
983 {
984 return i_ipmi_request(user,
985 user->intf,
986 addr,
987 msgid,
988 msg,
989 supplied_smi,
990 supplied_recv,
991 priority,
992 user->intf->my_address,
993 user->intf->my_lun);
994 }
995
ipmi_request_with_source(ipmi_user_t user,struct ipmi_addr * addr,long msgid,struct ipmi_msg * msg,int priority,unsigned char source_address,unsigned char source_lun)996 int ipmi_request_with_source(ipmi_user_t user,
997 struct ipmi_addr *addr,
998 long msgid,
999 struct ipmi_msg *msg,
1000 int priority,
1001 unsigned char source_address,
1002 unsigned char source_lun)
1003 {
1004 return i_ipmi_request(user,
1005 user->intf,
1006 addr,
1007 msgid,
1008 msg,
1009 NULL, NULL,
1010 priority,
1011 source_address,
1012 source_lun);
1013 }
1014
ipmi_register_smi(struct ipmi_smi_handlers * handlers,void * send_info,unsigned char version_major,unsigned char version_minor,ipmi_smi_t * intf)1015 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1016 void *send_info,
1017 unsigned char version_major,
1018 unsigned char version_minor,
1019 ipmi_smi_t *intf)
1020 {
1021 int i, j;
1022 int rv;
1023 ipmi_smi_t new_intf;
1024 struct list_head *entry;
1025 unsigned long flags;
1026
1027
1028 /* Make sure the driver is actually initialized, this handles
1029 problems with initialization order. */
1030 if (!initialized) {
1031 rv = ipmi_init_msghandler();
1032 if (rv)
1033 return rv;
1034 /* The init code doesn't return an error if it was turned
1035 off, but it won't initialize. Check that. */
1036 if (!initialized)
1037 return -ENODEV;
1038 }
1039
1040 new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL);
1041 if (!new_intf)
1042 return -ENOMEM;
1043
1044 rv = -ENOMEM;
1045
1046 down_write(&interfaces_sem);
1047 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
1048 if (ipmi_interfaces[i] == NULL) {
1049 new_intf->version_major = version_major;
1050 new_intf->version_minor = version_minor;
1051 new_intf->my_address = IPMI_BMC_SLAVE_ADDR;
1052 new_intf->my_lun = 2; /* the SMS LUN. */
1053 rwlock_init(&(new_intf->users_lock));
1054 INIT_LIST_HEAD(&(new_intf->users));
1055 new_intf->handlers = handlers;
1056 new_intf->send_info = send_info;
1057 spin_lock_init(&(new_intf->seq_lock));
1058 for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
1059 new_intf->seq_table[j].inuse = 0;
1060 new_intf->seq_table[j].seqid = 0;
1061 }
1062 new_intf->curr_seq = 0;
1063 spin_lock_init(&(new_intf->waiting_msgs_lock));
1064 INIT_LIST_HEAD(&(new_intf->waiting_msgs));
1065 spin_lock_init(&(new_intf->events_lock));
1066 INIT_LIST_HEAD(&(new_intf->waiting_events));
1067 new_intf->waiting_events_count = 0;
1068 rwlock_init(&(new_intf->cmd_rcvr_lock));
1069 INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
1070 new_intf->all_cmd_rcvr = NULL;
1071
1072 spin_lock_irqsave(&interfaces_lock, flags);
1073 ipmi_interfaces[i] = new_intf;
1074 spin_unlock_irqrestore(&interfaces_lock, flags);
1075
1076 rv = 0;
1077 *intf = new_intf;
1078 break;
1079 }
1080 }
1081
1082 /* We convert to a read semaphore here. It's possible the
1083 interface was removed between the calls, we have to recheck
1084 afterwards. */
1085 up_write(&interfaces_sem);
1086 down_read(&interfaces_sem);
1087
1088 if (ipmi_interfaces[i] != new_intf)
1089 /* Well, it went away. Just return. */
1090 goto out;
1091
1092 if (rv == 0) {
1093 /* Call all the watcher interfaces to tell them that a
1094 new interface is available. */
1095 down_read(&smi_watchers_sem);
1096 list_for_each(entry, &smi_watchers) {
1097 struct ipmi_smi_watcher *w;
1098 w = list_entry(entry, struct ipmi_smi_watcher, link);
1099 w->new_smi(i);
1100 }
1101 up_read(&smi_watchers_sem);
1102 }
1103
1104 out:
1105 up_read(&interfaces_sem);
1106
1107 if (rv)
1108 kfree(new_intf);
1109
1110 return rv;
1111 }
1112
free_recv_msg_list(struct list_head * q)1113 static void free_recv_msg_list(struct list_head *q)
1114 {
1115 struct list_head *entry, *entry2;
1116 struct ipmi_recv_msg *msg;
1117
1118 list_for_each_safe(entry, entry2, q) {
1119 msg = list_entry(entry, struct ipmi_recv_msg, link);
1120 list_del(entry);
1121 ipmi_free_recv_msg(msg);
1122 }
1123 }
1124
free_cmd_rcvr_list(struct list_head * q)1125 static void free_cmd_rcvr_list(struct list_head *q)
1126 {
1127 struct list_head *entry, *entry2;
1128 struct cmd_rcvr *rcvr;
1129
1130 list_for_each_safe(entry, entry2, q) {
1131 rcvr = list_entry(entry, struct cmd_rcvr, link);
1132 list_del(entry);
1133 kfree(rcvr);
1134 }
1135 }
1136
/* Free everything still queued on an interface being torn down:
   waiting messages, queued events, command receivers, and any receive
   messages still parked in the sequence table. */
static void clean_up_interface_data(ipmi_smi_t intf)
{
	int j;

	free_recv_msg_list(&(intf->waiting_msgs));
	free_recv_msg_list(&(intf->waiting_events));
	free_cmd_rcvr_list(&(intf->cmd_rcvrs));

	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		struct seq_table *ent = &(intf->seq_table[j]);

		if (ent->inuse && ent->recv_msg)
			ipmi_free_recv_msg(ent->recv_msg);
	}
}
1153
/* Unregister an SMI interface previously registered with
   ipmi_register_smi().  Returns -EBUSY if users are still attached,
   -ENODEV if the interface is not in the table, 0 on success.  On
   success the interface's queued state is freed, the slot is cleared,
   and every SMI watcher's smi_gone() hook is called with the slot
   number. */
int ipmi_unregister_smi(ipmi_smi_t intf)
{
	int rv = -ENODEV;
	int i;
	struct list_head *entry;
	unsigned long flags;

	down_write(&interfaces_sem);
	if (list_empty(&(intf->users)))
	{
		for (i=0; i<MAX_IPMI_INTERFACES; i++) {
			if (ipmi_interfaces[i] == intf) {
				/* Clear the slot and free the queued data
				   under the interfaces spinlock so the
				   timer handler can't observe a
				   half-removed interface. */
				spin_lock_irqsave(&interfaces_lock, flags);
				ipmi_interfaces[i] = NULL;
				clean_up_interface_data(intf);
				spin_unlock_irqrestore(&interfaces_lock,flags);
				kfree(intf);
				rv = 0;
				goto out_call_watcher;
			}
		}
	} else {
		rv = -EBUSY;
	}
	up_write(&interfaces_sem);

	return rv;

 out_call_watcher:
	/* Convert to a read semaphore so callbacks don't bite us. */
	up_write(&interfaces_sem);
	down_read(&interfaces_sem);

	/* Call all the watcher interfaces to tell them that
	   an interface is gone.  'i' still holds the slot number of the
	   interface removed above; the intf pointer itself is already
	   freed and must not be touched here. */
	down_read(&smi_watchers_sem);
	list_for_each(entry, &smi_watchers) {
		struct ipmi_smi_watcher *w;
		w = list_entry(entry,
			       struct ipmi_smi_watcher,
			       link);
		w->smi_gone(i);
	}
	up_read(&smi_watchers_sem);
	up_read(&interfaces_sem);
	return 0;
}
1201
/* Handle a response to a previously-sent IPMB request, fetched from
   the BMC receive queue with a Get Message command.  The response is
   matched to its request via the sequence number echoed in the rqseq
   field and delivered to the original requester.  Always returns 0
   (message consumed or ignored). */
static int handle_get_msg_rsp(ipmi_smi_t intf,
			      struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg *recv_msg;


	if (msg->rsp_size < 11)
		/* Message not big enough, just ignore it. */
		return 0;

	if (msg->rsp[2] != 0)
		/* An error getting the response, just ignore it. */
		return 0;

	/* Rebuild the responder's IPMB address from the framing bytes:
	   rsp[6] = responder slave address, rsp[3] low nibble = channel,
	   rsp[7] low two bits = responder LUN. */
	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,          /* sequence (rqseq) */
			  msg->rsp[3] & 0x0f,        /* channel */
			  msg->rsp[8],               /* command */
			  (msg->rsp[4] >> 2) & (~1), /* netfn, request form */
			  (struct ipmi_addr *) &(ipmb_addr),
			  &recv_msg))
	{
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		return 0;
	}

	/* NOTE(review): the copy length is rsp_size - 9 while data_len
	   below is rsp_size - 10; the extra byte copied appears to be
	   the trailing IPMB checksum - harmless (msg_data is large
	   enough) but inconsistent.  Confirm intent before changing. */
	memcpy(recv_msg->msg_data,
	       &(msg->rsp[9]),
	       msg->rsp_size - 9);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	deliver_response(recv_msg);

	return 0;
}
1251
/* Handle a command sent to us (the SMS) by another IPMB entity,
   fetched with a Get Message command.  If a user registered for the
   netfn/cmd pair (or for all commands), deliver it; otherwise send an
   "invalid command" error response back over the IPMB.  Returns 0 if
   the SMI message can be freed, 1 to requeue it (allocation failure),
   -1 if it was handed back to the low-level driver for sending. */
static int handle_get_msg_cmd(ipmi_smi_t intf,
			      struct ipmi_smi_msg *msg)
{
	struct list_head *entry;
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	ipmi_user_t user = NULL;
	struct ipmi_ipmb_addr *ipmb_addr;
	struct ipmi_recv_msg *recv_msg;

	if (msg->rsp_size < 10)
		/* Message not big enough, just ignore it. */
		return 0;

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];

	read_lock(&(intf->cmd_rcvr_lock));

	/* An all-commands receiver, if registered, takes precedence
	   over per-command registrations. */
	if (intf->all_cmd_rcvr) {
		user = intf->all_cmd_rcvr;
	} else {
		/* Find the command/netfn. */
		list_for_each(entry, &(intf->cmd_rcvrs)) {
			rcvr = list_entry(entry, struct cmd_rcvr, link);
			if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
				user = rcvr->user;
				break;
			}
		}
	}
	read_unlock(&(intf->cmd_rcvr_lock));

	if (user == NULL) {
		/* We didn't find a user, deliver an error response.
		   Build a Send Message request that returns an
		   "invalid command" completion code to the requester,
		   swapping the requester/responder framing fields from
		   the incoming message. */
		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];   /* channel */
		msg->data[3] = msg->rsp[6];   /* original requester addr */
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->my_address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;

		intf->handlers->sender(intf->send_info, msg, 0);

		rv = -1; /* We used the message, so return the value that
			    causes it to not be freed or queued. */
	} else {
		/* Deliver the message to the user. */
		recv_msg = ipmi_alloc_recv_msg();
		if (! recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
		} else {
			/* Fill in the sender's IPMB address from the
			   framing bytes so the user can respond. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3];

			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[9]),
			       msg->rsp_size - 10);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
1343
copy_event_into_recv_msg(struct ipmi_recv_msg * recv_msg,struct ipmi_smi_msg * msg)1344 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
1345 struct ipmi_smi_msg *msg)
1346 {
1347 struct ipmi_system_interface_addr *smi_addr;
1348
1349 recv_msg->msgid = 0;
1350 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
1351 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
1352 smi_addr->channel = IPMI_BMC_CHANNEL;
1353 smi_addr->lun = msg->rsp[0] & 3;
1354 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
1355 recv_msg->msg.netfn = msg->rsp[0] >> 2;
1356 recv_msg->msg.cmd = msg->rsp[1];
1357 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
1358 recv_msg->msg.data = recv_msg->msg_data;
1359 recv_msg->msg.data_len = msg->rsp_size - 3;
1360 }
1361
1362 /* This will be called with the intf->users_lock read-locked, so no need
1363 to do that here. */
handle_read_event_rsp(ipmi_smi_t intf,struct ipmi_smi_msg * msg)1364 static int handle_read_event_rsp(ipmi_smi_t intf,
1365 struct ipmi_smi_msg *msg)
1366 {
1367 struct ipmi_recv_msg *recv_msg;
1368 struct list_head msgs;
1369 struct list_head *entry, *entry2;
1370 ipmi_user_t user;
1371 int rv = 0;
1372 int deliver_count = 0;
1373 unsigned long flags;
1374
1375 if (msg->rsp_size < 19) {
1376 /* Message is too small to be an IPMB event. */
1377 return 0;
1378 }
1379
1380 if (msg->rsp[2] != 0) {
1381 /* An error getting the event, just ignore it. */
1382 return 0;
1383 }
1384
1385 INIT_LIST_HEAD(&msgs);
1386
1387 spin_lock_irqsave(&(intf->events_lock), flags);
1388
1389 /* Allocate and fill in one message for every user that is getting
1390 events. */
1391 list_for_each(entry, &(intf->users)) {
1392 user = list_entry(entry, struct ipmi_user, link);
1393
1394 if (! user->gets_events)
1395 continue;
1396
1397 recv_msg = ipmi_alloc_recv_msg();
1398 if (! recv_msg) {
1399 list_for_each_safe(entry, entry2, &msgs) {
1400 recv_msg = list_entry(entry,
1401 struct ipmi_recv_msg,
1402 link);
1403 list_del(entry);
1404 ipmi_free_recv_msg(recv_msg);
1405 }
1406 /* We couldn't allocate memory for the
1407 message, so requeue it for handling
1408 later. */
1409 rv = 1;
1410 goto out;
1411 }
1412
1413 deliver_count++;
1414
1415 copy_event_into_recv_msg(recv_msg, msg);
1416 recv_msg->user = user;
1417 list_add_tail(&(recv_msg->link), &msgs);
1418 }
1419
1420 if (deliver_count) {
1421 /* Now deliver all the messages. */
1422 list_for_each_safe(entry, entry2, &msgs) {
1423 recv_msg = list_entry(entry,
1424 struct ipmi_recv_msg,
1425 link);
1426 list_del(entry);
1427 deliver_response(recv_msg);
1428 }
1429 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
1430 /* No one to receive the message, put it in queue if there's
1431 not already too many things in the queue. */
1432 recv_msg = ipmi_alloc_recv_msg();
1433 if (! recv_msg) {
1434 /* We couldn't allocate memory for the
1435 message, so requeue it for handling
1436 later. */
1437 rv = 1;
1438 goto out;
1439 }
1440
1441 copy_event_into_recv_msg(recv_msg, msg);
1442 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
1443 } else {
1444 /* There's too many things in the queue, discard this
1445 message. */
1446 printk(KERN_WARNING "ipmi: Event queue full, discarding an"
1447 " incoming event\n");
1448 }
1449
1450 out:
1451 spin_unlock_irqrestore(&(intf->events_lock), flags);
1452
1453 return rv;
1454 }
1455
/* Handle a response from the local BMC (i.e. not one fetched via Get
   Message).  The originating request's recv_msg was stashed in
   msg->user_data when it was sent.  Always returns 0 so the caller
   frees the SMI message. */
static int handle_bmc_rsp(ipmi_smi_t intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	int found = 0;
	struct list_head *entry;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		/* ROBUSTNESS FIX: a response with no attached request
		   was previously dereferenced blindly.  Nothing can be
		   delivered, so just drop it. */
		printk(KERN_WARNING
		       "ipmi: response from the BMC with no request\n");
		return 0;
	}

	/* Make sure the user still exists. */
	list_for_each(entry, &(intf->users)) {
		if (list_entry(entry, struct ipmi_user, link)
		    == recv_msg->user)
		{
			/* Found it, so we can deliver it */
			found = 1;
			break;
		}
	}

	if (!found) {
		/* The user for the message went away, so give up. */
		ipmi_free_recv_msg(recv_msg);
	} else {
		struct ipmi_system_interface_addr *smi_addr;

		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv_msg->msgid = msg->msgid;
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &(recv_msg->addr));
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		memcpy(recv_msg->msg_data,
		       &(msg->rsp[2]),
		       msg->rsp_size - 2);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = msg->rsp_size - 2;
		deliver_response(recv_msg);
	}

	return 0;
}
1501
1502 /* Handle a new message. Return 1 if the message should be requeued,
1503 0 if the message should be freed, or -1 if the message should not
1504 be freed or requeued. */
handle_new_recv_msg(ipmi_smi_t intf,struct ipmi_smi_msg * msg)1505 static int handle_new_recv_msg(ipmi_smi_t intf,
1506 struct ipmi_smi_msg *msg)
1507 {
1508 int requeue;
1509
1510 if (msg->rsp_size < 2) {
1511 /* Message is too small to be correct. */
1512 requeue = 0;
1513 } else if (msg->rsp[1] == IPMI_GET_MSG_CMD) {
1514 #if DEBUG_MSGING
1515 int m;
1516 printk("Response:");
1517 for (m=0; m<msg->rsp_size; m++)
1518 printk(" %2.2x", msg->rsp[m]);
1519 printk("\n");
1520 #endif
1521 /* It's from the receive queue. */
1522 if (msg->rsp[4] & 0x04) {
1523 /* It's a response, so find the
1524 requesting message and send it up. */
1525 requeue = handle_get_msg_rsp(intf, msg);
1526 } else {
1527 /* It's a command to the SMS from some other
1528 entity. Handle that. */
1529 requeue = handle_get_msg_cmd(intf, msg);
1530 }
1531 } else if (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD) {
1532 /* It's an asyncronous event. */
1533 requeue = handle_read_event_rsp(intf, msg);
1534 } else {
1535 /* It's a response from the local BMC. */
1536 requeue = handle_bmc_rsp(intf, msg);
1537 }
1538
1539 return requeue;
1540 }
1541
1542 /* Handle a new message from the lower layer. */
ipmi_smi_msg_received(ipmi_smi_t intf,struct ipmi_smi_msg * msg)1543 void ipmi_smi_msg_received(ipmi_smi_t intf,
1544 struct ipmi_smi_msg *msg)
1545 {
1546 unsigned long flags;
1547 int rv;
1548
1549
1550 /* Lock the user lock so the user can't go away while we are
1551 working on it. */
1552 read_lock(&(intf->users_lock));
1553
1554 if ((msg->data_size >= 2) && (msg->data[1] == IPMI_SEND_MSG_CMD)) {
1555 /* This is the local response to a send, start the
1556 timer for these. */
1557 intf_start_seq_timer(intf, msg->msgid);
1558 ipmi_free_smi_msg(msg);
1559 goto out_unlock;
1560 }
1561
1562 /* To preserve message order, if the list is not empty, we
1563 tack this message onto the end of the list. */
1564 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
1565 if (!list_empty(&(intf->waiting_msgs))) {
1566 list_add_tail(&(msg->link), &(intf->waiting_msgs));
1567 spin_unlock(&(intf->waiting_msgs_lock));
1568 goto out_unlock;
1569 }
1570 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
1571
1572 rv = handle_new_recv_msg(intf, msg);
1573 if (rv > 0) {
1574 /* Could not handle the message now, just add it to a
1575 list to handle later. */
1576 spin_lock(&(intf->waiting_msgs_lock));
1577 list_add_tail(&(msg->link), &(intf->waiting_msgs));
1578 spin_unlock(&(intf->waiting_msgs_lock));
1579 } else if (rv == 0) {
1580 ipmi_free_smi_msg(msg);
1581 }
1582
1583 out_unlock:
1584 read_unlock(&(intf->users_lock));
1585 }
1586
/* Fan a watchdog pre-timeout notification out to every user on the
   interface that supplied a pre-timeout handler. */
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
	struct list_head *pos;
	ipmi_user_t u;

	read_lock(&(intf->users_lock));
	list_for_each(pos, &(intf->users)) {
		u = list_entry(pos, struct ipmi_user, link);
		if (u->handler->ipmi_watchdog_pretimeout)
			u->handler->ipmi_watchdog_pretimeout(u->handler_data);
	}
	read_unlock(&(intf->users_lock));
}
1603
1604 static void
handle_msg_timeout(struct ipmi_recv_msg * msg)1605 handle_msg_timeout(struct ipmi_recv_msg *msg)
1606 {
1607 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
1608 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
1609 msg->msg.netfn |= 1; /* Convert to a response. */
1610 msg->msg.data_len = 1;
1611 msg->msg.data = msg->msg_data;
1612 deliver_response(msg);
1613 }
1614
1615 static void
send_from_recv_msg(ipmi_smi_t intf,struct ipmi_recv_msg * recv_msg,struct ipmi_smi_msg * smi_msg,unsigned char seq,long seqid)1616 send_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
1617 struct ipmi_smi_msg *smi_msg,
1618 unsigned char seq, long seqid)
1619 {
1620 if (!smi_msg)
1621 smi_msg = ipmi_alloc_smi_msg();
1622 if (!smi_msg)
1623 /* If we can't allocate the message, then just return, we
1624 get 4 retries, so this should be ok. */
1625 return;
1626
1627 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
1628 smi_msg->data_size = recv_msg->msg.data_len;
1629 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
1630
1631 /* Send the new message. We send with a zero priority. It
1632 timed out, I doubt time is that critical now, and high
1633 priority messages are really only for messages to the local
1634 MC, which don't get resent. */
1635 intf->handlers->sender(intf->send_info, smi_msg, 0);
1636
1637 #if DEBUG_MSGING
1638 {
1639 int m;
1640 printk("Resend: ");
1641 for (m=0; m<smi_msg->data_size; m++)
1642 printk(" %2.2x", smi_msg->data[m]);
1643 printk("\n");
1644 }
1645 #endif
1646 }
1647
1648 static void
ipmi_timeout_handler(long timeout_period)1649 ipmi_timeout_handler(long timeout_period)
1650 {
1651 ipmi_smi_t intf;
1652 struct list_head timeouts;
1653 struct ipmi_recv_msg *msg;
1654 struct ipmi_smi_msg *smi_msg;
1655 unsigned long flags;
1656 struct list_head *entry, *entry2;
1657 int i, j;
1658
1659 INIT_LIST_HEAD(&timeouts);
1660
1661 spin_lock(&interfaces_lock);
1662 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
1663 intf = ipmi_interfaces[i];
1664 if (intf == NULL)
1665 continue;
1666
1667 read_lock(&(intf->users_lock));
1668
1669 /* See if any waiting messages need to be processed. */
1670 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
1671 list_for_each_safe(entry, entry2, &(intf->waiting_msgs)) {
1672 smi_msg = list_entry(entry, struct ipmi_smi_msg, link);
1673 if (! handle_new_recv_msg(intf, smi_msg)) {
1674 list_del(entry);
1675 ipmi_free_smi_msg(smi_msg);
1676 } else {
1677 /* To preserve message order, quit if we
1678 can't handle a message. */
1679 break;
1680 }
1681 }
1682 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
1683
1684 /* Go through the seq table and find any messages that
1685 have timed out, putting them in the timeouts
1686 list. */
1687 spin_lock_irqsave(&(intf->seq_lock), flags);
1688 for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
1689 struct seq_table *ent = &(intf->seq_table[j]);
1690 if (!ent->inuse)
1691 continue;
1692
1693 ent->timeout -= timeout_period;
1694 if (ent->timeout > 0)
1695 continue;
1696
1697 if (ent->retries_left == 0) {
1698 /* The message has used all its retries. */
1699 ent->inuse = 0;
1700 msg = ent->recv_msg;
1701 list_add_tail(&(msg->link), &timeouts);
1702 } else {
1703 /* More retries, send again. */
1704
1705 /* Start with the max timer, set to normal
1706 timer after the message is sent. */
1707 ent->timeout = MAX_MSG_TIMEOUT;
1708 ent->retries_left--;
1709 send_from_recv_msg(intf, ent->recv_msg, NULL,
1710 j, ent->seqid);
1711 }
1712 }
1713 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1714
1715 list_for_each_safe(entry, entry2, &timeouts) {
1716 msg = list_entry(entry, struct ipmi_recv_msg, link);
1717 handle_msg_timeout(msg);
1718 }
1719
1720 read_unlock(&(intf->users_lock));
1721 }
1722 spin_unlock(&interfaces_lock);
1723 }
1724
ipmi_request_event(void)1725 static void ipmi_request_event(void)
1726 {
1727 ipmi_smi_t intf;
1728 int i;
1729
1730 spin_lock(&interfaces_lock);
1731 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
1732 intf = ipmi_interfaces[i];
1733 if (intf == NULL)
1734 continue;
1735
1736 intf->handlers->request_events(intf->send_info);
1737 }
1738 spin_unlock(&interfaces_lock);
1739 }
1740
/* Periodic timer that drives timeout processing and event polling. */
static struct timer_list ipmi_timer;

/* Call every 100 ms. */
#define IPMI_TIMEOUT_TIME 100
#define IPMI_TIMEOUT_JIFFIES (IPMI_TIMEOUT_TIME/(1000/HZ))

/* Request events from the queue every second.  Hopefully, in the
   future, IPMI will add a way to know immediately if an event is
   in the queue. */
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))

/* Handshake between cleanup_ipmi() and ipmi_timeout(): cleanup sets
   stop_operation; the timer acknowledges by setting timer_stopped
   instead of re-arming itself.
   NOTE(review): relies on volatile for cross-CPU visibility, which
   predates proper atomics - confirm acceptable for this kernel era. */
static volatile int stop_operation = 0;
static volatile int timer_stopped = 0;
/* Countdown of timer ticks until the next event-poll request. */
static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
1755
ipmi_timeout(unsigned long data)1756 static void ipmi_timeout(unsigned long data)
1757 {
1758 if (stop_operation) {
1759 timer_stopped = 1;
1760 return;
1761 }
1762
1763 ticks_to_req_ev--;
1764 if (ticks_to_req_ev == 0) {
1765 ipmi_request_event();
1766 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
1767 }
1768
1769 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
1770
1771 ipmi_timer.expires += IPMI_TIMEOUT_JIFFIES;
1772 add_timer(&ipmi_timer);
1773 }
1774
1775
/* Counts of outstanding message buffers, checked at module unload to
   detect leaks. */
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

/* FIXME - convert these to slabs. */
free_smi_msg(struct ipmi_smi_msg * msg)1780 static void free_smi_msg(struct ipmi_smi_msg *msg)
1781 {
1782 atomic_dec(&smi_msg_inuse_count);
1783 kfree(msg);
1784 }
1785
ipmi_alloc_smi_msg(void)1786 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
1787 {
1788 struct ipmi_smi_msg *rv;
1789 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
1790 if (rv) {
1791 rv->done = free_smi_msg;
1792 atomic_inc(&smi_msg_inuse_count);
1793 }
1794 return rv;
1795 }
1796
free_recv_msg(struct ipmi_recv_msg * msg)1797 static void free_recv_msg(struct ipmi_recv_msg *msg)
1798 {
1799 atomic_dec(&recv_msg_inuse_count);
1800 kfree(msg);
1801 }
1802
ipmi_alloc_recv_msg(void)1803 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
1804 {
1805 struct ipmi_recv_msg *rv;
1806
1807 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
1808 if (rv) {
1809 rv->done = free_recv_msg;
1810 atomic_inc(&recv_msg_inuse_count);
1811 }
1812 return rv;
1813 }
1814
1815 #ifdef CONFIG_IPMI_PANIC_EVENT
1816
/* No-op "done" handlers for the statically-allocated messages used by
   send_panic_events(); those messages must not be kfree()d. */
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
}
1824
/* Send an "OS Critical Stop" platform event through every registered
   interface at panic time.  All message structures live on the stack
   with no-op done handlers (nothing may be freed here), and each
   interface is switched to run-to-completion (polled) mode first. */
static void send_panic_events(void)
{
	struct ipmi_msg msg;
	ipmi_smi_t intf;
	unsigned char data[8];
	int i;
	struct ipmi_system_interface_addr addr;
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;

	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	addr.channel = IPMI_BMC_CHANNEL;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x21; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/* These used to have the first three bytes of the panic string,
	   but not only is that not terribly useful, it's not available
	   any more. */
	data[3] = 0;
	data[6] = 0;
	data[7] = 0;

	/* Stack-allocated messages: done handlers must be no-ops. */
	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;

	/* For every registered interface, send the event. */
	for (i=0; i<MAX_IPMI_INTERFACES; i++) {
		intf = ipmi_interfaces[i];
		if (intf == NULL)
			continue;

		/* Force polled operation; interrupts may be dead during
		   a panic. */
		intf->handlers->set_run_to_completion(intf->send_info, 1);
		i_ipmi_request(NULL,
			       intf,
			       (struct ipmi_addr *) &addr,
			       0,
			       &msg,
			       &smi_msg,
			       &recv_msg,
			       0,
			       intf->my_address,
			       intf->my_lun);
	}
}
1878 #endif /* CONFIG_IPMI_PANIC_EVENT */
1879
static int has_paniced = 0; /* ensures panic handling runs only once */
1881
panic_event(struct notifier_block * this,unsigned long event,void * ptr)1882 static int panic_event(struct notifier_block *this,
1883 unsigned long event,
1884 void *ptr)
1885 {
1886 int i;
1887 ipmi_smi_t intf;
1888
1889 if (has_paniced)
1890 return NOTIFY_DONE;
1891 has_paniced = 1;
1892
1893 /* For every registered interface, set it to run to completion. */
1894 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
1895 intf = ipmi_interfaces[i];
1896 if (intf == NULL)
1897 continue;
1898
1899 intf->handlers->set_run_to_completion(intf->send_info, 1);
1900 }
1901
1902 #ifdef CONFIG_IPMI_PANIC_EVENT
1903 send_panic_events();
1904 #endif
1905
1906 return NOTIFY_DONE;
1907 }
1908
1909 static struct notifier_block panic_block = {
1910 panic_event,
1911 NULL,
1912 200 /* priority: INT_MAX >= x >= 0 */
1913 };
1914
1915
ipmi_init_msghandler(void)1916 static __init int ipmi_init_msghandler(void)
1917 {
1918 int i;
1919
1920 if (initialized)
1921 return 0;
1922
1923 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
1924 ipmi_interfaces[i] = NULL;
1925 }
1926
1927 init_timer(&ipmi_timer);
1928 ipmi_timer.data = 0;
1929 ipmi_timer.function = ipmi_timeout;
1930 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
1931 add_timer(&ipmi_timer);
1932
1933 notifier_chain_register(&panic_notifier_list, &panic_block);
1934
1935 initialized = 1;
1936
1937 printk(KERN_INFO "ipmi: message handler initialized\n");
1938
1939 return 0;
1940 }
1941
cleanup_ipmi(void)1942 static __exit void cleanup_ipmi(void)
1943 {
1944 int count;
1945
1946 if (!initialized)
1947 return;
1948
1949 notifier_chain_unregister(&panic_notifier_list, &panic_block);
1950
1951 /* This can't be called if any interfaces exist, so no worry about
1952 shutting down the interfaces. */
1953
1954 /* Tell the timer to stop, then wait for it to stop. This avoids
1955 problems with race conditions removing the timer here. */
1956 stop_operation = 1;
1957 while (!timer_stopped) {
1958 schedule_timeout(1);
1959 }
1960
1961 initialized = 0;
1962
1963 /* Check for buffer leaks. */
1964 count = atomic_read(&smi_msg_inuse_count);
1965 if (count != 0)
1966 printk("ipmi_msghandler: SMI message count %d at exit\n",
1967 count);
1968 count = atomic_read(&recv_msg_inuse_count);
1969 if (count != 0)
1970 printk("ipmi_msghandler: recv message count %d at exit\n",
1971 count);
1972 }
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler);
MODULE_LICENSE("GPL");

/* Public entry points used by the low-level SMI drivers and by
   in-kernel IPMI users (e.g. the watchdog and device interface). */
EXPORT_SYMBOL(ipmi_alloc_recv_msg);
EXPORT_SYMBOL(ipmi_create_user);
EXPORT_SYMBOL(ipmi_destroy_user);
EXPORT_SYMBOL(ipmi_get_version);
EXPORT_SYMBOL(ipmi_request);
EXPORT_SYMBOL(ipmi_request_supply_msgs);
EXPORT_SYMBOL(ipmi_request_with_source);
EXPORT_SYMBOL(ipmi_register_smi);
EXPORT_SYMBOL(ipmi_unregister_smi);
EXPORT_SYMBOL(ipmi_register_for_cmd);
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
EXPORT_SYMBOL(ipmi_smi_msg_received);
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
EXPORT_SYMBOL(ipmi_alloc_smi_msg);
EXPORT_SYMBOL(ipmi_register_all_cmd_rcvr);
EXPORT_SYMBOL(ipmi_unregister_all_cmd_rcvr);
EXPORT_SYMBOL(ipmi_addr_length);
EXPORT_SYMBOL(ipmi_validate_addr);
EXPORT_SYMBOL(ipmi_set_gets_events);
EXPORT_SYMBOL(ipmi_addr_equal);
EXPORT_SYMBOL(ipmi_smi_watcher_register);
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
EXPORT_SYMBOL(ipmi_set_my_address);
EXPORT_SYMBOL(ipmi_get_my_address);
EXPORT_SYMBOL(ipmi_set_my_LUN);
EXPORT_SYMBOL(ipmi_get_my_LUN);
2004