1 /*
2 * Core I2O structure management
3 *
4 * (C) Copyright 1999 Red Hat Software
5 *
6 * Written by Alan Cox, Building Number Three Ltd
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * A lot of the I2O message side code from this is taken from the
14 * Red Creek RCPCI45 adapter driver by Red Creek Communications
15 *
16 * Fixes by:
17 * Philipp Rumpf
 *		Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
 *		Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20 * Deepak Saxena <deepak@plexity.net>
21 * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22 *
23 */
24
25 #include <linux/config.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/pci.h>
29
30 #include <linux/i2o.h>
31
32 #include <linux/errno.h>
33 #include <linux/init.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/smp_lock.h>
37
38 #include <linux/bitops.h>
39 #include <linux/wait.h>
40 #include <linux/delay.h>
41 #include <linux/timer.h>
42 #include <linux/tqueue.h>
43 #include <linux/interrupt.h>
44 #include <linux/sched.h>
45 #include <asm/semaphore.h>
46 #include <linux/completion.h>
47
48 #include <asm/io.h>
49 #include <linux/reboot.h>
50
51 #include "i2o_lan.h"
52
53 //#define DRIVERDEBUG
54
55 #ifdef DRIVERDEBUG
56 #define dprintk(s, args...) printk(s, ## args)
57 #else
58 #define dprintk(s, args...)
59 #endif
60
61 /* OSM table */
62 static struct i2o_handler *i2o_handlers[MAX_I2O_MODULES];
63
64 /* Controller list */
65 static struct i2o_controller *i2o_controllers[MAX_I2O_CONTROLLERS];
66 struct i2o_controller *i2o_controller_chain;
67 int i2o_num_controllers;
68
69 /* Initiator Context for Core message */
70 static int core_context;
71
72 /* Initialization && shutdown functions */
73 void i2o_sys_init(void);
74 static void i2o_sys_shutdown(void);
75 static int i2o_reset_controller(struct i2o_controller *);
76 static int i2o_reboot_event(struct notifier_block *, unsigned long , void *);
77 static int i2o_online_controller(struct i2o_controller *);
78 static int i2o_init_outbound_q(struct i2o_controller *);
79 static int i2o_post_outbound_messages(struct i2o_controller *);
80
81 /* Reply handler */
82 static void i2o_core_reply(struct i2o_handler *, struct i2o_controller *,
83 struct i2o_message *);
84
85 /* Various helper functions */
86 static int i2o_lct_get(struct i2o_controller *);
87 static int i2o_lct_notify(struct i2o_controller *);
88 static int i2o_hrt_get(struct i2o_controller *);
89
90 static int i2o_build_sys_table(void);
91 static int i2o_systab_send(struct i2o_controller *c);
92
93 /* I2O core event handler */
94 static int i2o_core_evt(void *);
95 static int evt_pid;
96 static int evt_running;
97
98 /* Dynamic LCT update handler */
99 static int i2o_dyn_lct(void *);
100
101 void i2o_report_controller_unit(struct i2o_controller *, struct i2o_device *);
102
103 /*
104 * I2O System Table. Contains information about
105 * all the IOPs in the system. Used to inform IOPs
106 * about each other's existence.
107 *
108 * sys_tbl_ver is the CurrentChangeIndicator that is
109 * used by IOPs to track changes.
110 */
111 static struct i2o_sys_tbl *sys_tbl;
112 static int sys_tbl_ind;
113 static int sys_tbl_len;
114
/*
 * This spin lock is used to keep a device from being
 * added and deleted concurrently across CPUs or interrupts.
 * This can occur when a user creates a device and immediately
 * deletes it before the new_dev_notify() handler is called.
 */
121 static spinlock_t i2o_dev_lock = SPIN_LOCK_UNLOCKED;
122
123 /*
124 * Structures and definitions for synchronous message posting.
125 * See i2o_post_wait() for description.
126 */
127 struct i2o_post_wait_data
128 {
129 int *status; /* Pointer to status block on caller stack */
130 int *complete; /* Pointer to completion flag on caller stack */
131 u32 id; /* Unique identifier */
132 wait_queue_head_t *wq; /* Wake up for caller (NULL for dead) */
133 struct i2o_post_wait_data *next; /* Chain */
134 void *mem[2]; /* Memory blocks to recover on failure path */
135 };
136 static struct i2o_post_wait_data *post_wait_queue;
137 static u32 post_wait_id; // Unique ID for each post_wait
138 static spinlock_t post_wait_lock = SPIN_LOCK_UNLOCKED;
139 static void i2o_post_wait_complete(u32, int);
140
141 /* OSM descriptor handler */
142 static struct i2o_handler i2o_core_handler =
143 {
144 (void *)i2o_core_reply,
145 NULL,
146 NULL,
147 NULL,
148 "I2O core layer",
149 0,
150 I2O_CLASS_EXECUTIVE
151 };
152
153 /*
154 * Used when queueing a reply to be handled later
155 */
156
157 struct reply_info
158 {
159 struct i2o_controller *iop;
160 u32 msg[MSG_FRAME_SIZE];
161 };
162 static struct reply_info evt_reply;
163 static struct reply_info events[I2O_EVT_Q_LEN];
164 static int evt_in;
165 static int evt_out;
166 static int evt_q_len;
167 #define MODINC(x,y) ((x) = ((x) + 1) % (y))
168
169 /*
170 * I2O configuration spinlock. This isnt a big deal for contention
171 * so we have one only
172 */
173
174 static DECLARE_MUTEX(i2o_configuration_lock);
175
176 /*
177 * Event spinlock. Used to keep event queue sane and from
178 * handling multiple events simultaneously.
179 */
180 static spinlock_t i2o_evt_lock = SPIN_LOCK_UNLOCKED;
181
182 /*
183 * Semaphore used to synchronize event handling thread with
184 * interrupt handler.
185 */
186
187 static DECLARE_MUTEX(evt_sem);
188 static DECLARE_COMPLETION(evt_dead);
189 static DECLARE_WAIT_QUEUE_HEAD(evt_wait);
190
191 static struct notifier_block i2o_reboot_notifier =
192 {
193 i2o_reboot_event,
194 NULL,
195 0
196 };
197
198 /*
199 * Config options
200 */
201
202 static int verbose;
203 MODULE_PARM(verbose, "i");
204
205 /*
206 * I2O Core reply handler
207 */
/*
 * i2o_core_reply - handle reply messages addressed to the I2O core
 * @h: handler descriptor this reply was routed to (the core handler)
 * @c: controller that produced the reply
 * @m: reply message frame, already mapped into kernel virtual space
 *
 * Dispatches core-owned replies: failed-message frames, i2o_post_wait
 * completions, event notifications and LCT-notify completions.  Runs in
 * the reply dispatch context of i2o_run_queue().
 */
static void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
		    struct i2o_message *m)
{
	u32 *msg=(u32 *)m;
	u32 status;
	u32 context = msg[2];	/* initiator context stamped on the request */

	if (msg[0] & MSG_FAIL) // Fail bit is set
	{
		/* msg[7] holds the controller-relative offset of the
		 * preserved original request frame */
		u32 *preserved_msg = (u32*)(c->mem_offset + msg[7]);

		i2o_report_status(KERN_INFO, "i2o_core", msg);
		i2o_dump_message(preserved_msg);

		/* If the failed request needs special treatment,
		 * it should be done here. */

		/* Release the preserved msg by resubmitting it as a NOP */

		preserved_msg[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
		preserved_msg[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0);
		preserved_msg[2] = 0;
		i2o_post_message(c, msg[7]);

		/* If reply to i2o_post_wait failed, return causes a timeout */

		return;
	}

#ifdef DRIVERDEBUG
	i2o_report_status(KERN_INFO, "i2o_core", msg);
#endif

	if(msg[2]&0x80000000) // Post wait message
	{
		/* A non-zero high byte of msg[4] flags a failure; the low
		 * 16 bits carry the detailed status code */
		if (msg[4] >> 24)
			status = (msg[4] & 0xFFFF);
		else
			status = I2O_POST_WAIT_OK;

		i2o_post_wait_complete(context, status);
		return;
	}

	if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
	{
		/* Queue the event reply for the i2oevtd thread.  The size in
		 * dwords lives in the top 16 bits of msg[0]; <<2 converts to
		 * bytes.
		 * NOTE(review): the memcpy into events[evt_in] happens before
		 * i2o_evt_lock is taken -- presumably safe because replies
		 * are serialized by the dispatcher; verify against
		 * i2o_run_queue's locking. */
		memcpy(events[evt_in].msg, msg, (msg[0]>>16)<<2);
		events[evt_in].iop = c;

		spin_lock(&i2o_evt_lock);
		MODINC(evt_in, I2O_EVT_Q_LEN);
		if(evt_q_len == I2O_EVT_Q_LEN)
			MODINC(evt_out, I2O_EVT_Q_LEN);	/* queue full: drop oldest */
		else
			evt_q_len++;
		spin_unlock(&i2o_evt_lock);

		/* Wake the event daemon and anyone sleeping on evt_wait */
		up(&evt_sem);
		wake_up_interruptible(&evt_wait);
		return;
	}

	if(m->function == I2O_CMD_LCT_NOTIFY)
	{
		/* Wake this controller's dynamic LCT update thread */
		up(&c->lct_sem);
		return;
	}

	/*
	 * If this happens, we want to dump the message to the syslog so
	 * it can be sent back to the card manufacturer by the end user
	 * to aid in debugging.
	 *
	 */
	printk(KERN_WARNING "%s: Unsolicited message reply sent to core!"
		"Message dumped to syslog\n",
		c->name);
	i2o_dump_message(msg);

	return;
}
289
290 /**
291 * i2o_install_handler - install a message handler
292 * @h: Handler structure
293 *
294 * Install an I2O handler - these handle the asynchronous messaging
295 * from the card once it has initialised. If the table of handlers is
296 * full then -ENOSPC is returned. On a success 0 is returned and the
297 * context field is set by the function. The structure is part of the
298 * system from this time onwards. It must not be freed until it has
299 * been uninstalled
300 */
301
i2o_install_handler(struct i2o_handler * h)302 int i2o_install_handler(struct i2o_handler *h)
303 {
304 int i;
305 down(&i2o_configuration_lock);
306 for(i=0;i<MAX_I2O_MODULES;i++)
307 {
308 if(i2o_handlers[i]==NULL)
309 {
310 h->context = i;
311 i2o_handlers[i]=h;
312 up(&i2o_configuration_lock);
313 return 0;
314 }
315 }
316 up(&i2o_configuration_lock);
317 return -ENOSPC;
318 }
319
320 /**
321 * i2o_remove_handler - remove an i2o message handler
322 * @h: handler
323 *
324 * Remove a message handler previously installed with i2o_install_handler.
325 * After this function returns the handler object can be freed or re-used
326 */
327
i2o_remove_handler(struct i2o_handler * h)328 int i2o_remove_handler(struct i2o_handler *h)
329 {
330 i2o_handlers[h->context]=NULL;
331 return 0;
332 }
333
334
335 /*
336 * Each I2O controller has a chain of devices on it.
337 * Each device has a pointer to it's LCT entry to be used
338 * for fun purposes.
339 */
340
341 /**
342 * i2o_install_device - attach a device to a controller
343 * @c: controller
344 * @d: device
345 *
346 * Add a new device to an i2o controller. This can be called from
347 * non interrupt contexts only. It adds the device and marks it as
348 * unclaimed. The device memory becomes part of the kernel and must
349 * be uninstalled before being freed or reused. Zero is returned
350 * on success.
351 */
352
i2o_install_device(struct i2o_controller * c,struct i2o_device * d)353 int i2o_install_device(struct i2o_controller *c, struct i2o_device *d)
354 {
355 int i;
356
357 down(&i2o_configuration_lock);
358 d->controller=c;
359 d->owner=NULL;
360 d->next=c->devices;
361 d->prev=NULL;
362 if (c->devices != NULL)
363 c->devices->prev=d;
364 c->devices=d;
365 *d->dev_name = 0;
366
367 for(i = 0; i < I2O_MAX_MANAGERS; i++)
368 d->managers[i] = NULL;
369
370 up(&i2o_configuration_lock);
371 return 0;
372 }
373
374 /* we need this version to call out of i2o_delete_controller */
375
__i2o_delete_device(struct i2o_device * d)376 int __i2o_delete_device(struct i2o_device *d)
377 {
378 struct i2o_device **p;
379 int i;
380
381 p=&(d->controller->devices);
382
383 /*
384 * Hey we have a driver!
385 * Check to see if the driver wants us to notify it of
386 * device deletion. If it doesn't we assume that it
387 * is unsafe to delete a device with an owner and
388 * fail.
389 */
390 if(d->owner)
391 {
392 if(d->owner->dev_del_notify)
393 {
394 dprintk(KERN_INFO "Device has owner, notifying\n");
395 d->owner->dev_del_notify(d->controller, d);
396 if(d->owner)
397 {
398 printk(KERN_WARNING
399 "Driver \"%s\" did not release device!\n", d->owner->name);
400 return -EBUSY;
401 }
402 }
403 else
404 return -EBUSY;
405 }
406
407 /*
408 * Tell any other users who are talking to this device
409 * that it's going away. We assume that everything works.
410 */
411 for(i=0; i < I2O_MAX_MANAGERS; i++)
412 {
413 if(d->managers[i] && d->managers[i]->dev_del_notify)
414 d->managers[i]->dev_del_notify(d->controller, d);
415 }
416
417 while(*p!=NULL)
418 {
419 if(*p==d)
420 {
421 /*
422 * Destroy
423 */
424 *p=d->next;
425 kfree(d);
426 return 0;
427 }
428 p=&((*p)->next);
429 }
430 printk(KERN_ERR "i2o_delete_device: passed invalid device.\n");
431 return -EINVAL;
432 }
433
434 /**
435 * i2o_delete_device - remove an i2o device
436 * @d: device to remove
437 *
438 * This function unhooks a device from a controller. The device
439 * will not be unhooked if it has an owner who does not wish to free
440 * it, or if the owner lacks a dev_del_notify function. In that case
441 * -EBUSY is returned. On success 0 is returned. Other errors cause
442 * negative errno values to be returned
443 */
444
i2o_delete_device(struct i2o_device * d)445 int i2o_delete_device(struct i2o_device *d)
446 {
447 int ret;
448
449 down(&i2o_configuration_lock);
450
451 /*
452 * Seek, locate
453 */
454
455 ret = __i2o_delete_device(d);
456
457 up(&i2o_configuration_lock);
458
459 return ret;
460 }
461
462 /**
463 * i2o_install_controller - attach a controller
464 * @c: controller
465 *
466 * Add a new controller to the i2o layer. This can be called from
467 * non interrupt contexts only. It adds the controller and marks it as
468 * unused with no devices. If the tables are full or memory allocations
469 * fail then a negative errno code is returned. On success zero is
470 * returned and the controller is bound to the system. The structure
471 * must not be freed or reused until being uninstalled.
472 */
473
i2o_install_controller(struct i2o_controller * c)474 int i2o_install_controller(struct i2o_controller *c)
475 {
476 int i;
477 down(&i2o_configuration_lock);
478 for(i=0;i<MAX_I2O_CONTROLLERS;i++)
479 {
480 if(i2o_controllers[i]==NULL)
481 {
482 c->dlct = (i2o_lct*)kmalloc(8192, GFP_KERNEL);
483 if(c->dlct==NULL)
484 {
485 up(&i2o_configuration_lock);
486 return -ENOMEM;
487 }
488 i2o_controllers[i]=c;
489 c->devices = NULL;
490 c->next=i2o_controller_chain;
491 i2o_controller_chain=c;
492 c->unit = i;
493 c->page_frame = NULL;
494 c->hrt = NULL;
495 c->lct = NULL;
496 c->status_block = NULL;
497 sprintf(c->name, "i2o/iop%d", i);
498 i2o_num_controllers++;
499 init_MUTEX_LOCKED(&c->lct_sem);
500 up(&i2o_configuration_lock);
501 return 0;
502 }
503 }
504 printk(KERN_ERR "No free i2o controller slots.\n");
505 up(&i2o_configuration_lock);
506 return -EBUSY;
507 }
508
509 /**
510 * i2o_delete_controller - delete a controller
511 * @c: controller
512 *
513 * Remove an i2o controller from the system. If the controller or its
514 * devices are busy then -EBUSY is returned. On a failure a negative
515 * errno code is returned. On success zero is returned.
516 */
517
i2o_delete_controller(struct i2o_controller * c)518 int i2o_delete_controller(struct i2o_controller *c)
519 {
520 struct i2o_controller **p;
521 int users;
522 char name[16];
523 int stat;
524
525 dprintk(KERN_INFO "Deleting controller %s\n", c->name);
526
527 /*
528 * Clear event registration as this can cause weird behavior
529 */
530 if(c->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
531 i2o_event_register(c, core_context, 0, 0, 0);
532
533 down(&i2o_configuration_lock);
534 if((users=atomic_read(&c->users)))
535 {
536 dprintk(KERN_INFO "I2O: %d users for controller %s\n", users,
537 c->name);
538 up(&i2o_configuration_lock);
539 return -EBUSY;
540 }
541 while(c->devices)
542 {
543 if(__i2o_delete_device(c->devices)<0)
544 {
545 /* Shouldnt happen */
546 c->bus_disable(c);
547 up(&i2o_configuration_lock);
548 return -EBUSY;
549 }
550 }
551
552 /*
553 * If this is shutdown time, the thread's already been killed
554 */
555 if(c->lct_running) {
556 stat = kill_proc(c->lct_pid, SIGTERM, 1);
557 if(!stat) {
558 int count = 10 * 100;
559 while(c->lct_running && --count) {
560 current->state = TASK_INTERRUPTIBLE;
561 schedule_timeout(1);
562 }
563
564 if(!count)
565 printk(KERN_ERR
566 "%s: LCT thread still running!\n",
567 c->name);
568 }
569 }
570
571 p=&i2o_controller_chain;
572
573 while(*p)
574 {
575 if(*p==c)
576 {
577 /* Ask the IOP to switch to RESET state */
578 i2o_reset_controller(c);
579
580 /* Release IRQ */
581 c->destructor(c);
582
583 *p=c->next;
584 up(&i2o_configuration_lock);
585
586 if(c->page_frame)
587 {
588 pci_unmap_single(c->pdev, c->page_frame_map, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
589 kfree(c->page_frame);
590 }
591 if(c->hrt)
592 kfree(c->hrt);
593 if(c->lct)
594 kfree(c->lct);
595 if(c->status_block)
596 kfree(c->status_block);
597 if(c->dlct)
598 kfree(c->dlct);
599
600 i2o_controllers[c->unit]=NULL;
601 memcpy(name, c->name, strlen(c->name)+1);
602 kfree(c);
603 dprintk(KERN_INFO "%s: Deleted from controller chain.\n", name);
604
605 i2o_num_controllers--;
606 return 0;
607 }
608 p=&((*p)->next);
609 }
610 up(&i2o_configuration_lock);
611 printk(KERN_ERR "i2o_delete_controller: bad pointer!\n");
612 return -ENOENT;
613 }
614
/**
 *	i2o_unlock_controller - unlock a controller
 *	@c: controller to unlock
 *
 *	Drop a lock previously taken on an i2o controller (see
 *	i2o_find_controller). While locked, a controller cannot be
 *	deleted: i2o controllers are not refcounted, so deletion of an
 *	in-use controller fails rather than taking effect on the last
 *	dereference.
 */
623
void i2o_unlock_controller(struct i2o_controller *c)
{
	/* Drop the reference taken by i2o_find_controller() */
	atomic_dec(&c->users);
}
628
629 /**
630 * i2o_find_controller - return a locked controller
631 * @n: controller number
632 *
633 * Returns a pointer to the controller object. The controller is locked
634 * on return. NULL is returned if the controller is not found.
635 */
636
i2o_find_controller(int n)637 struct i2o_controller *i2o_find_controller(int n)
638 {
639 struct i2o_controller *c;
640
641 if(n<0 || n>=MAX_I2O_CONTROLLERS)
642 return NULL;
643
644 down(&i2o_configuration_lock);
645 c=i2o_controllers[n];
646 if(c!=NULL)
647 atomic_inc(&c->users);
648 up(&i2o_configuration_lock);
649 return c;
650 }
651
652 /**
653 * i2o_issue_claim - claim or release a device
654 * @cmd: command
655 * @c: controller to claim for
656 * @tid: i2o task id
657 * @type: type of claim
658 *
659 * Issue I2O UTIL_CLAIM and UTIL_RELEASE messages. The message to be sent
660 * is set by cmd. The tid is the task id of the object to claim and the
661 * type is the claim type (see the i2o standard)
662 *
663 * Zero is returned on success.
664 */
665
static int i2o_issue_claim(u32 cmd, struct i2o_controller *c, int tid, u32 type)
{
	u32 msg[5];

	msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = cmd << 24 | HOST_TID<<12 | tid;
	/* Initiator context: zeroed so no uninitialized stack data is
	 * posted to the IOP.  Presumably i2o_post_wait stamps its own
	 * context into msg[2] before posting -- TODO confirm; the old
	 * code left this word uninitialized. */
	msg[2] = 0;
	msg[3] = 0;		/* transaction context */
	msg[4] = type;

	return i2o_post_wait(c, msg, sizeof(msg), 60);
}
677
678 /*
679 * i2o_claim_device - claim a device for use by an OSM
680 * @d: device to claim
681 * @h: handler for this device
682 *
683 * Do the leg work to assign a device to a given OSM on Linux. The
684 * kernel updates the internal handler data for the device and then
685 * performs an I2O claim for the device, attempting to claim the
686 * device as primary. If the attempt fails a negative errno code
687 * is returned. On success zero is returned.
688 */
689
i2o_claim_device(struct i2o_device * d,struct i2o_handler * h)690 int i2o_claim_device(struct i2o_device *d, struct i2o_handler *h)
691 {
692 down(&i2o_configuration_lock);
693 if (d->owner) {
694 printk(KERN_INFO "Device claim called, but dev already owned by %s!",
695 h->name);
696 up(&i2o_configuration_lock);
697 return -EBUSY;
698 }
699 d->owner=h;
700
701 if(i2o_issue_claim(I2O_CMD_UTIL_CLAIM ,d->controller,d->lct_data.tid,
702 I2O_CLAIM_PRIMARY))
703 {
704 d->owner = NULL;
705 return -EBUSY;
706 }
707 up(&i2o_configuration_lock);
708 return 0;
709 }
710
711 /**
712 * i2o_release_device - release a device that the OSM is using
713 * @d: device to claim
714 * @h: handler for this device
715 *
716 * Drop a claim by an OSM on a given I2O device. The handler is cleared
717 * and 0 is returned on success.
718 *
719 * AC - some devices seem to want to refuse an unclaim until they have
720 * finished internal processing. It makes sense since you don't want a
721 * new device to go reconfiguring the entire system until you are done.
722 * Thus we are prepared to wait briefly.
723 */
724
int i2o_release_device(struct i2o_device *d, struct i2o_handler *h)
{
	int err = 0;
	int tries;

	down(&i2o_configuration_lock);
	if (d->owner != h) {
		/* Only the current owner may release the claim */
		printk(KERN_INFO "Claim release called, but not owned by %s!\n",
		       h->name);
		up(&i2o_configuration_lock);
		return -ENOENT;
	}

	/* Some IOPs refuse the release until internal processing is done,
	 * so retry up to 10 times with a 1 second sleep between attempts.
	 * NOTE(review): the sleep happens while i2o_configuration_lock is
	 * held, stalling other configuration work for up to ~10s. */
	for(tries=0;tries<10;tries++)
	{
		d->owner = NULL;

		/*
		 * If the controller takes a nonblocking approach to
		 * releases we have to sleep/poll for a few times.
		 */

		if((err=i2o_issue_claim(I2O_CMD_UTIL_RELEASE, d->controller, d->lct_data.tid, I2O_CLAIM_PRIMARY)) )
		{
			err = -ENXIO;
			/* Back off one second, then try again */
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ);
		}
		else
		{
			err=0;
			break;
		}
	}
	up(&i2o_configuration_lock);
	return err;
}
762
763 /**
764 * i2o_device_notify_on - Enable deletion notifiers
765 * @d: device for notification
766 * @h: handler to install
767 *
768 * Called by OSMs to let the core know that they want to be
769 * notified if the given device is deleted from the system.
770 */
771
i2o_device_notify_on(struct i2o_device * d,struct i2o_handler * h)772 int i2o_device_notify_on(struct i2o_device *d, struct i2o_handler *h)
773 {
774 int i;
775
776 if(d->num_managers == I2O_MAX_MANAGERS)
777 return -ENOSPC;
778
779 for(i = 0; i < I2O_MAX_MANAGERS; i++)
780 {
781 if(!d->managers[i])
782 {
783 d->managers[i] = h;
784 break;
785 }
786 }
787
788 d->num_managers++;
789
790 return 0;
791 }
792
793 /**
794 * i2o_device_notify_off - Remove deletion notifiers
795 * @d: device for notification
796 * @h: handler to remove
797 *
798 * Called by OSMs to let the core know that they no longer
799 * are interested in the fate of the given device.
800 */
i2o_device_notify_off(struct i2o_device * d,struct i2o_handler * h)801 int i2o_device_notify_off(struct i2o_device *d, struct i2o_handler *h)
802 {
803 int i;
804
805 for(i=0; i < I2O_MAX_MANAGERS; i++)
806 {
807 if(d->managers[i] == h)
808 {
809 d->managers[i] = NULL;
810 d->num_managers--;
811 return 0;
812 }
813 }
814
815 return -ENOENT;
816 }
817
818 /**
819 * i2o_event_register - register interest in an event
820 * @c: Controller to register interest with
821 * @tid: I2O task id
822 * @init_context: initiator context to use with this notifier
823 * @tr_context: transaction context to use with this notifier
824 * @evt_mask: mask of events
825 *
826 * Create and posts an event registration message to the task. No reply
827 * is waited for, or expected. Errors in posting will be reported.
828 */
829
int i2o_event_register(struct i2o_controller *c, u32 tid,
		u32 init_context, u32 tr_context, u32 evt_mask)
{
	/*
	 * Not performance critical, so build the frame on the stack and
	 * let i2o_post_this copy it into IOP memory.
	 */
	u32 frame[5];

	frame[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	frame[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | tid;
	frame[2] = init_context;	/* routes the reply back to us */
	frame[3] = tr_context;
	frame[4] = evt_mask;		/* which event indicators to report */

	return i2o_post_this(c, frame, sizeof(frame));
}
845
846 /*
847 * i2o_event_ack - acknowledge an event
848 * @c: controller
849 * @msg: pointer to the UTIL_EVENT_REGISTER reply we received
850 *
851 * We just take a pointer to the original UTIL_EVENT_REGISTER reply
852 * message and change the function code since that's what spec
853 * describes an EventAck message looking like.
854 */
855
int i2o_event_ack(struct i2o_controller *c, u32 *msg)
{
	struct i2o_message *reply = (struct i2o_message *)msg;

	/* Per the spec an EventAck is just the EVT_REGISTER reply with the
	 * function code swapped, so patch it in place and post it back. */
	reply->function = I2O_CMD_UTIL_EVT_ACK;

	return i2o_post_wait(c, msg, reply->size * 4, 2);
}
864
865 /*
866 * Core event handler. Runs as a separate thread and is woken
867 * up whenever there is an Executive class event.
868 */
i2o_core_evt(void * reply_data)869 static int i2o_core_evt(void *reply_data)
870 {
871 struct reply_info *reply = (struct reply_info *) reply_data;
872 u32 *msg = reply->msg;
873 struct i2o_controller *c = NULL;
874 unsigned long flags;
875
876 lock_kernel();
877 daemonize();
878 unlock_kernel();
879
880 strcpy(current->comm, "i2oevtd");
881 evt_running = 1;
882
883 while(1)
884 {
885 if(down_interruptible(&evt_sem))
886 {
887 dprintk(KERN_INFO "I2O event thread dead\n");
888 printk("exiting...");
889 evt_running = 0;
890 complete_and_exit(&evt_dead, 0);
891 }
892
893 /*
894 * Copy the data out of the queue so that we don't have to lock
895 * around the whole function and just around the qlen update
896 */
897 spin_lock_irqsave(&i2o_evt_lock, flags);
898 memcpy(reply, &events[evt_out], sizeof(struct reply_info));
899 MODINC(evt_out, I2O_EVT_Q_LEN);
900 evt_q_len--;
901 spin_unlock_irqrestore(&i2o_evt_lock, flags);
902
903 c = reply->iop;
904 dprintk(KERN_INFO "I2O IRTOS EVENT: iop%d, event %#10x\n", c->unit, msg[4]);
905
906 /*
907 * We do not attempt to delete/quiesce/etc. the controller if
908 * some sort of error indidication occurs. We may want to do
909 * so in the future, but for now we just let the user deal with
910 * it. One reason for this is that what to do with an error
911 * or when to send what �rror is not really agreed on, so
912 * we get errors that may not be fatal but just look like they
913 * are...so let the user deal with it.
914 */
915 switch(msg[4])
916 {
917 case I2O_EVT_IND_EXEC_RESOURCE_LIMITS:
918 printk(KERN_ERR "%s: Out of resources\n", c->name);
919 break;
920
921 case I2O_EVT_IND_EXEC_POWER_FAIL:
922 printk(KERN_ERR "%s: Power failure\n", c->name);
923 break;
924
925 case I2O_EVT_IND_EXEC_HW_FAIL:
926 {
927 char *fail[] =
928 {
929 "Unknown Error",
930 "Power Lost",
931 "Code Violation",
932 "Parity Error",
933 "Code Execution Exception",
934 "Watchdog Timer Expired"
935 };
936
937 if(msg[5] <= 6)
938 printk(KERN_ERR "%s: Hardware Failure: %s\n",
939 c->name, fail[msg[5]]);
940 else
941 printk(KERN_ERR "%s: Unknown Hardware Failure\n", c->name);
942
943 break;
944 }
945
946 /*
947 * New device created
948 * - Create a new i2o_device entry
949 * - Inform all interested drivers about this device's existence
950 */
951 case I2O_EVT_IND_EXEC_NEW_LCT_ENTRY:
952 {
953 struct i2o_device *d = (struct i2o_device *)
954 kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
955 int i;
956
957 if (d == NULL) {
958 printk(KERN_EMERG "i2oevtd: out of memory\n");
959 break;
960 }
961 memcpy(&d->lct_data, &msg[5], sizeof(i2o_lct_entry));
962
963 d->next = NULL;
964 d->controller = c;
965 d->flags = 0;
966
967 i2o_report_controller_unit(c, d);
968 i2o_install_device(c,d);
969
970 for(i = 0; i < MAX_I2O_MODULES; i++)
971 {
972 if(i2o_handlers[i] &&
973 i2o_handlers[i]->new_dev_notify &&
974 (i2o_handlers[i]->class&d->lct_data.class_id))
975 {
976 spin_lock(&i2o_dev_lock);
977 i2o_handlers[i]->new_dev_notify(c,d);
978 spin_unlock(&i2o_dev_lock);
979 }
980 }
981
982 break;
983 }
984
985 /*
986 * LCT entry for a device has been modified, so update it
987 * internally.
988 */
989 case I2O_EVT_IND_EXEC_MODIFIED_LCT:
990 {
991 struct i2o_device *d;
992 i2o_lct_entry *new_lct = (i2o_lct_entry *)&msg[5];
993
994 for(d = c->devices; d; d = d->next)
995 {
996 if(d->lct_data.tid == new_lct->tid)
997 {
998 memcpy(&d->lct_data, new_lct, sizeof(i2o_lct_entry));
999 break;
1000 }
1001 }
1002 break;
1003 }
1004
1005 case I2O_EVT_IND_CONFIGURATION_FLAG:
1006 printk(KERN_WARNING "%s requires user configuration\n", c->name);
1007 break;
1008
1009 case I2O_EVT_IND_GENERAL_WARNING:
1010 printk(KERN_WARNING "%s: Warning notification received!"
1011 "Check configuration for errors!\n", c->name);
1012 break;
1013
1014 case I2O_EVT_IND_EVT_MASK_MODIFIED:
1015 /* Well I guess that was us hey .. */
1016 break;
1017
1018 default:
1019 printk(KERN_WARNING "%s: No handler for event (0x%08x)\n", c->name, msg[4]);
1020 break;
1021 }
1022 }
1023
1024 return 0;
1025 }
1026
1027 /*
1028 * Dynamic LCT update. This compares the LCT with the currently
1029 * installed devices to check for device deletions..this needed b/c there
1030 * is no DELETED_LCT_ENTRY EventIndicator for the Executive class so
1031 * we can't just have the event handler do this...annoying
1032 *
1033 * This is a hole in the spec that will hopefully be fixed someday.
1034 */
/*
 * i2o_dyn_lct - per-controller dynamic LCT update thread
 * @foo: the controller this thread serves (struct i2o_controller *)
 *
 * Kernel thread, one per controller (iop<unit>_lctd).  Sleeps on
 * c->lct_sem until i2o_core_reply() sees an LCT_NOTIFY reply, then
 * reconciles the controller's device list against the freshly fetched
 * LCT (needed because the Executive class has no DELETED_LCT_ENTRY
 * event indicator).  Exits when woken by a signal.
 */
static int i2o_dyn_lct(void *foo)
{
	struct i2o_controller *c = (struct i2o_controller *)foo;
	struct i2o_device *d = NULL;
	struct i2o_device *d1 = NULL;
	int i = 0;
	int found = 0;
	int entries;
	void *tmp;
	char name[16];

	lock_kernel();
	daemonize();
	unlock_kernel();

	sprintf(name, "iop%d_lctd", c->unit);
	strcpy(current->comm, name);

	c->lct_running = 1;

	while(1)
	{
		/* Sleep until an LCT_NOTIFY reply wakes us up */
		down_interruptible(&c->lct_sem);
		if(signal_pending(current))
		{
			dprintk(KERN_ERR "%s: LCT thread dead\n", c->name);
			c->lct_running = 0;
			return 0;
		}

		/* Derive the entry count from table_size (in dwords):
		 * assumes a 3-dword LCT header and 9-dword entries --
		 * TODO confirm against the I2O spec revision in use */
		entries = c->dlct->table_size;
		entries -= 3;
		entries /= 9;

		dprintk(KERN_INFO "%s: Dynamic LCT Update\n",c->name);
		dprintk(KERN_INFO "%s: Dynamic LCT contains %d entries\n", c->name, entries);

		if(!entries)
		{
			printk(KERN_INFO "%s: Empty LCT???\n", c->name);
			continue;
		}

		/*
		 * Loop through all the devices on the IOP looking for their
		 * LCT data in the LCT. We assume that TIDs are not repeated.
		 * as that is the only way to really tell. It's been confirmed
		 * by the IRTOS vendor(s?) that TIDs are not reused until they
		 * wrap arround(4096), and I doubt a system will up long enough
		 * to create/delete that many devices.
		 */
		for(d = c->devices; d; )
		{
			found = 0;
			d1 = d->next;	/* saved: d may be freed below */

			for(i = 0; i < entries; i++)
			{
				if(d->lct_data.tid == c->dlct->lct_entry[i].tid)
				{
					found = 1;
					break;
				}
			}
			if(!found)
			{
				/* Device vanished from the LCT: drop it */
				dprintk(KERN_INFO "i2o_core: Deleted device!\n");
				spin_lock(&i2o_dev_lock);
				i2o_delete_device(d);
				spin_unlock(&i2o_dev_lock);
			}
			d = d1;
		}

		/*
		 * Tell LCT to renotify us next time there is a change
		 */
		i2o_lct_notify(c);

		/*
		 * Copy new LCT into public LCT
		 *
		 * Possible race if someone is reading LCT while we are copying
		 * over it. If this happens, we'll fix it then. but I doubt that
		 * the LCT will get updated often enough or will get read by
		 * a user often enough to worry.
		 */
		if(c->lct->table_size < c->dlct->table_size)
		{
			/* Grow the public LCT; keep the old buffer if the
			 * allocation fails so readers still have data */
			tmp = c->lct;
			c->lct = kmalloc(c->dlct->table_size<<2, GFP_KERNEL);
			if(!c->lct)
			{
				printk(KERN_ERR "%s: No memory for LCT!\n", c->name);
				c->lct = tmp;
				continue;
			}
			kfree(tmp);
		}
		memcpy(c->lct, c->dlct, c->dlct->table_size<<2);
	}

	return 0;
}
1139
1140 /**
1141 * i2o_run_queue - process pending events on a controller
1142 * @c: controller to process
1143 *
1144 * This is called by the bus specific driver layer when an interrupt
1145 * or poll of this card interface is desired.
1146 */
1147
void i2o_run_queue(struct i2o_controller *c)
{
	struct i2o_message *m;
	u32 mv;		/* bus address of the current reply frame */
	u32 *msg;

	/*
	 * Old 960 steppings had a bug in the I2O unit that caused
	 * the queue to appear empty when it wasn't, so an 0xFFFFFFFF
	 * ("empty") read is retried once before being believed.
	 */
	if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
		mv=I2O_REPLY_READ32(c);

	while(mv!=0xFFFFFFFF)
	{
		struct i2o_handler *i;
		/* Map the message from the page frame map to kernel virtual */
		/* m=(struct i2o_message *)(mv - (unsigned long)c->page_frame_map + (unsigned long)c->page_frame); */
		m=(struct i2o_message *)bus_to_virt(mv);
		msg=(u32*)m;

		/*
		 * Ensure this message is seen coherently but cachably by
		 * the processor
		 */

		pci_dma_sync_single(c->pdev, c->page_frame_map, MSG_FRAME_SIZE, PCI_DMA_FROMDEVICE);

		/*
		 * Despatch it: the low bits of the initiator context are
		 * the handler's slot in i2o_handlers[]
		 */

		i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)];
		if(i && i->reply)
			i->reply(i,c,m);
		else
		{
			printk(KERN_WARNING "I2O: Spurious reply to handler %d\n",
				m->initiator_context&(MAX_I2O_MODULES-1));
		}
		/* Return the frame to the IOP's free list */
		i2o_flush_reply(c,mv);
		mb();

		/* That 960 bug again... */
		if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF)
			mv=I2O_REPLY_READ32(c);
	}
}
1196
1197
1198 /**
1199 * i2o_get_class_name - do i2o class name lookup
1200 * @class: class number
1201 *
1202 * Return a descriptive string for an i2o class
1203 */
1204
i2o_get_class_name(int class)1205 const char *i2o_get_class_name(int class)
1206 {
1207 int idx = 16;
1208 static char *i2o_class_name[] = {
1209 "Executive",
1210 "Device Driver Module",
1211 "Block Device",
1212 "Tape Device",
1213 "LAN Interface",
1214 "WAN Interface",
1215 "Fibre Channel Port",
1216 "Fibre Channel Device",
1217 "SCSI Device",
1218 "ATE Port",
1219 "ATE Device",
1220 "Floppy Controller",
1221 "Floppy Device",
1222 "Secondary Bus Port",
1223 "Peer Transport Agent",
1224 "Peer Transport",
1225 "Unknown"
1226 };
1227
1228 switch(class&0xFFF)
1229 {
1230 case I2O_CLASS_EXECUTIVE:
1231 idx = 0; break;
1232 case I2O_CLASS_DDM:
1233 idx = 1; break;
1234 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1235 idx = 2; break;
1236 case I2O_CLASS_SEQUENTIAL_STORAGE:
1237 idx = 3; break;
1238 case I2O_CLASS_LAN:
1239 idx = 4; break;
1240 case I2O_CLASS_WAN:
1241 idx = 5; break;
1242 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1243 idx = 6; break;
1244 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1245 idx = 7; break;
1246 case I2O_CLASS_SCSI_PERIPHERAL:
1247 idx = 8; break;
1248 case I2O_CLASS_ATE_PORT:
1249 idx = 9; break;
1250 case I2O_CLASS_ATE_PERIPHERAL:
1251 idx = 10; break;
1252 case I2O_CLASS_FLOPPY_CONTROLLER:
1253 idx = 11; break;
1254 case I2O_CLASS_FLOPPY_DEVICE:
1255 idx = 12; break;
1256 case I2O_CLASS_BUS_ADAPTER_PORT:
1257 idx = 13; break;
1258 case I2O_CLASS_PEER_TRANSPORT_AGENT:
1259 idx = 14; break;
1260 case I2O_CLASS_PEER_TRANSPORT:
1261 idx = 15; break;
1262 }
1263
1264 return i2o_class_name[idx];
1265 }
1266
1267
1268 /**
1269 * i2o_wait_message - obtain an i2o message from the IOP
1270 * @c: controller
1271 * @why: explanation
1272 *
1273 * This function waits up to 5 seconds for a message slot to be
1274 * available. If no message is available it prints an error message
1275 * that is expected to be what the message will be used for (eg
1276 * "get_status"). 0xFFFFFFFF is returned on a failure.
1277 *
1278 * On a success the message is returned. This is the physical page
1279 * frame offset address from the read port. (See the i2o spec)
1280 */
1281
i2o_wait_message(struct i2o_controller * c,char * why)1282 u32 i2o_wait_message(struct i2o_controller *c, char *why)
1283 {
1284 long time=jiffies;
1285 u32 m;
1286 while((m=I2O_POST_READ32(c))==0xFFFFFFFF)
1287 {
1288 if((jiffies-time)>=5*HZ)
1289 {
1290 dprintk(KERN_ERR "%s: Timeout waiting for message frame to send %s.\n",
1291 c->name, why);
1292 return 0xFFFFFFFF;
1293 }
1294 schedule();
1295 barrier();
1296 }
1297 return m;
1298 }
1299
1300 /**
1301 * i2o_report_controller_unit - print information about a tid
1302 * @c: controller
1303 * @d: device
1304 *
1305 * Dump an information block associated with a given unit (TID). The
1306 * tables are read and a block of text is output to printk that is
1307 * formatted intended for the user.
1308 */
1309
i2o_report_controller_unit(struct i2o_controller * c,struct i2o_device * d)1310 void i2o_report_controller_unit(struct i2o_controller *c, struct i2o_device *d)
1311 {
1312 char buf[64];
1313 char str[22];
1314 int ret;
1315 int unit = d->lct_data.tid;
1316
1317 if(verbose==0)
1318 return;
1319
1320 printk(KERN_INFO "Target ID %d.\n", unit);
1321 if((ret=i2o_query_scalar(c, unit, 0xF100, 3, buf, 16))>=0)
1322 {
1323 buf[16]=0;
1324 printk(KERN_INFO " Vendor: %s\n", buf);
1325 }
1326 if((ret=i2o_query_scalar(c, unit, 0xF100, 4, buf, 16))>=0)
1327 {
1328 buf[16]=0;
1329 printk(KERN_INFO " Device: %s\n", buf);
1330 }
1331 if(i2o_query_scalar(c, unit, 0xF100, 5, buf, 16)>=0)
1332 {
1333 buf[16]=0;
1334 printk(KERN_INFO " Description: %s\n", buf);
1335 }
1336 if((ret=i2o_query_scalar(c, unit, 0xF100, 6, buf, 8))>=0)
1337 {
1338 buf[8]=0;
1339 printk(KERN_INFO " Rev: %s\n", buf);
1340 }
1341
1342 printk(KERN_INFO " Class: ");
1343 sprintf(str, "%-21s", i2o_get_class_name(d->lct_data.class_id));
1344 printk("%s\n", str);
1345
1346 printk(KERN_INFO " Subclass: 0x%04X\n", d->lct_data.sub_class);
1347 printk(KERN_INFO " Flags: ");
1348
1349 if(d->lct_data.device_flags&(1<<0))
1350 printk("C"); // ConfigDialog requested
1351 if(d->lct_data.device_flags&(1<<1))
1352 printk("U"); // Multi-user capable
1353 if(!(d->lct_data.device_flags&(1<<4)))
1354 printk("P"); // Peer service enabled!
1355 if(!(d->lct_data.device_flags&(1<<5)))
1356 printk("M"); // Mgmt service enabled!
1357 printk("\n");
1358
1359 }
1360
1361
1362 /*
1363 * Parse the hardware resource table. Right now we print it out
1364 * and don't do a lot with it. We should collate these and then
1365 * interact with the Linux resource allocation block.
1366 *
 * Let's prove we can read it first, eh?
1368 *
1369 * This is full of endianisms!
1370 */
1371
static int i2o_parse_hrt(struct i2o_controller *c)
{
#ifdef DRIVERDEBUG
	u32 *rows=(u32*)c->hrt;
	u8 *p=(u8 *)c->hrt;
	u8 *d;
	int count;
	int length;
	int i;
	int state;

	/* Byte 3 of the HRT header is the table version; only 0 is understood */
	if(p[3]!=0)
	{
		printk(KERN_ERR "%s: HRT table for controller is too new a version.\n",
			c->name);
		return -1;
	}

	/* Header: 16-bit little-endian entry count, then entry length in 32-bit words */
	count=p[0]|(p[1]<<8);
	length = p[2];

	printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
		c->name, count, length<<2);

	rows+=2;	/* skip the two-word table header */

	for(i=0;i<count;i++)
	{
		printk(KERN_INFO "Adapter %08X: ", rows[0]);
		p=(u8 *)(rows+1);
		d=(u8 *)(rows+2);
		/* Little-endian 16-bit field: low 12 bits TID, top 4 bits state */
		state=p[1]<<8|p[0];

		printk("TID %04X:[", state&0xFFF);
		state>>=12;
		if(state&(1<<0))
			printk("H");		/* Hidden */
		if(state&(1<<2))
		{
			printk("P");		/* Present */
			if(state&(1<<1))
				printk("C");	/* Controlled */
		}
		if(state>9)
			printk("*");		/* Hard */

		printk("]:");

		/* NOTE(review): p[3] is a u8, so the 0xFFFF mask is a no-op;
		   presumably the bus type was meant to be a 16-bit read - confirm */
		switch(p[3]&0xFFFF)
		{
			case 0:
				/* Adapter private bus - easy */
				printk("Local bus %d: I/O at 0x%04X Mem 0x%08X",
					p[2], d[1]<<8|d[0], *(u32 *)(d+4));
				break;
			case 1:
				/* ISA bus */
				printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X",
					p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4));
				break;

			case 2: /* EISA bus */
				printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
					p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
				break;

			case 3: /* MCA bus */
				printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
					p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
				break;

			case 4: /* PCI bus */
				printk("PCI %d: Bus %d Device %d Function %d",
					p[2], d[2], d[1], d[0]);
				break;

			case 0x80: /* Other */
			default:
				printk("Unsupported bus type.");
				break;
		}
		printk("\n");
		rows+=length;	/* advance one entry (length is in words) */
	}
#endif
	return 0;
}
1459
1460 /*
1461 * The logical configuration table tells us what we can talk to
1462 * on the board. Most of the stuff isn't interesting to us.
1463 */
1464
i2o_parse_lct(struct i2o_controller * c)1465 static int i2o_parse_lct(struct i2o_controller *c)
1466 {
1467 int i;
1468 int max;
1469 int tid;
1470 struct i2o_device *d;
1471 i2o_lct *lct = c->lct;
1472
1473 if (lct == NULL) {
1474 printk(KERN_ERR "%s: LCT is empty???\n", c->name);
1475 return -1;
1476 }
1477
1478 max = lct->table_size;
1479 max -= 3;
1480 max /= 9;
1481
1482 printk(KERN_INFO "%s: LCT has %d entries.\n", c->name, max);
1483
1484 if(lct->iop_flags&(1<<0))
1485 printk(KERN_WARNING "%s: Configuration dialog desired.\n", c->name);
1486
1487 for(i=0;i<max;i++)
1488 {
1489 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1490 if(d==NULL)
1491 {
1492 printk(KERN_CRIT "i2o_core: Out of memory for I2O device data.\n");
1493 return -ENOMEM;
1494 }
1495
1496 d->controller = c;
1497 d->next = NULL;
1498
1499 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1500
1501 d->flags = 0;
1502 tid = d->lct_data.tid;
1503
1504 i2o_report_controller_unit(c, d);
1505
1506 i2o_install_device(c, d);
1507 }
1508 return 0;
1509 }
1510
1511
1512 /**
1513 * i2o_quiesce_controller - quiesce controller
1514 * @c: controller
1515 *
1516 * Quiesce an IOP. Causes IOP to make external operation quiescent
1517 * (i2o 'READY' state). Internal operation of the IOP continues normally.
1518 */
1519
int i2o_quiesce_controller(struct i2o_controller *c)
{
	u32 msg[4];
	int ret;

	/* Refresh the status block so the state test below is current */
	i2o_status_get(c);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
	if ((c->status_block->iop_state != ADAPTER_STATE_READY) &&
		(c->status_block->iop_state != ADAPTER_STATE_OPERATIONAL))
	{
		return 0;
	}

	/* msg[2] (initiator context) is filled in by i2o_post_wait */
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[3] = 0;

	/* Long timeout needed for quiesce if lots of devices */
	if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
		printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
			c->name, -ret);
	else
		dprintk(KERN_INFO "%s: Quiesced.\n", c->name);

	i2o_status_get(c);	// Entered READY state
	return ret;
}
1550
1551 /**
1552 * i2o_enable_controller - move controller from ready to operational
1553 * @c: controller
1554 *
1555 * Enable IOP. This allows the IOP to resume external operations and
1556 * reverses the effect of a quiesce. In the event of an error a negative
1557 * errno code is returned.
1558 */
1559
i2o_enable_controller(struct i2o_controller * c)1560 int i2o_enable_controller(struct i2o_controller *c)
1561 {
1562 u32 msg[4];
1563 int ret;
1564
1565 i2o_status_get(c);
1566
1567 /* Enable only allowed on READY state */
1568 if(c->status_block->iop_state != ADAPTER_STATE_READY)
1569 return -EINVAL;
1570
1571 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1572 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
1573
1574 /* How long of a timeout do we need? */
1575
1576 if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
1577 printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
1578 c->name, -ret);
1579 else
1580 dprintk(KERN_INFO "%s: Enabled.\n", c->name);
1581
1582 i2o_status_get(c); // entered OPERATIONAL state
1583
1584 return ret;
1585 }
1586
1587 /**
1588 * i2o_clear_controller - clear a controller
1589 * @c: controller
1590 *
1591 * Clear an IOP to HOLD state, ie. terminate external operations, clear all
1592 * input queues and prepare for a system restart. IOP's internal operation
1593 * continues normally and the outbound queue is alive.
1594 * The IOP is not expected to rebuild its LCT.
1595 */
1596
i2o_clear_controller(struct i2o_controller * c)1597 int i2o_clear_controller(struct i2o_controller *c)
1598 {
1599 struct i2o_controller *iop;
1600 u32 msg[4];
1601 int ret;
1602
1603 /* Quiesce all IOPs first */
1604
1605 for (iop = i2o_controller_chain; iop; iop = iop->next)
1606 i2o_quiesce_controller(iop);
1607
1608 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1609 msg[1]=I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID;
1610 msg[3]=0;
1611
1612 if ((ret=i2o_post_wait(c, msg, sizeof(msg), 30)))
1613 printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
1614 c->name, -ret);
1615 else
1616 dprintk(KERN_INFO "%s: Cleared.\n",c->name);
1617
1618 i2o_status_get(c);
1619
1620 /* Enable other IOPs */
1621
1622 for (iop = i2o_controller_chain; iop; iop = iop->next)
1623 if (iop != c)
1624 i2o_enable_controller(iop);
1625
1626 return ret;
1627 }
1628
1629
1630 /**
1631 * i2o_reset_controller - reset an IOP
1632 * @c: controller to reset
1633 *
1634 * Reset the IOP into INIT state and wait until IOP gets into RESET state.
1635 * Terminate all external operations, clear IOP's inbound and outbound
1636 * queues, terminate all DDMs, and reload the IOP's operating environment
1637 * and all local DDMs. The IOP rebuilds its LCT.
1638 */
1639
static int i2o_reset_controller(struct i2o_controller *c)
{
	struct i2o_controller *iop;
	u32 m;
	u8 *status;	/* 4-byte buffer the IOP DMAs the reset result into */
	u32 *msg;
	long time;

	/* Quiesce all IOPs first (DPT PCI controllers are skipped) */

	for (iop = i2o_controller_chain; iop; iop = iop->next)
	{
		if(iop->type != I2O_TYPE_PCI || !iop->bus.pci.dpt)
			i2o_quiesce_controller(iop);
	}

	m=i2o_wait_message(c, "AdapterReset");
	if(m==0xFFFFFFFF)
		return -ETIMEDOUT;
	msg=(u32 *)(c->mem_offset+m);

	status=(void *)kmalloc(4, GFP_KERNEL);
	if(status==NULL) {
		printk(KERN_ERR "IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status, 0, 4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=core_context;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=virt_to_bus(status);	/* IOP writes the result code here */
	msg[7]=0; /* 64bit host FIXME */

	i2o_post_message(c,m);

	/* Wait for a reply: the IOP DMAs a non-zero code into *status */
	time=jiffies;
	while(*status==0)
	{
		if((jiffies-time)>=20*HZ)
		{
			printk(KERN_ERR "IOP reset timeout.\n");
			/* The IOP may still DMA into the buffer later, so
			   freeing it now would risk corruption. */
			// Better to leak this for safety: kfree(status);
			return -ETIMEDOUT;
		}
		schedule();
		barrier();
	}

	if (*status==I2O_CMD_IN_PROGRESS)
	{
		/*
		 * Once the reset is sent, the IOP goes into the INIT state
		 * which is indeterminate. We need to wait until the IOP
		 * has rebooted before we can let the system talk to
		 * it. We read the inbound Free_List until a message is
		 * available. If we can't read one in the given amount of
		 * time, we assume the IOP could not reboot properly.
		 */

		dprintk(KERN_INFO "%s: Reset in progress, waiting for reboot...\n",
			c->name);

		time = jiffies;
		m = I2O_POST_READ32(c);
		while(m == 0XFFFFFFFF)
		{
			if((jiffies-time) >= 30*HZ)
			{
				printk(KERN_ERR "%s: Timeout waiting for IOP reset.\n",
					c->name);
				/* NOTE(review): status is leaked here too, for
				   the same DMA-safety reason as above */
				return -ETIMEDOUT;
			}
			schedule();
			barrier();
			m = I2O_POST_READ32(c);
		}
		/* Return the free-list frame we just consumed */
		i2o_flush_reply(c,m);
	}

	/* If IopReset was rejected or didn't perform reset, try IopClear */

	i2o_status_get(c);
	if (status[0] == I2O_CMD_REJECTED ||
		c->status_block->iop_state != ADAPTER_STATE_RESET)
	{
		printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",c->name);
		i2o_clear_controller(c);
	}
	else
		dprintk(KERN_INFO "%s: Reset completed.\n", c->name);

	/* Enable other IOPs */

	for (iop = i2o_controller_chain; iop; iop = iop->next)
		if (iop != c)
			i2o_enable_controller(iop);

	kfree(status);
	return 0;
}
1745
1746
1747 /**
1748 * i2o_status_get - get the status block for the IOP
1749 * @c: controller
1750 *
1751 * Issue a status query on the controller. This updates the
1752 * attached status_block. If the controller fails to reply or an
1753 * error occurs then a negative errno code is returned. On success
1754 * zero is returned and the status_blok is updated.
1755 */
1756
int i2o_status_get(struct i2o_controller *c)
{
	long time;
	u32 m;
	u32 *msg;
	u8 *status_block;

	/* Allocate the status block on first use; it is kept on the
	   controller for subsequent calls */
	if (c->status_block == NULL)
	{
		c->status_block = (i2o_status_block *)
			kmalloc(sizeof(i2o_status_block),GFP_KERNEL);
		if (c->status_block == NULL)
		{
			printk(KERN_CRIT "%s: Get Status Block failed; Out of memory.\n",
				c->name);
			return -ENOMEM;
		}
	}

	status_block = (u8*)c->status_block;
	/* Clear the block so the completion poll below starts from zero */
	memset(c->status_block,0,sizeof(i2o_status_block));

	m=i2o_wait_message(c, "StatusGet");
	if(m==0xFFFFFFFF)
		return -ETIMEDOUT;
	msg=(u32 *)(c->mem_offset+m);

	msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=core_context;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=virt_to_bus(c->status_block);	/* IOP DMAs the block here */
	msg[7]=0;   /* 64bit host FIXME */
	msg[8]=sizeof(i2o_status_block); /* always 88 bytes */

	i2o_post_message(c,m);

	/* Wait for a reply: poll the last byte of the 88-byte block.
	   NOTE(review): this assumes the IOP writes byte 87 (to 0xFF)
	   only once the rest of the block is in place - confirm against
	   the I2O spec. */
	time=jiffies;
	while(status_block[87]!=0xFF)
	{
		if((jiffies-time)>=5*HZ)
		{
			printk(KERN_ERR "%s: Get status timeout.\n",c->name);
			return -ETIMEDOUT;
		}
		schedule();
		barrier();
	}

#ifdef DRIVERDEBUG
	/* Decode and log the adapter state on debug builds */
	printk(KERN_INFO "%s: State = ", c->name);
	switch (c->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown !!)\n",c->status_block->iop_state);
	}
#endif

	return 0;
}
1841
1842 /*
1843 * Get the Hardware Resource Table for the device.
1844 * The HRT contains information about possible hidden devices
1845 * but is mostly useless to us
1846 */
int i2o_hrt_get(struct i2o_controller *c)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	/* First read just the header to figure out the real size */

	do {
		if (c->hrt == NULL) {
			c->hrt=kmalloc(size, GFP_KERNEL);
			if (c->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", c->name);
				return -ENOMEM;
			}
		}

		/* msg[2] is filled in by i2o_post_wait */
		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);    /* Simple transaction */
		msg[5]= virt_to_bus(c->hrt);    /* Dump it here */

		ret = i2o_post_wait_mem(c, msg, sizeof(msg), 20, c->hrt, NULL);

		if(ret == -ETIMEDOUT)
		{
			/* The HRT block we used is in limbo somewhere. When the iop wakes up
			   we will recover it.  Deliberately leaked: the IOP may
			   still DMA into it, so don't kfree. */
			c->hrt = NULL;
			return ret;
		}

		if(ret<0)
		{
			printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
				c->name, -ret);
			return ret;
		}

		/* If the full table is bigger than our buffer, grow the
		   buffer to the reported size (entry_len is in 32-bit
		   words) and loop to fetch it again */
		if (c->hrt->num_entries * c->hrt->entry_len << 2 > size) {
			size = c->hrt->num_entries * c->hrt->entry_len << 2;
			kfree(c->hrt);
			c->hrt = NULL;
		}
	} while (c->hrt == NULL);

	i2o_parse_hrt(c); // just for debugging

	return 0;
}
1897
1898 /*
1899 * Send the I2O System Table to the specified IOP
1900 *
1901 * The system table contains information about all the IOPs in the
1902 * system. It is build and then sent to each IOP so that IOPs can
1903 * establish connections between each other.
1904 *
1905 */
i2o_systab_send(struct i2o_controller * iop)1906 static int i2o_systab_send(struct i2o_controller *iop)
1907 {
1908 u32 msg[12];
1909 int ret;
1910 u32 *privbuf = kmalloc(16, GFP_KERNEL);
1911 if(privbuf == NULL)
1912 return -ENOMEM;
1913
1914 if(iop->type == I2O_TYPE_PCI)
1915 {
1916 struct resource *root;
1917
1918 if(iop->status_block->current_mem_size < iop->status_block->desired_mem_size)
1919 {
1920 struct resource *res = &iop->mem_resource;
1921 res->name = iop->pdev->bus->name;
1922 res->flags = IORESOURCE_MEM;
1923 res->start = 0;
1924 res->end = 0;
1925 printk("%s: requires private memory resources.\n", iop->name);
1926 root = pci_find_parent_resource(iop->pdev, res);
1927 if(root==NULL)
1928 printk("Can't find parent resource!\n");
1929 if(root && allocate_resource(root, res,
1930 iop->status_block->desired_mem_size,
1931 iop->status_block->desired_mem_size,
1932 iop->status_block->desired_mem_size,
1933 1<<20, /* Unspecified, so use 1Mb and play safe */
1934 NULL,
1935 NULL)>=0)
1936 {
1937 iop->mem_alloc = 1;
1938 iop->status_block->current_mem_size = 1 + res->end - res->start;
1939 iop->status_block->current_mem_base = res->start;
1940 printk(KERN_INFO "%s: allocated %ld bytes of PCI memory at 0x%08lX.\n",
1941 iop->name, 1+res->end-res->start, res->start);
1942 }
1943 }
1944 if(iop->status_block->current_io_size < iop->status_block->desired_io_size)
1945 {
1946 struct resource *res = &iop->io_resource;
1947 res->name = iop->pdev->bus->name;
1948 res->flags = IORESOURCE_IO;
1949 res->start = 0;
1950 res->end = 0;
1951 printk("%s: requires private memory resources.\n", iop->name);
1952 root = pci_find_parent_resource(iop->pdev, res);
1953 if(root==NULL)
1954 printk("Can't find parent resource!\n");
1955 if(root && allocate_resource(root, res,
1956 iop->status_block->desired_io_size,
1957 iop->status_block->desired_io_size,
1958 iop->status_block->desired_io_size,
1959 1<<20, /* Unspecified, so use 1Mb and play safe */
1960 NULL,
1961 NULL)>=0)
1962 {
1963 iop->io_alloc = 1;
1964 iop->status_block->current_io_size = 1 + res->end - res->start;
1965 iop->status_block->current_mem_base = res->start;
1966 printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at 0x%08lX.\n",
1967 iop->name, 1+res->end-res->start, res->start);
1968 }
1969 }
1970 }
1971 else
1972 {
1973 privbuf[0] = iop->status_block->current_mem_base;
1974 privbuf[1] = iop->status_block->current_mem_size;
1975 privbuf[2] = iop->status_block->current_io_base;
1976 privbuf[3] = iop->status_block->current_io_size;
1977 }
1978
1979 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
1980 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
1981 msg[3] = 0;
1982 msg[4] = (0<<16) | ((iop->unit+2) ); /* Host 0 IOP ID (unit + 2) */
1983 msg[5] = 0; /* Segment 0 */
1984
1985 /*
1986 * Provide three SGL-elements:
1987 * System table (SysTab), Private memory space declaration and
1988 * Private i/o space declaration
1989 *
1990 * FIXME: provide these for controllers needing them
1991 */
1992 msg[6] = 0x54000000 | sys_tbl_len;
1993 msg[7] = virt_to_bus(sys_tbl);
1994 msg[8] = 0x54000000 | privbuf[1];
1995 msg[9] = privbuf[0];
1996 msg[10] = 0xD4000000 | privbuf[3];
1997 msg[11] = privbuf[2];
1998
1999 ret=i2o_post_wait_mem(iop, msg, sizeof(msg), 120, privbuf, NULL);
2000
2001 if(ret==-ETIMEDOUT)
2002 {
2003 printk(KERN_ERR "%s: SysTab setup timed out.\n", iop->name);
2004 }
2005 else if(ret<0)
2006 {
2007 printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n",
2008 iop->name, -ret);
2009 kfree(privbuf);
2010 }
2011 else
2012 {
2013 dprintk(KERN_INFO "%s: SysTab set.\n", iop->name);
2014 kfree(privbuf);
2015 }
2016 i2o_status_get(iop); // Entered READY state
2017
2018 return ret;
2019
2020 }
2021
2022 /*
2023 * Initialize I2O subsystem.
2024 */
void __init i2o_sys_init(void)
{
	struct i2o_controller *iop, *niop = NULL;

	printk(KERN_INFO "Activating I2O controllers...\n");
	printk(KERN_INFO "This may take a few minutes if there are many devices\n");

	/* In INIT state, Activate IOPs.  Activation may delete a failed
	   controller from the chain, so fetch the next pointer first. */
	for (iop = i2o_controller_chain; iop; iop = niop) {
		dprintk(KERN_INFO "Calling i2o_activate_controller for %s...\n",
			iop->name);
		niop = iop->next;
		if (i2o_activate_controller(iop) < 0)
			i2o_delete_controller(iop);
	}

	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (i2o_controller_chain == NULL)
		return;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	dprintk(KERN_INFO "i2o_core: Calling i2o_build_sys_table...\n");
	if (i2o_build_sys_table() < 0) {
		i2o_sys_shutdown();
		return;
	}

	/* If an IOP fails to come online, delete it and rebuild the
	   system table without it */
	for (iop = i2o_controller_chain; iop; iop = niop) {
		niop = iop->next;
		dprintk(KERN_INFO "Calling i2o_online_controller for %s...\n", iop->name);
		if (i2o_online_controller(iop) < 0) {
			i2o_delete_controller(iop);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */

	/*
	 * Register for status updates from all IOPs
	 */
	for(iop = i2o_controller_chain; iop; iop=iop->next) {

		/* Create a kernel thread to deal with dynamic LCT updates */
		iop->lct_pid = kernel_thread(i2o_dyn_lct, iop, CLONE_SIGHAND);

		/* Update change ind on DLCT */
		iop->dlct->change_ind = iop->lct->change_ind;

		/* Start dynamic LCT updates */
		i2o_lct_notify(iop);

		/* Register for all events from IRTOS */
		i2o_event_register(iop, core_context, 0, 0, 0xFFFFFFFF);
	}
}
2087
2088 /**
2089 * i2o_sys_shutdown - shutdown I2O system
2090 *
2091 * Bring down each i2o controller and then return. Each controller
2092 * is taken through an orderly shutdown
2093 */
2094
i2o_sys_shutdown(void)2095 static void i2o_sys_shutdown(void)
2096 {
2097 struct i2o_controller *iop, *niop;
2098
2099 /* Delete all IOPs from the controller chain */
2100 /* that will reset all IOPs too */
2101
2102 for (iop = i2o_controller_chain; iop; iop = niop) {
2103 niop = iop->next;
2104 i2o_delete_controller(iop);
2105 }
2106 }
2107
2108 /**
2109 * i2o_activate_controller - bring controller up to HOLD
2110 * @iop: controller
2111 *
2112 * This function brings an I2O controller into HOLD state. The adapter
2113 * is reset if neccessary and then the queues and resource table
2114 * are read. -1 is returned on a failure, 0 on success.
2115 *
2116 */
2117
i2o_activate_controller(struct i2o_controller * iop)2118 int i2o_activate_controller(struct i2o_controller *iop)
2119 {
2120 /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
2121 /* In READY state, Get status */
2122
2123 if (i2o_status_get(iop) < 0) {
2124 printk(KERN_INFO "Unable to obtain status of %s, "
2125 "attempting a reset.\n", iop->name);
2126 if (i2o_reset_controller(iop) < 0)
2127 return -1;
2128 }
2129
2130 if(iop->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2131 printk(KERN_CRIT "%s: hardware fault\n", iop->name);
2132 return -1;
2133 }
2134
2135 if (iop->status_block->i2o_version > I2OVER15) {
2136 printk(KERN_ERR "%s: Not running vrs. 1.5. of the I2O Specification.\n",
2137 iop->name);
2138 return -1;
2139 }
2140
2141 if (iop->status_block->iop_state == ADAPTER_STATE_READY ||
2142 iop->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2143 iop->status_block->iop_state == ADAPTER_STATE_HOLD ||
2144 iop->status_block->iop_state == ADAPTER_STATE_FAILED)
2145 {
2146 dprintk(KERN_INFO "%s: Already running, trying to reset...\n",
2147 iop->name);
2148 if (i2o_reset_controller(iop) < 0)
2149 return -1;
2150 }
2151
2152 if (i2o_init_outbound_q(iop) < 0)
2153 return -1;
2154
2155 if (i2o_post_outbound_messages(iop))
2156 return -1;
2157
2158 /* In HOLD state */
2159
2160 if (i2o_hrt_get(iop) < 0)
2161 return -1;
2162
2163 return 0;
2164 }
2165
2166
2167 /**
2168 * i2o_init_outbound_queue - setup the outbound queue
2169 * @c: controller
2170 *
2171 * Clear and (re)initialize IOP's outbound queue. Returns 0 on
2172 * success or a negative errno code on a failure.
2173 */
2174
int i2o_init_outbound_q(struct i2o_controller *c)
{
	u8 *status;	/* 4-byte buffer the IOP DMAs its completion code into */
	u32 m;
	u32 *msg;
	u32 time;

	dprintk(KERN_INFO "%s: Initializing Outbound Queue...\n", c->name);
	m=i2o_wait_message(c, "OutboundInit");
	if(m==0xFFFFFFFF)
		return -ETIMEDOUT;
	msg=(u32 *)(c->mem_offset+m);

	status = kmalloc(4,GFP_KERNEL);
	if (status==NULL) {
		printk(KERN_ERR "%s: Outbound Queue initialization failed - no free memory.\n",
			c->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6;
	msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2]= core_context;
	msg[3]= 0x0106;			/* Transaction context */
	msg[4]= 4096;			/* Host page frame size */
	/* Frame size is in words. 256 bytes a frame for now */
	msg[5]= MSG_FRAME_SIZE<<16|0x80; /* Outbound msg frame size in words and Initcode */
	msg[6]= 0xD0000004;		/* Simple SG LE, EOB */
	msg[7]= virt_to_bus(status);	/* completion code lands here */

	i2o_post_message(c,m);

	barrier();
	time=jiffies;
	/* Poll until the IOP writes a completion code (>= I2O_CMD_REJECTED) */
	while(status[0] < I2O_CMD_REJECTED)
	{
		if((jiffies-time)>=30*HZ)
		{
			if(status[0]==0x00)
				printk(KERN_ERR "%s: Ignored queue initialize request.\n",
					c->name);
			else
				printk(KERN_ERR "%s: Outbound queue initialize timeout.\n",
					c->name);
			kfree(status);
			return -ETIMEDOUT;
		}
		schedule();
		barrier();
	}

	if(status[0] != I2O_CMD_COMPLETED)
	{
		printk(KERN_ERR "%s: IOP outbound initialise failed.\n", c->name);
		kfree(status);
		return -ETIMEDOUT;
	}

	kfree(status);
	return 0;
}
2237
2238 /**
2239 * i2o_post_outbound_messages - fill message queue
2240 * @c: controller
2241 *
2242 * Allocate a message frame and load the messages into the IOP. The
2243 * function returns zero on success or a negative errno code on
2244 * failure.
2245 */
2246
i2o_post_outbound_messages(struct i2o_controller * c)2247 int i2o_post_outbound_messages(struct i2o_controller *c)
2248 {
2249 int i;
2250 u32 m;
2251 /* Alloc space for IOP's outbound queue message frames */
2252
2253 c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL);
2254 if(c->page_frame==NULL) {
2255 printk(KERN_ERR "%s: Outbound Q initialize failed; out of memory.\n",
2256 c->name);
2257 return -ENOMEM;
2258 }
2259
2260 c->page_frame_map = pci_map_single(c->pdev, c->page_frame, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
2261
2262 if(c->page_frame_map == 0)
2263 {
2264 kfree(c->page_frame);
2265 printk(KERN_ERR "%s: Unable to map outbound queue.\n", c->name);
2266 return -ENOMEM;
2267 }
2268
2269 m = c->page_frame_map;
2270
2271 /* Post frames */
2272
2273 for(i=0; i< NMBR_MSG_FRAMES; i++) {
2274 I2O_REPLY_WRITE32(c,m);
2275 mb();
2276 m += (MSG_FRAME_SIZE << 2);
2277 }
2278
2279 return 0;
2280 }
2281
2282 /*
2283 * Get the IOP's Logical Configuration Table
2284 */
i2o_lct_get(struct i2o_controller * c)2285 int i2o_lct_get(struct i2o_controller *c)
2286 {
2287 u32 msg[8];
2288 int ret, size = c->status_block->expected_lct_size;
2289
2290 do {
2291 if (c->lct == NULL) {
2292 c->lct = kmalloc(size, GFP_KERNEL);
2293 if(c->lct == NULL) {
2294 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2295 c->name);
2296 return -ENOMEM;
2297 }
2298 }
2299 memset(c->lct, 0, size);
2300
2301 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2302 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2303 /* msg[2] filled in i2o_post_wait */
2304 msg[3] = 0;
2305 msg[4] = 0xFFFFFFFF; /* All devices */
2306 msg[5] = 0x00000000; /* Report now */
2307 msg[6] = 0xD0000000|size;
2308 msg[7] = virt_to_bus(c->lct);
2309
2310 ret=i2o_post_wait_mem(c, msg, sizeof(msg), 120, c->lct, NULL);
2311
2312 if(ret == -ETIMEDOUT)
2313 {
2314 c->lct = NULL;
2315 return ret;
2316 }
2317
2318 if(ret<0)
2319 {
2320 printk(KERN_ERR "%s: LCT Get failed (status=%#x.\n",
2321 c->name, -ret);
2322 return ret;
2323 }
2324
2325 if (c->lct->table_size << 2 > size) {
2326 size = c->lct->table_size << 2;
2327 kfree(c->lct);
2328 c->lct = NULL;
2329 }
2330 } while (c->lct == NULL);
2331
2332 if ((ret=i2o_parse_lct(c)) < 0)
2333 return ret;
2334
2335 return 0;
2336 }
2337
2338 /*
2339 * Like above, but used for async notification. The main
 * difference is that we keep track of the CurrentChangeIndicator
2341 * so that we only get updates when it actually changes.
2342 *
2343 */
i2o_lct_notify(struct i2o_controller * c)2344 int i2o_lct_notify(struct i2o_controller *c)
2345 {
2346 u32 msg[8];
2347
2348 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2349 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2350 msg[2] = core_context;
2351 msg[3] = 0xDEADBEEF;
2352 msg[4] = 0xFFFFFFFF; /* All devices */
2353 msg[5] = c->dlct->change_ind+1; /* Next change */
2354 msg[6] = 0xD0000000|8192;
2355 msg[7] = virt_to_bus(c->dlct);
2356
2357 return i2o_post_this(c, msg, sizeof(msg));
2358 }
2359
2360 /*
2361 * Bring a controller online into OPERATIONAL state.
2362 */
2363
i2o_online_controller(struct i2o_controller * iop)2364 int i2o_online_controller(struct i2o_controller *iop)
2365 {
2366 u32 v;
2367
2368 if (i2o_systab_send(iop) < 0)
2369 return -1;
2370
2371 /* In READY state */
2372
2373 dprintk(KERN_INFO "%s: Attempting to enable...\n", iop->name);
2374 if (i2o_enable_controller(iop) < 0)
2375 return -1;
2376
2377 /* In OPERATIONAL state */
2378
2379 dprintk(KERN_INFO "%s: Attempting to get/parse lct...\n", iop->name);
2380 if (i2o_lct_get(iop) < 0)
2381 return -1;
2382
2383 /* Check battery status */
2384
2385 iop->battery = 0;
2386 if(i2o_query_scalar(iop, ADAPTER_TID, 0x0000, 4, &v, 4)>=0)
2387 {
2388 if(v&16)
2389 iop->battery = 1;
2390 }
2391
2392 return 0;
2393 }
2394
2395 /*
2396 * Build system table
2397 *
2398 * The system table contains information about all the IOPs in the
2399 * system (duh) and is used by the Executives on the IOPs to establish
2400 * peer2peer connections. We're not supporting peer2peer at the moment,
2401 * but this will be needed down the road for things like lan2lan forwarding.
2402 */
i2o_build_sys_table(void)2403 static int i2o_build_sys_table(void)
2404 {
2405 struct i2o_controller *iop = NULL;
2406 struct i2o_controller *niop = NULL;
2407 int count = 0;
2408
2409 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2410 (i2o_num_controllers) *
2411 sizeof(struct i2o_sys_tbl_entry);
2412
2413 if(sys_tbl)
2414 kfree(sys_tbl);
2415
2416 sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL);
2417 if(!sys_tbl) {
2418 printk(KERN_CRIT "SysTab Set failed. Out of memory.\n");
2419 return -ENOMEM;
2420 }
2421 memset((void*)sys_tbl, 0, sys_tbl_len);
2422
2423 sys_tbl->num_entries = i2o_num_controllers;
2424 sys_tbl->version = I2OVERSION; /* TODO: Version 2.0 */
2425 sys_tbl->change_ind = sys_tbl_ind++;
2426
2427 for(iop = i2o_controller_chain; iop; iop = niop)
2428 {
2429 niop = iop->next;
2430
2431 /*
2432 * Get updated IOP state so we have the latest information
2433 *
2434 * We should delete the controller at this point if it
2435 * doesn't respond since if it's not on the system table
2436 * it is techninically not part of the I2O subsy�tem...
2437 */
2438 if(i2o_status_get(iop)) {
2439 printk(KERN_ERR "%s: Deleting b/c could not get status while"
2440 "attempting to build system table\n", iop->name);
2441 i2o_delete_controller(iop);
2442 sys_tbl->num_entries--;
2443 continue; // try the next one
2444 }
2445
2446 sys_tbl->iops[count].org_id = iop->status_block->org_id;
2447 sys_tbl->iops[count].iop_id = iop->unit + 2;
2448 sys_tbl->iops[count].seg_num = 0;
2449 sys_tbl->iops[count].i2o_version =
2450 iop->status_block->i2o_version;
2451 sys_tbl->iops[count].iop_state =
2452 iop->status_block->iop_state;
2453 sys_tbl->iops[count].msg_type =
2454 iop->status_block->msg_type;
2455 sys_tbl->iops[count].frame_size =
2456 iop->status_block->inbound_frame_size;
2457 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2458 sys_tbl->iops[count].iop_capabilities =
2459 iop->status_block->iop_capabilities;
2460 sys_tbl->iops[count].inbound_low = iop->post_port;
2461 sys_tbl->iops[count].inbound_high = 0; // FIXME: 64-bit support
2462
2463 count++;
2464 }
2465
2466 #ifdef DRIVERDEBUG
2467 {
2468 u32 *table;
2469 table = (u32*)sys_tbl;
2470 for(count = 0; count < (sys_tbl_len >>2); count++)
2471 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", count, table[count]);
2472 }
2473 #endif
2474
2475 return 0;
2476 }
2477
2478
2479 /*
2480 * Run time support routines
2481 */
2482
2483 /*
2484 * Generic "post and forget" helpers. This is less efficient - we do
2485 * a memcpy for example that isnt strictly needed, but for most uses
2486 * this is simply not worth optimising
2487 */
2488
/*
 *	i2o_post_this - copy a prebuilt message into a free inbound frame
 *	and post it to the controller.  Spins for up to one second (HZ
 *	jiffies) waiting for the IOP to hand us a frame offset.
 *
 *	Returns 0 on success or -ETIMEDOUT if no message frame became
 *	available.  Fire-and-forget: no reply handling is done here.
 */
int i2o_post_this(struct i2o_controller *c, u32 *data, int len)
{
	u32 m;
	u32 *msg;
	unsigned long t=jiffies;

	/* Poll the inbound FIFO for a free frame; 0xFFFFFFFF means empty */
	do
	{
		mb();
		m = I2O_POST_READ32(c);
	}
	while(m==0xFFFFFFFF && (jiffies-t)<HZ);

	if(m==0xFFFFFFFF)
	{
		printk(KERN_ERR "%s: Timeout waiting for message frame!\n",
			c->name);
		return -ETIMEDOUT;
	}
	/* m is an offset into the controller's mapped memory window */
	msg = (u32 *)(c->mem_offset + m);
	memcpy_toio(msg, data, len);
	/* Hand the filled frame back to the IOP for processing */
	i2o_post_message(c,m);
	return 0;
}
2513
2514 /**
2515 * i2o_post_wait_mem - I2O query/reply with DMA buffers
2516 * @c: controller
2517 * @msg: message to send
2518 * @len: length of message
2519 * @timeout: time in seconds to wait
2520 * @mem1: attached memory buffer 1
2521 * @mem2: attached memory buffer 2
2522 *
2523 * This core API allows an OSM to post a message and then be told whether
2524 * or not the system received a successful reply.
2525 *
2526 * If the message times out then the value '-ETIMEDOUT' is returned. This
2527 * is a special case. In this situation the message may (should) complete
2528 * at an indefinite time in the future. When it completes it will use the
2529 * memory buffers attached to the request. If -ETIMEDOUT is returned then
2530 * the memory buffers must not be freed. Instead the event completion will
2531 * free them for you. In all other cases the buffers are your problem.
2532 *
2533 * Pass NULL for unneeded buffers.
2534 */
2535
/*
 *	Post a message and sleep until the reply handler wakes us (or we
 *	time out).  See the kernel-doc block above for the -ETIMEDOUT
 *	buffer-ownership contract.  Tracking of the in-flight request is
 *	done via a heap-allocated i2o_post_wait_data node on the global
 *	post_wait_queue, matched by a 15-bit id carried in bits 16..30 of
 *	the transaction context (msg[2]).
 */
int i2o_post_wait_mem(struct i2o_controller *c, u32 *msg, int len, int timeout, void *mem1, void *mem2)
{
	DECLARE_WAIT_QUEUE_HEAD(wq_i2o_post);
	DECLARE_WAITQUEUE(wait, current);
	int complete = 0;
	int status;
	unsigned long flags = 0;
	struct i2o_post_wait_data **p;
	struct i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct i2o_post_wait_data), GFP_KERNEL);

	if(!wait_data)
		return -ENOMEM;

	/*
	 *	Create a new notification object
	 */
	wait_data->status = &status;
	wait_data->complete = &complete;
	wait_data->mem[0] = mem1;
	wait_data->mem[1] = mem2;
	/*
	 *	Queue the event with its unique id
	 */
	spin_lock_irqsave(&post_wait_lock, flags);

	wait_data->next = post_wait_queue;
	post_wait_queue = wait_data;
	wait_data->id = (++post_wait_id) & 0x7fff;
	wait_data->wq = &wq_i2o_post;

	spin_unlock_irqrestore(&post_wait_lock, flags);

	/*
	 *	Fill in the message id
	 */

	msg[2] = 0x80000000|(u32)core_context|((u32)wait_data->id<<16);

	/*
	 *	Post the message to the controller. At some point later it
	 *	will return. If we time out before it returns then
	 *	complete will be zero. From the point post_this returns
	 *	the wait_data may have been deleted.
	 */

	add_wait_queue(&wq_i2o_post, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	if ((status = i2o_post_this(c, msg, len))==0) {
		schedule_timeout(HZ * timeout);
	}
	else
	{
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wq_i2o_post, &wait);
		/*
		 *	The message was never posted, so no reply can ever
		 *	arrive: unlink and free the tracking node ourselves.
		 *	(Previously it was leaked on the queue with ->wq left
		 *	pointing at this soon-dead stack frame.)
		 */
		spin_lock_irqsave(&post_wait_lock, flags);
		for(p = &post_wait_queue; *p != NULL; p = &((*p)->next))
		{
			if(*p == wait_data)
			{
				*p = wait_data->next;
				break;
			}
		}
		spin_unlock_irqrestore(&post_wait_lock, flags);
		kfree(wait_data);
		return -EIO;
	}
	remove_wait_queue(&wq_i2o_post, &wait);

	if(signal_pending(current))
		status = -EINTR;

	spin_lock_irqsave(&post_wait_lock, flags);
	barrier();	/* Be sure we see complete as it is locked */
	if(!complete)
	{
		/*
		 *	Mark the entry dead. We cannot remove it. This is important.
		 *	When it does terminate (which it must do if the controller hasnt
		 *	died..) then it will otherwise scribble on stuff.
		 *	!complete lets us safely check if the entry is still
		 *	allocated and thus we can write into it
		 */
		wait_data->wq = NULL;
		status = -ETIMEDOUT;
	}
	else
	{
		/* Debugging check - remove me soon */
		if(status == -ETIMEDOUT)
		{
			printk("TIMEDOUT BUG!\n");
			status = -EIO;
		}
	}
	/* And the wait_data is not leaked either! */
	spin_unlock_irqrestore(&post_wait_lock, flags);
	return status;
}
2623
2624 /**
2625 * i2o_post_wait - I2O query/reply
2626 * @c: controller
2627 * @msg: message to send
2628 * @len: length of message
2629 * @timeout: time in seconds to wait
2630 *
2631 * This core API allows an OSM to post a message and then be told whether
2632 * or not the system received a successful reply.
2633 */
2634
int i2o_post_wait(struct i2o_controller *c, u32 *msg, int len, int timeout)
{
	/* Convenience wrapper: post-and-wait with no attached DMA buffers */
	return i2o_post_wait_mem(c, msg, len, timeout, NULL, NULL);
}
2639
2640 /*
2641 * i2o_post_wait is completed and we want to wake up the
2642 * sleeping proccess. Called by core's reply handler.
2643 */
2644
/*
 *	Reply-side completion for i2o_post_wait{,_mem}().  Finds the
 *	tracking node by the 15-bit id in the transaction context and
 *	either wakes the sleeper (live entry) or frees the attached DMA
 *	buffers (entry whose waiter already timed out).
 */
static void i2o_post_wait_complete(u32 context, int status)
{
	struct i2o_post_wait_data **p1, *q;
	unsigned long flags;

	/*
	 *	We need to search through the post_wait
	 *	queue to see if the given message is still
	 *	outstanding. If not, it means that the IOP
	 *	took longer to respond to the message than we
	 *	had allowed and timer has already expired.
	 *	Not much we can do about that except log
	 *	it for debug purposes, increase timeout, and recompile
	 *
	 *	Lock needed to keep anyone from moving queue pointers
	 *	around while we're looking through them.
	 */

	spin_lock_irqsave(&post_wait_lock, flags);

	for(p1 = &post_wait_queue; *p1!=NULL; p1 = &((*p1)->next))
	{
		q = (*p1);
		if(q->id == ((context >> 16) & 0x7fff)) {
			/*
			 *	Delete it
			 */

			*p1 = q->next;

			/*
			 *	Live or dead ?
			 */

			if(q->wq)
			{
				/* Live entry - wakeup and set status */
				*q->status = status;
				*q->complete = 1;
				wake_up(q->wq);
			}
			else
			{
				/*
				 *	Free resources. Caller is dead
				 */
				if(q->mem[0])
					kfree(q->mem[0]);
				if(q->mem[1])
					kfree(q->mem[1]);
				printk(KERN_WARNING "i2o_post_wait event completed after timeout.\n");
			}
			kfree(q);
			/* Fixed: was a bare spin_unlock(), leaving the IRQ
			   state saved by spin_lock_irqsave() unrestored */
			spin_unlock_irqrestore(&post_wait_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&post_wait_lock, flags);

	printk(KERN_DEBUG "i2o_post_wait: Bogus reply!\n");
}
2706
2707 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
2708 *
2709 * This function can be used for all UtilParamsGet/Set operations.
2710 * The OperationList is given in oplist-buffer,
2711 * and results are returned in reslist-buffer.
2712 * Note that the minimum sized reslist is 8 bytes and contains
2713 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
2714 */
i2o_issue_params(int cmd,struct i2o_controller * iop,int tid,void * oplist,int oplen,void * reslist,int reslen)2715 int i2o_issue_params(int cmd, struct i2o_controller *iop, int tid,
2716 void *oplist, int oplen, void *reslist, int reslen)
2717 {
2718 u32 msg[9];
2719 u32 *res32 = (u32*)reslist;
2720 u32 *restmp = (u32*)reslist;
2721 int len = 0;
2722 int i = 0;
2723 int wait_status;
2724 u32 *opmem, *resmem;
2725
2726 /* Get DMAable memory */
2727 opmem = kmalloc(oplen, GFP_KERNEL);
2728 if(opmem == NULL)
2729 return -ENOMEM;
2730 memcpy(opmem, oplist, oplen);
2731
2732 resmem = kmalloc(reslen, GFP_KERNEL);
2733 if(resmem == NULL)
2734 {
2735 kfree(opmem);
2736 return -ENOMEM;
2737 }
2738
2739 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
2740 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
2741 msg[3] = 0;
2742 msg[4] = 0;
2743 msg[5] = 0x54000000 | oplen; /* OperationList */
2744 msg[6] = virt_to_bus(opmem);
2745 msg[7] = 0xD0000000 | reslen; /* ResultList */
2746 msg[8] = virt_to_bus(resmem);
2747
2748 wait_status = i2o_post_wait_mem(iop, msg, sizeof(msg), 10, opmem, resmem);
2749
2750 /*
2751 * This only looks like a memory leak - don't "fix" it.
2752 */
2753 if(wait_status == -ETIMEDOUT)
2754 return wait_status;
2755
2756 /* Query failed */
2757 if(wait_status != 0)
2758 {
2759 kfree(resmem);
2760 kfree(opmem);
2761 return wait_status;
2762 }
2763
2764 memcpy(reslist, resmem, reslen);
2765 /*
2766 * Calculate number of bytes of Result LIST
2767 * We need to loop through each Result BLOCK and grab the length
2768 */
2769 restmp = res32 + 1;
2770 len = 1;
2771 for(i = 0; i < (res32[0]&0X0000FFFF); i++)
2772 {
2773 if(restmp[0]&0x00FF0000) /* BlockStatus != SUCCESS */
2774 {
2775 printk(KERN_WARNING "%s - Error:\n ErrorInfoSize = 0x%02x, "
2776 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
2777 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
2778 : "PARAMS_GET",
2779 res32[1]>>24, (res32[1]>>16)&0xFF, res32[1]&0xFFFF);
2780
2781 /*
2782 * If this is the only request,than we return an error
2783 */
2784 if((res32[0]&0x0000FFFF) == 1)
2785 {
2786 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
2787 }
2788 }
2789 len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
2790 restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
2791 }
2792 return (len << 2); /* bytes used by result list */
2793 }
2794
2795 /*
2796 * Query one scalar group value or a whole scalar group.
2797 */
i2o_query_scalar(struct i2o_controller * iop,int tid,int group,int field,void * buf,int buflen)2798 int i2o_query_scalar(struct i2o_controller *iop, int tid,
2799 int group, int field, void *buf, int buflen)
2800 {
2801 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
2802 u8 resblk[8+buflen]; /* 8 bytes for header */
2803 int size;
2804
2805 if (field == -1) /* whole group */
2806 opblk[4] = -1;
2807
2808 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, iop, tid,
2809 opblk, sizeof(opblk), resblk, sizeof(resblk));
2810
2811 memcpy(buf, resblk+8, buflen); /* cut off header */
2812
2813 if(size>buflen)
2814 return buflen;
2815 return size;
2816 }
2817
2818 /*
2819 * Set a scalar group value or a whole group.
2820 */
i2o_set_scalar(struct i2o_controller * iop,int tid,int group,int field,void * buf,int buflen)2821 int i2o_set_scalar(struct i2o_controller *iop, int tid,
2822 int group, int field, void *buf, int buflen)
2823 {
2824 u16 *opblk;
2825 u8 resblk[8+buflen]; /* 8 bytes for header */
2826 int size;
2827
2828 opblk = kmalloc(buflen+64, GFP_KERNEL);
2829 if (opblk == NULL)
2830 {
2831 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
2832 return -ENOMEM;
2833 }
2834
2835 opblk[0] = 1; /* operation count */
2836 opblk[1] = 0; /* pad */
2837 opblk[2] = I2O_PARAMS_FIELD_SET;
2838 opblk[3] = group;
2839
2840 if(field == -1) { /* whole group */
2841 opblk[4] = -1;
2842 memcpy(opblk+5, buf, buflen);
2843 }
2844 else /* single field */
2845 {
2846 opblk[4] = 1;
2847 opblk[5] = field;
2848 memcpy(opblk+6, buf, buflen);
2849 }
2850
2851 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
2852 opblk, 12+buflen, resblk, sizeof(resblk));
2853
2854 kfree(opblk);
2855 if(size>buflen)
2856 return buflen;
2857 return size;
2858 }
2859
2860 /*
2861 * if oper == I2O_PARAMS_TABLE_GET, get from all rows
2862 * if fieldcount == -1 return all fields
2863 * ibuf and ibuflen are unused (use NULL, 0)
2864 * else return specific fields
2865 * ibuf contains fieldindexes
2866 *
2867 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
2868 * if fieldcount == -1 return all fields
2869 * ibuf contains rowcount, keyvalues
2870 * else return specific fields
2871 * fieldcount is # of fieldindexes
2872 * ibuf contains fieldindexes, rowcount, keyvalues
2873 *
2874 * You could also use directly function i2o_issue_params().
2875 */
i2o_query_table(int oper,struct i2o_controller * iop,int tid,int group,int fieldcount,void * ibuf,int ibuflen,void * resblk,int reslen)2876 int i2o_query_table(int oper, struct i2o_controller *iop, int tid, int group,
2877 int fieldcount, void *ibuf, int ibuflen,
2878 void *resblk, int reslen)
2879 {
2880 u16 *opblk;
2881 int size;
2882
2883 opblk = kmalloc(10 + ibuflen, GFP_KERNEL);
2884 if (opblk == NULL)
2885 {
2886 printk(KERN_ERR "i2o: no memory for query buffer.\n");
2887 return -ENOMEM;
2888 }
2889
2890 opblk[0] = 1; /* operation count */
2891 opblk[1] = 0; /* pad */
2892 opblk[2] = oper;
2893 opblk[3] = group;
2894 opblk[4] = fieldcount;
2895 memcpy(opblk+5, ibuf, ibuflen); /* other params */
2896
2897 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET,iop, tid,
2898 opblk, 10+ibuflen, resblk, reslen);
2899
2900 kfree(opblk);
2901 if(size>reslen)
2902 return reslen;
2903 return size;
2904 }
2905
2906 /*
2907 * Clear table group, i.e. delete all rows.
2908 */
i2o_clear_table(struct i2o_controller * iop,int tid,int group)2909 int i2o_clear_table(struct i2o_controller *iop, int tid, int group)
2910 {
2911 u16 opblk[] = { 1, 0, I2O_PARAMS_TABLE_CLEAR, group };
2912 u8 resblk[32]; /* min 8 bytes for result header */
2913
2914 return i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
2915 opblk, sizeof(opblk), resblk, sizeof(resblk));
2916 }
2917
2918 /*
2919 * Add a new row into a table group.
2920 *
2921 * if fieldcount==-1 then we add whole rows
2922 * buf contains rowcount, keyvalues
2923 * else just specific fields are given, rest use defaults
2924 * buf contains fieldindexes, rowcount, keyvalues
2925 */
i2o_row_add_table(struct i2o_controller * iop,int tid,int group,int fieldcount,void * buf,int buflen)2926 int i2o_row_add_table(struct i2o_controller *iop, int tid,
2927 int group, int fieldcount, void *buf, int buflen)
2928 {
2929 u16 *opblk;
2930 u8 resblk[32]; /* min 8 bytes for header */
2931 int size;
2932
2933 opblk = kmalloc(buflen+64, GFP_KERNEL);
2934 if (opblk == NULL)
2935 {
2936 printk(KERN_ERR "i2o: no memory for operation buffer.\n");
2937 return -ENOMEM;
2938 }
2939
2940 opblk[0] = 1; /* operation count */
2941 opblk[1] = 0; /* pad */
2942 opblk[2] = I2O_PARAMS_ROW_ADD;
2943 opblk[3] = group;
2944 opblk[4] = fieldcount;
2945 memcpy(opblk+5, buf, buflen);
2946
2947 size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
2948 opblk, 10+buflen, resblk, sizeof(resblk));
2949
2950 kfree(opblk);
2951 if(size>buflen)
2952 return buflen;
2953 return size;
2954 }
2955
2956
2957 /*
2958 * Used for error reporting/debugging purposes.
2959 * Following fail status are common to all classes.
2960 * The preserved message must be handled in the reply handler.
2961 */
/*
 *	Decode a transport-failure reply (MSG_FAIL set in msg[0]) to the
 *	log: fail code name, initiator/target, version range, failing
 *	unit/IOP and the severity bits from msg[4].
 */
void i2o_report_fail_status(u8 req_status, u32* msg)
{
	static char *FAIL_STATUS[] = {
		"0x80",				/* not used */
		"SERVICE_SUSPENDED", 		/* 0x81 */
		"SERVICE_TERMINATED", 		/* 0x82 */
		"CONGESTION",
		"FAILURE",
		"STATE_ERROR",
		"TIME_OUT",
		"ROUTING_FAILURE",
		"INVALID_VERSION",
		"INVALID_OFFSET",
		"INVALID_MSG_FLAGS",
		"FRAME_TOO_SMALL",
		"FRAME_TOO_LARGE",
		"INVALID_TARGET_ID",
		"INVALID_INITIATOR_ID",
		"INVALID_INITIATOR_CONTEX",	/* 0x8F */
		"UNKNOWN_FAILURE"		/* 0xFF */
	};

	if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
		/* Fixed: period used to be printed after the newline */
		printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n", req_status);
	else
		printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]);

	/* Dump some details */

	printk(KERN_ERR "  InitiatorId = %d, TargetId = %d\n",
		(msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
	printk(KERN_ERR "  LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
		(msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
	printk(KERN_ERR "  FailingHostUnit = 0x%04X,  FailingIOP = 0x%03X\n",
		msg[5] >> 16, msg[5] & 0xFFF);

	/* Severity byte occupies bits 16..23 of msg[4] */
	printk(KERN_ERR "  Severity:  0x%02X ", (msg[4] >> 16) & 0xFF);
	if (msg[4] & (1<<16))
		printk("(FormatError), "
			"this msg can never be delivered/processed.\n");
	if (msg[4] & (1<<17))
		printk("(PathError), "
			"this msg can no longer be delivered/processed.\n");
	if (msg[4] & (1<<18))
		printk("(PathState), "
			"the system state does not allow delivery.\n");
	if (msg[4] & (1<<19))
		printk("(Congestion), resources temporarily not available;"
			"do not retry immediately.\n");
}
3012
3013 /*
3014 * Used for error reporting/debugging purposes.
3015 * Following reply status are common to all classes.
3016 */
/*
 *	Print the symbolic name of a class-independent reply status, or
 *	the raw value when it is out of the known range.
 */
void i2o_report_common_status(u8 req_status)
{
	static char *names[] = {
		"SUCCESS",
		"ABORT_DIRTY",
		"ABORT_NO_DATA_TRANSFER",
		"ABORT_PARTIAL_TRANSFER",
		"ERROR_DIRTY",
		"ERROR_NO_DATA_TRANSFER",
		"ERROR_PARTIAL_TRANSFER",
		"PROCESS_ABORT_DIRTY",
		"PROCESS_ABORT_NO_DATA_TRANSFER",
		"PROCESS_ABORT_PARTIAL_TRANSFER",
		"TRANSACTION_ERROR",
		"PROGRESS_REPORT"
	};

	if (req_status <= I2O_REPLY_STATUS_PROGRESS_REPORT)
		printk("%s", names[req_status]);
	else
		printk("RequestStatus = %0#2x", req_status);
}
3039
3040 /*
3041 * Used for error reporting/debugging purposes.
3042 * Following detailed status are valid for executive class,
3043 * utility class, DDM class and for transaction error replies.
3044 */
/*
 *	Print the symbolic name of a class-independent detailed status
 *	code (executive/utility/DDM/transaction-error replies), or the
 *	raw value when out of range.
 */
static void i2o_report_common_dsc(u16 detailed_status)
{
	static char *names[] = {
		"SUCCESS",
		"0x01",				// not used
		"BAD_KEY",
		"TCL_ERROR",
		"REPLY_BUFFER_FULL",
		"NO_SUCH_PAGE",
		"INSUFFICIENT_RESOURCE_SOFT",
		"INSUFFICIENT_RESOURCE_HARD",
		"0x08",				// not used
		"CHAIN_BUFFER_TOO_LARGE",
		"UNSUPPORTED_FUNCTION",
		"DEVICE_LOCKED",
		"DEVICE_RESET",
		"INAPPROPRIATE_FUNCTION",
		"INVALID_INITIATOR_ADDRESS",
		"INVALID_MESSAGE_FLAGS",
		"INVALID_OFFSET",
		"INVALID_PARAMETER",
		"INVALID_REQUEST",
		"INVALID_TARGET_ADDRESS",
		"MESSAGE_TOO_LARGE",
		"MESSAGE_TOO_SMALL",
		"MISSING_PARAMETER",
		"TIMEOUT",
		"UNKNOWN_ERROR",
		"UNKNOWN_FUNCTION",
		"UNSUPPORTED_VERSION",
		"DEVICE_BUSY",
		"DEVICE_NOT_AVAILABLE"
	};

	if (detailed_status <= I2O_DSC_DEVICE_NOT_AVAILABLE)
		printk(" / %s.\n", names[detailed_status]);
	else
		printk(" / DetailedStatus = %0#4x.\n", detailed_status);
}
3084
3085 /*
3086 * Used for error reporting/debugging purposes
3087 */
/*
 *	Print the symbolic name of a LAN-class detailed status code, or
 *	the raw value when out of range.
 */
static void i2o_report_lan_dsc(u16 detailed_status)
{
	static char *names[] = {	// Lan detailed status code strings
		"SUCCESS",
		"DEVICE_FAILURE",
		"DESTINATION_NOT_FOUND",
		"TRANSMIT_ERROR",
		"TRANSMIT_ABORTED",
		"RECEIVE_ERROR",
		"RECEIVE_ABORTED",
		"DMA_ERROR",
		"BAD_PACKET_DETECTED",
		"OUT_OF_MEMORY",
		"BUCKET_OVERRUN",
		"IOP_INTERNAL_ERROR",
		"CANCELED",
		"INVALID_TRANSACTION_CONTEXT",
		"DEST_ADDRESS_DETECTED",
		"DEST_ADDRESS_OMITTED",
		"PARTIAL_PACKET_RETURNED",
		"TEMP_SUSPENDED_STATE",	// last Lan detailed status code
		"INVALID_REQUEST"	// general detailed status code
	};

	if (detailed_status <= I2O_DSC_INVALID_REQUEST)
		printk(" / %s.\n", names[detailed_status]);
	else
		printk(" / %0#4x.\n", detailed_status);
}
3117
3118 /*
3119 * Used for error reporting/debugging purposes
3120 */
/*
 *	Print the symbolic name of a utility-class command code, or the
 *	raw value for codes we don't know.  Table-driven instead of the
 *	original switch.
 */
static void i2o_report_util_cmd(u8 cmd)
{
	static const struct {
		u8 code;
		char *name;
	} tbl[] = {
		{ I2O_CMD_UTIL_NOP,		"UTIL_NOP" },
		{ I2O_CMD_UTIL_ABORT,		"UTIL_ABORT" },
		{ I2O_CMD_UTIL_CLAIM,		"UTIL_CLAIM" },
		{ I2O_CMD_UTIL_RELEASE,		"UTIL_CLAIM_RELEASE" },
		{ I2O_CMD_UTIL_CONFIG_DIALOG,	"UTIL_CONFIG_DIALOG" },
		{ I2O_CMD_UTIL_DEVICE_RESERVE,	"UTIL_DEVICE_RESERVE" },
		{ I2O_CMD_UTIL_DEVICE_RELEASE,	"UTIL_DEVICE_RELEASE" },
		{ I2O_CMD_UTIL_EVT_ACK,		"UTIL_EVENT_ACKNOWLEDGE" },
		{ I2O_CMD_UTIL_EVT_REGISTER,	"UTIL_EVENT_REGISTER" },
		{ I2O_CMD_UTIL_LOCK,		"UTIL_LOCK" },
		{ I2O_CMD_UTIL_LOCK_RELEASE,	"UTIL_LOCK_RELEASE" },
		{ I2O_CMD_UTIL_PARAMS_GET,	"UTIL_PARAMS_GET" },
		{ I2O_CMD_UTIL_PARAMS_SET,	"UTIL_PARAMS_SET" },
		{ I2O_CMD_UTIL_REPLY_FAULT_NOTIFY, "UTIL_REPLY_FAULT_NOTIFY" }
	};
	int i;

	for (i = 0; i < sizeof(tbl)/sizeof(tbl[0]); i++) {
		if (tbl[i].code == cmd) {
			printk("%s, ", tbl[i].name);
			return;
		}
	}
	printk("Cmd = %0#2x, ",cmd);
}
3170
3171 /*
3172 * Used for error reporting/debugging purposes
3173 */
/*
 *	Print the symbolic name of an executive-class command code, or
 *	the raw value for codes we don't know.  Table-driven instead of
 *	the original switch.
 */
static void i2o_report_exec_cmd(u8 cmd)
{
	static const struct {
		u8 code;
		char *name;
	} tbl[] = {
		{ I2O_CMD_ADAPTER_ASSIGN,	"EXEC_ADAPTER_ASSIGN" },
		{ I2O_CMD_ADAPTER_READ,		"EXEC_ADAPTER_READ" },
		{ I2O_CMD_ADAPTER_RELEASE,	"EXEC_ADAPTER_RELEASE" },
		{ I2O_CMD_BIOS_INFO_SET,	"EXEC_BIOS_INFO_SET" },
		{ I2O_CMD_BOOT_DEVICE_SET,	"EXEC_BOOT_DEVICE_SET" },
		{ I2O_CMD_CONFIG_VALIDATE,	"EXEC_CONFIG_VALIDATE" },
		{ I2O_CMD_CONN_SETUP,		"EXEC_CONN_SETUP" },
		{ I2O_CMD_DDM_DESTROY,		"EXEC_DDM_DESTROY" },
		{ I2O_CMD_DDM_ENABLE,		"EXEC_DDM_ENABLE" },
		{ I2O_CMD_DDM_QUIESCE,		"EXEC_DDM_QUIESCE" },
		{ I2O_CMD_DDM_RESET,		"EXEC_DDM_RESET" },
		{ I2O_CMD_DDM_SUSPEND,		"EXEC_DDM_SUSPEND" },
		{ I2O_CMD_DEVICE_ASSIGN,	"EXEC_DEVICE_ASSIGN" },
		{ I2O_CMD_DEVICE_RELEASE,	"EXEC_DEVICE_RELEASE" },
		{ I2O_CMD_HRT_GET,		"EXEC_HRT_GET" },
		{ I2O_CMD_ADAPTER_CLEAR,	"EXEC_IOP_CLEAR" },
		{ I2O_CMD_ADAPTER_CONNECT,	"EXEC_IOP_CONNECT" },
		{ I2O_CMD_ADAPTER_RESET,	"EXEC_IOP_RESET" },
		{ I2O_CMD_LCT_NOTIFY,		"EXEC_LCT_NOTIFY" },
		{ I2O_CMD_OUTBOUND_INIT,	"EXEC_OUTBOUND_INIT" },
		{ I2O_CMD_PATH_ENABLE,		"EXEC_PATH_ENABLE" },
		{ I2O_CMD_PATH_QUIESCE,		"EXEC_PATH_QUIESCE" },
		{ I2O_CMD_PATH_RESET,		"EXEC_PATH_RESET" },
		{ I2O_CMD_STATIC_MF_CREATE,	"EXEC_STATIC_MF_CREATE" },
		{ I2O_CMD_STATIC_MF_RELEASE,	"EXEC_STATIC_MF_RELEASE" },
		{ I2O_CMD_STATUS_GET,		"EXEC_STATUS_GET" },
		{ I2O_CMD_SW_DOWNLOAD,		"EXEC_SW_DOWNLOAD" },
		{ I2O_CMD_SW_UPLOAD,		"EXEC_SW_UPLOAD" },
		{ I2O_CMD_SW_REMOVE,		"EXEC_SW_REMOVE" },
		{ I2O_CMD_SYS_ENABLE,		"EXEC_SYS_ENABLE" },
		{ I2O_CMD_SYS_MODIFY,		"EXEC_SYS_MODIFY" },
		{ I2O_CMD_SYS_QUIESCE,		"EXEC_SYS_QUIESCE" },
		{ I2O_CMD_SYS_TAB_SET,		"EXEC_SYS_TAB_SET" }
	};
	int i;

	for (i = 0; i < sizeof(tbl)/sizeof(tbl[0]); i++) {
		if (tbl[i].code == cmd) {
			printk("%s, ", tbl[i].name);
			return;
		}
	}
	printk("Cmd = %#02x, ",cmd);
}
3280
3281 /*
3282 * Used for error reporting/debugging purposes
3283 */
/*
 *	Print the symbolic name of a LAN-class command code, or the raw
 *	value for codes we don't know.  Table-driven instead of the
 *	original switch.
 */
static void i2o_report_lan_cmd(u8 cmd)
{
	static const struct {
		u8 code;
		char *name;
	} tbl[] = {
		{ LAN_PACKET_SEND,	"LAN_PACKET_SEND" },
		{ LAN_SDU_SEND,		"LAN_SDU_SEND" },
		{ LAN_RECEIVE_POST,	"LAN_RECEIVE_POST" },
		{ LAN_RESET,		"LAN_RESET" },
		{ LAN_SUSPEND,		"LAN_SUSPEND" }
	};
	int i;

	for (i = 0; i < sizeof(tbl)/sizeof(tbl[0]); i++) {
		if (tbl[i].code == cmd) {
			printk("%s, ", tbl[i].name);
			return;
		}
	}
	printk("Cmd = %0#2x, ",cmd);
}
3306
3307 /*
3308 * Used for error reporting/debugging purposes.
3309 * Report Cmd name, Request status, Detailed Status.
3310 */
/*
 *	Decode a reply message to the log: command name, request status
 *	and detailed status, routed through the class-appropriate
 *	decoder.  The handler slot looked up from the initiator context
 *	may be empty (handlers can be removed), so it is NULL-checked
 *	before the LAN-class dereference - previously this could oops.
 */
void i2o_report_status(const char *severity, const char *str, u32 *msg)
{
	u8 cmd = (msg[1]>>24)&0xFF;
	u8 req_status = (msg[4]>>24)&0xFF;
	u16 detailed_status = msg[4]&0xFFFF;
	struct i2o_handler *h = i2o_handlers[msg[2] & (MAX_I2O_MODULES-1)];

	printk("%s%s: ", severity, str);

	if (cmd < 0x1F) 			// Utility cmd
		i2o_report_util_cmd(cmd);

	else if (cmd >= 0xA0 && cmd <= 0xEF) 	// Executive cmd
		i2o_report_exec_cmd(cmd);

	else if (h && h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
		i2o_report_lan_cmd(cmd);	// LAN cmd
	else
		printk("Cmd = %0#2x, ", cmd);	// Other cmds

	if (msg[0] & MSG_FAIL) {
		i2o_report_fail_status(req_status, msg);
		return;
	}

	i2o_report_common_status(req_status);

	if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
		i2o_report_common_dsc(detailed_status);
	else if (h && h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
		i2o_report_lan_dsc(detailed_status);
	else
		printk(" / DetailedStatus = %0#4x.\n", detailed_status);
}
3345
3346 /* Used to dump a message to syslog during debugging */
/* Used to dump a message to syslog during debugging */
void i2o_dump_message(u32 *msg)
{
#ifdef DRIVERDEBUG
	int i;
	int words = (msg[0] >> 16) & 0xffff;	/* size field, in 32-bit words */

	printk(KERN_INFO "Dumping I2O message size %d @ %p\n", words, msg);
	for (i = 0; i < words; i++)
		printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]);
#endif
}
3357
3358 /*
3359 * I2O reboot/shutdown notification.
3360 *
3361 * - Call each OSM's reboot notifier (if one exists)
3362 * - Quiesce each IOP in the system
3363 *
3364 * Each IOP has to be quiesced before we can ensure that the system
3365 * can be properly shutdown as a transaction that has already been
3366 * acknowledged still needs to be placed in permanent store on the IOP.
3367 * The SysQuiesce causes the IOP to force all HDMs to complete their
3368 * transactions before returning, so only at that point is it safe
3369 *
3370 */
i2o_reboot_event(struct notifier_block * n,unsigned long code,void * p)3371 static int i2o_reboot_event(struct notifier_block *n, unsigned long code, void
3372 *p)
3373 {
3374 int i = 0;
3375 struct i2o_controller *c = NULL;
3376
3377 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
3378 return NOTIFY_DONE;
3379
3380 printk(KERN_INFO "Shutting down I2O system.\n");
3381 printk(KERN_INFO
3382 " This could take a few minutes if there are many devices attached\n");
3383
3384 for(i = 0; i < MAX_I2O_MODULES; i++)
3385 {
3386 if(i2o_handlers[i] && i2o_handlers[i]->reboot_notify)
3387 i2o_handlers[i]->reboot_notify();
3388 }
3389
3390 for(c = i2o_controller_chain; c; c = c->next)
3391 {
3392 if(i2o_quiesce_controller(c))
3393 {
3394 printk(KERN_WARNING "i2o: Could not quiesce %s.\n"
3395 "Verify setup on next system power up.\n",
3396 c->name);
3397 }
3398 }
3399
3400 printk(KERN_INFO "I2O system down.\n");
3401 return NOTIFY_DONE;
3402 }
3403
3404
/* Controller lookup and state */
EXPORT_SYMBOL(i2o_controller_chain);
EXPORT_SYMBOL(i2o_num_controllers);
EXPORT_SYMBOL(i2o_find_controller);
EXPORT_SYMBOL(i2o_unlock_controller);
EXPORT_SYMBOL(i2o_status_get);

/* OSM (handler) registration */
EXPORT_SYMBOL(i2o_install_handler);
EXPORT_SYMBOL(i2o_remove_handler);

/* Controller registration and message queue processing */
EXPORT_SYMBOL(i2o_install_controller);
EXPORT_SYMBOL(i2o_delete_controller);
EXPORT_SYMBOL(i2o_run_queue);

/* Device claiming and notification */
EXPORT_SYMBOL(i2o_claim_device);
EXPORT_SYMBOL(i2o_release_device);
EXPORT_SYMBOL(i2o_device_notify_on);
EXPORT_SYMBOL(i2o_device_notify_off);

/* Message posting (fire-and-forget and synchronous variants) */
EXPORT_SYMBOL(i2o_post_this);
EXPORT_SYMBOL(i2o_post_wait);
EXPORT_SYMBOL(i2o_post_wait_mem);

/* Parameter group (scalar/table) access */
EXPORT_SYMBOL(i2o_query_scalar);
EXPORT_SYMBOL(i2o_set_scalar);
EXPORT_SYMBOL(i2o_query_table);
EXPORT_SYMBOL(i2o_clear_table);
EXPORT_SYMBOL(i2o_row_add_table);
EXPORT_SYMBOL(i2o_issue_params);

/* Event registration and acknowledgement */
EXPORT_SYMBOL(i2o_event_register);
EXPORT_SYMBOL(i2o_event_ack);

/* Diagnostics */
EXPORT_SYMBOL(i2o_report_status);
EXPORT_SYMBOL(i2o_dump_message);

EXPORT_SYMBOL(i2o_get_class_name);

EXPORT_SYMBOL_GPL(i2o_sys_init);

MODULE_AUTHOR("Red Hat Software");
MODULE_DESCRIPTION("I2O Core");
MODULE_LICENSE("GPL");
3447
i2o_core_init(void)3448 static int i2o_core_init(void)
3449 {
3450 printk(KERN_INFO "I2O Core - (C) Copyright 1999 Red Hat Software\n");
3451 if (i2o_install_handler(&i2o_core_handler) < 0)
3452 {
3453 printk(KERN_ERR "i2o_core: Unable to install core handler.\nI2O stack not loaded!");
3454 return 0;
3455 }
3456
3457 core_context = i2o_core_handler.context;
3458
3459 /*
3460 * Initialize event handling thread
3461 */
3462
3463 init_MUTEX_LOCKED(&evt_sem);
3464 evt_pid = kernel_thread(i2o_core_evt, &evt_reply, CLONE_SIGHAND);
3465 if(evt_pid < 0)
3466 {
3467 printk(KERN_ERR "I2O: Could not create event handler kernel thread\n");
3468 i2o_remove_handler(&i2o_core_handler);
3469 return 0;
3470 }
3471 else
3472 printk(KERN_INFO "I2O: Event thread created as pid %d\n", evt_pid);
3473
3474 if(i2o_num_controllers)
3475 i2o_sys_init();
3476
3477 register_reboot_notifier(&i2o_reboot_notifier);
3478
3479 return 0;
3480 }
3481
i2o_core_exit(void)3482 static void i2o_core_exit(void)
3483 {
3484 int stat;
3485
3486 unregister_reboot_notifier(&i2o_reboot_notifier);
3487
3488 if(i2o_num_controllers)
3489 i2o_sys_shutdown();
3490
3491 /*
3492 * If this is shutdown time, the thread has already been killed
3493 */
3494 if(evt_running) {
3495 printk("Terminating i2o threads...");
3496 stat = kill_proc(evt_pid, SIGTERM, 1);
3497 if(!stat) {
3498 printk("waiting...");
3499 wait_for_completion(&evt_dead);
3500 }
3501 printk("done.\n");
3502 }
3503 i2o_remove_handler(&i2o_core_handler);
3504 unregister_reboot_notifier(&i2o_reboot_notifier);
3505 }
3506
/* Entry/exit points for modular and built-in builds */
module_init(i2o_core_init);
module_exit(i2o_core_exit);
3509
3510