// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
			  dpti.c  -  description
			     -------------------
    begin		: Thu Sep 7 2000
    copyright		: (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.rst for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long loops
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>
#include <linux/pgtable.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
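
/*
 * A note on the block above (an inference, not from any DPT spec at
 * hand): the dptsig management tools presumably locate this structure
 * by scanning the driver image for the "dPtSiG" marker, so the
 * initializer order must continue to match dpt_sig_S exactly.
 */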



/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba *hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};
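
/*
 * Pending synchronous requests are kept on the singly-linked list
 * headed by adpt_post_wait_queue below.  adpt_i2o_post_wait() tags each
 * outgoing frame with a 15-bit id in its transaction context (msg[2]),
 * and adpt_i2o_post_wait_complete() uses that id to find the matching
 * entry and wake the sleeper.
 */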

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
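
/*
 * The two helpers above are used when emitting 64-bit SG elements,
 * low word first - e.g. in adpt_inquiry() below:
 *
 *	*mptr++ = dma_low(addr);
 *	*mptr++ = dma_high(addr);
 */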

static u8 adpt_read_blink_led(adpt_hba *host)
{
	if (host->FwDebugBLEDflag_P) {
		if (readb(host->FwDebugBLEDflag_P) == 0xbc) {
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}
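
/*
 * The 0xbc magic above is inherited from the original driver; the flag
 * byte presumably reads 0xbc only while the firmware's blink-LED
 * (fault code) register holds a valid value.
 */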

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci, dptids);

static int adpt_detect(struct scsi_host_template *sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device(PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if (pDev->device == PCI_DPT_DEVICE_ID ||
		    pDev->device == PCI_DPT_RAPTOR_DEVICE_ID) {
			if (adpt_install_hba(sht, pDev)) {
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count - 1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the system table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
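
/*
 * Summary of the I2O bring-up sequence driven above, and repeated by
 * adpt_hba_reset(): INIT -> activate (get status, init the outbound
 * queue, fetch the HRT) -> HOLD -> build and post the system table ->
 * online -> OPERATIONAL, at which point the LCT can be fetched and
 * parsed into SCSI devices.
 */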


static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}


static void adpt_inquiry(adpt_hba *pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8 *buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR"%s: Could not allocate buffer\n", pHba->name);
		return;
	}
	memset((void *)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;	// SINGLE SGE, 64 bit
	else
		reqlen = 14;	// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr = msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;	/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n", pHba->name, rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8 *)&buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8 *)&buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return;
}
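
/*
 * For reference, the private SCSI_EXEC frame built above looks roughly
 * like this (reconstructed from the code, not from an I2O spec):
 *
 *	msg[0]	word count | SGL_OFFSET_12
 *	msg[1]	private function 0xff | HOST_TID | ADAPTER_TID
 *	msg[2]	transaction context (filled in by adpt_i2o_post_wait)
 *	msg[4]	I2O_CMD_SCSI_EXEC | DPT_ORGANIZATION_ID
 *	msg[5]	target TID | "interpret" flag
 *	msg[6]	SCSI direction/flags | CDB length
 *	msg[7..]  16-byte CDB, then the data length and the SG list
 */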


static int adpt_slave_configure(struct scsi_device *device)
{
	struct Scsi_Host *host = device->host;

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba = NULL;
	struct adpt_device *pDev = NULL;	/* dpt per device information */

	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if ((pDev = (struct adpt_device *)(cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	/* sector_div() divides in place and returns the remainder;
	 * the quotient left in capacity is the cylinder count */
	sector_div(capacity, heads * sectors);
	cylinders = capacity;

	// Special case if CDROM
	if (sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
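
/*
 * Worked example of the heuristic above: a 0x50000-sector disk
 * (327680 sectors, about 160MB) falls in the 0x40000-0x80000 band, so
 * it is given 128 heads and 63 sectors, for 327680 / (128 * 63) = 40
 * cylinders.
 */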


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba *pHba;

	pHba = (adpt_hba *)host->hostdata[0];
	return (char *)(pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device *d;
	int id;
	int chan;
	adpt_hba *pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int)pHba->reply_fifo_size, host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for (chan = 0; chan < MAX_CHANNEL; chan++) {
		for (id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while (d) {
				seq_printf(m, "\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m, " Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev) ? "online" : "offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 * Turn a pointer to ioctl reply data into a u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba *pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 * Go from a u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
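
/*
 * Round trip of an ioctl reply buffer through a 32-bit context, as used
 * by adpt_i2o_passthru() (on 32-bit kernels the pointer itself is the
 * context; on 64-bit kernels it no longer fits, hence the slot table):
 *
 *	msg[3] = adpt_ioctl_to_context(pHba, reply);	// park pointer
 *	...					// reply arrives in the ISR
 *	reply = adpt_ioctl_from_context(pHba, context);	// reclaim slot
 */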

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba = NULL;		/* host bus adapter structure */
	struct adpt_device *dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n", pHba->name);
	if ((dptdevice = (void *)(cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n", pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO"%s: Abort cmd not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n", pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n", pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device *d = cmd->device->hostdata;

	pHba = (void *)cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n", pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n", pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO"%s: Device reset not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n", pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n", pHba->name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n", pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: channel %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if (rcode == 0) {
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba *pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode = adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n", pHba->name);

	if ((rcode = adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n", pHba->name);

	if ((rcode = adpt_i2o_lct_get(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for (p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template *sht, struct pci_dev *pDev)
{

	adpt_hba *pHba = NULL;
	adpt_hba *p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if (pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev, 0);
	hba_map0_area_size = pci_resource_len(pDev, 0);

	// Check if standard PCI card or single BAR Raptor
	if (pDev->device == PCI_DPT_DEVICE_ID) {
		if (pDev->subsystem_device >= 0xc032 && pDev->subsystem_device <= 0xc03b) {
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else {	// Not Raptor - it is a PCI card
			if (hba_map0_area_size > 0x100000) {
				hba_map0_area_size = 0x100000;
			}
		}
	} else {	// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev, 1);
		hba_map1_area_size = pci_resource_len(pDev, 1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys, hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if (raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size);
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if (hba_chain != NULL) {
		for (p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);

	if (raptorFlag == 0) {
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n", base_addr_virt, hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n", msg_addr_virt, hba_map1_area_size);
	}

	if (request_irq(pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
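
/*
 * The register offsets used above (0x30 interrupt mask, 0x40 inbound
 * "post" FIFO, 0x44 outbound "reply" FIFO) are taken on trust from the
 * original driver and presumably correspond to an i960-style I2O
 * messaging unit; they are not derived from a datasheet here.
 */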


static void adpt_i2o_delete_hba(adpt_hba *pHba)
{
	adpt_hba *p1;
	adpt_hba *p2;
	struct i2o_device *d;
	struct i2o_device *next;
	int i;
	int j;
	struct adpt_device *pDev;
	struct adpt_device *pNext;


	mutex_lock(&adpt_configuration_lock);
	if (pHba->host) {
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for (p1 = hba_chain; p1; p2 = p1, p1 = p1->next) {
		if (p1 == pHba) {
			if (p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if (pHba->msg_addr_virt != pHba->base_addr_virt) {
		iounmap(pHba->msg_addr_virt);
	}
	if (pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if (pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if (pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if (pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if (pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for (d = pHba->devices; d; d = next) {
		next = d->next;
		kfree(d);
	}
	for (i = 0; i < pHba->top_scsi_channel; i++) {
		for (j = 0; j < MAX_ID; j++) {
			if (pHba->channel[i].device[j] != NULL) {
				for (pDev = pHba->channel[i].device[j]; pDev; pDev = pNext) {
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if (hba_count <= 0) {
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device *adpt_find_device(adpt_hba *pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device *d;

	if (chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if (!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match */
	if (d->scsi_lun == lun) {
		return d;
	}

	/* else we need to look through all the luns */
	for (d = d->next_lun; d; d = d->next_lun) {
		if (d->scsi_lun == lun) {
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba *pHba, u32 *msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if ((status = adpt_i2o_post_this(pHba, msg, len)) == 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else {
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time.  Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if (pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if (status == -ETIMEDOUT) {
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n", pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for (p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if (p1 == wait_data) {
			if (p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION) {
				status = -EOPNOTSUPP;
			}
			if (p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
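
/*
 * Typical caller pattern (cf. adpt_device_reset() above): build a
 * frame, leave msg[2] zero for the context, and block until the IOP
 * replies or the timeout expires:
 *
 *	u32 msg[4];
 *	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
 *	msg[1] = I2O_DEVICE_RESET << 24 | HOST_TID << 12 | d->tid;
 *	msg[2] = msg[3] = 0;
 *	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
 */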


static s32 adpt_i2o_post_this(adpt_hba *pHba, u32 *data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
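
/*
 * Inbound FIFO protocol as implemented above: reading the post port
 * pops the offset of a free message frame (EMPTY_QUEUE when none is
 * available); the request is copied into that frame with
 * memcpy_toio(), and writing the same offset back to the post port
 * hands the frame to the IOP.
 */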


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if (p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n", context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n", p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba *pHba)
{
	u32 msg[8];
	u8 *status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if (pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}

	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0;
	msg[6] = dma_low(addr);
	msg[7] = dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while (*status == 0) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING"%s: IOP Reset Timeout\n", pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if (*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if (time_after(jiffies, timeout)) {
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n", pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if (*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba *pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];	// larger than 7, or 8 ...
	struct adpt_device *pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for (i = 0; i < max; i++) {
		if (lct->lct_entry[i].user_tid != 0xfff) {
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled.  This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if (lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if (bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID) {
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			if (scsi_id > pHba->top_scsi_id) {
				pHba->top_scsi_id = scsi_id;
			}
			if (scsi_lun > pHba->top_scsi_lun) {
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if (d == NULL) {
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n", pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT) {
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if (adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28) >= 0) {
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if (bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) >= 0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if (bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if (pHba->channel[bus_no].device[scsi_id] == NULL) {
					pDev = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for (pDev = pHba->channel[bus_no].device[scsi_id];
					     pDev->next_lun; pDev = pDev->next_lun) {
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if (scsi_id > pHba->top_scsi_id) {
					pHba->top_scsi_id = scsi_id;
				}
				if (scsi_lun > pHba->top_scsi_lun) {
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if (scsi_id == -1) {
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
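
/*
 * Unpacking of the private DPT device-info scalar group (0x8000)
 * queried above, as inferred from the code: buf[0] bits 31..16 = bus,
 * bits 15..8 = flags, bits 7..0 = device type; buf[1] = SCSI id;
 * buf[2..3] = LUN in SAM format, decoded with scsilun_to_int().
 */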


/*
 * Each I2O controller has a chain of devices on it - these match
 * the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba *pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller = pHba;
	d->owner = NULL;
	d->next = pHba->devices;
	d->prev = NULL;
	if (pHba->devices != NULL) {
		pHba->devices->prev = d;
	}
	pHba->devices = d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba *pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if (pHba->in_use) {
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba *pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}


static int adpt_i2o_passthru(adpt_hba *pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32 *reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user *user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if (get_user(size, &user_msg[0])) {
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if (size > MAX_MESSAGE_SIZE) {
		return -EFAULT;
	}
	size *= 4;		// Convert to bytes

	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if (reply_size > REPLY_FRAME_SIZE) {
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if (reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n", pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000;	// IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if (sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element *)(msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize) {
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name, sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n", pHba->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if (!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d  buffer number %d of %d\n",
					pHba->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p;	// sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p, (void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n", pHba->name, i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if (rcode) {
		goto cleanup;
	}

	if (sg_offset) {
		/* Copy back the Scatter Gather buffers to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element *sg;
		int sg_size;

		// re-acquire the original message to handle the sg copy operation correctly
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user(msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element *)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus, sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n", pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if (copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n", pHba->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n", pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element *)(msg + sg_offset);
		while (sg_index) {
			if (sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
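
/*
 * Note on the SG handling above: sg_simple_element carries 32-bit bus
 * addresses, so each user buffer is bounced through a coherent
 * allocation below 4GB (see the coherent mask set in
 * adpt_install_hba()) and copied in and out around the command.
 */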

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S *si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S *si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S *si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S *si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:	// Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we cannot get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))) {
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
1943
1944 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1945 {
1946 int minor;
1947 int error = 0;
1948 adpt_hba* pHba;
1949 ulong flags = 0;
1950 void __user *argp = (void __user *)arg;
1951
1952 minor = iminor(inode);
1953 if (minor >= DPTI_MAX_HBA){
1954 return -ENXIO;
1955 }
1956 mutex_lock(&adpt_configuration_lock);
1957 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1958 if (pHba->unit == minor) {
1959 break; /* found adapter */
1960 }
1961 }
1962 mutex_unlock(&adpt_configuration_lock);
1963 if(pHba == NULL){
1964 return -ENXIO;
1965 }
1966
1967 while((volatile u32) pHba->state & DPTI_STATE_RESET )
1968 schedule_timeout_uninterruptible(2);
1969
1970 switch (cmd) {
1971 // TODO: handle 3 cases
1972 case DPT_SIGNATURE:
1973 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1974 return -EFAULT;
1975 }
1976 break;
1977 case I2OUSRCMD:
1978 return adpt_i2o_passthru(pHba, argp);
1979
1980 case DPT_CTRLINFO:{
1981 drvrHBAinfo_S HbaInfo;
1982
1983 #define FLG_OSD_PCI_VALID 0x0001
1984 #define FLG_OSD_DMA 0x0002
1985 #define FLG_OSD_I2O 0x0004
1986 memset(&HbaInfo, 0, sizeof(HbaInfo));
1987 HbaInfo.drvrHBAnum = pHba->unit;
1988 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1989 HbaInfo.blinkState = adpt_read_blink_led(pHba);
1990 HbaInfo.pciBusNum = pHba->pDev->bus->number;
1991 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
1992 HbaInfo.Interrupt = pHba->pDev->irq;
1993 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1994 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1995 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1996 return -EFAULT;
1997 }
1998 break;
1999 }
2000 case DPT_SYSINFO:
2001 return adpt_system_info(argp);
2002 case DPT_BLINKLED:{
2003 u32 value;
2004 value = (u32)adpt_read_blink_led(pHba);
2005 if (copy_to_user(argp, &value, sizeof(value))) {
2006 return -EFAULT;
2007 }
2008 break;
2009 }
2010 case I2ORESETCMD: {
2011 struct Scsi_Host *shost = pHba->host;
2012
2013 if (shost)
2014 spin_lock_irqsave(shost->host_lock, flags);
2015 adpt_hba_reset(pHba);
2016 if (shost)
2017 spin_unlock_irqrestore(shost->host_lock, flags);
2018 break;
2019 }
2020 case I2ORESCANCMD:
2021 adpt_rescan(pHba);
2022 break;
2023 default:
2024 return -EINVAL;
2025 }
2026
2027 return error;
2028 }
2029
2030 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2031 {
2032 struct inode *inode;
2033 long ret;
2034
2035 inode = file_inode(file);
2036
2037 mutex_lock(&adpt_mutex);
2038 ret = adpt_ioctl(inode, file, cmd, arg);
2039 mutex_unlock(&adpt_mutex);
2040
2041 return ret;
2042 }
2043
2044 #ifdef CONFIG_COMPAT
2045 static long compat_adpt_ioctl(struct file *file,
2046 unsigned int cmd, unsigned long arg)
2047 {
2048 struct inode *inode;
2049 long ret;
2050
2051 inode = file_inode(file);
2052
2053 mutex_lock(&adpt_mutex);
2054
2055 switch(cmd) {
2056 case DPT_SIGNATURE:
2057 case I2OUSRCMD:
2058 case DPT_CTRLINFO:
2059 case DPT_SYSINFO:
2060 case DPT_BLINKLED:
2061 case I2ORESETCMD:
2062 case I2ORESCANCMD:
2063 case (DPT_TARGET_BUSY & 0xFFFF):
2064 case DPT_TARGET_BUSY:
2065 ret = adpt_ioctl(inode, file, cmd, arg);
2066 break;
2067 default:
2068 ret = -ENOIOCTLCMD;
2069 }
2070
2071 mutex_unlock(&adpt_mutex);
2072
2073 return ret;
2074 }
2075 #endif
2076
2077 static irqreturn_t adpt_isr(int irq, void *dev_id)
2078 {
2079 struct scsi_cmnd* cmd;
2080 adpt_hba* pHba = dev_id;
2081 u32 m;
2082 void __iomem *reply;
2083 u32 status=0;
2084 u32 context;
2085 ulong flags = 0;
2086 int handled = 0;
2087
2088 if (pHba == NULL){
2089 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2090 return IRQ_NONE;
2091 }
2092 if(pHba->host)
2093 spin_lock_irqsave(pHba->host->host_lock, flags);
2094
2095 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2096 m = readl(pHba->reply_port);
2097 if(m == EMPTY_QUEUE){
2098 // Try twice then give up
2099 rmb();
2100 m = readl(pHba->reply_port);
2101 if(m == EMPTY_QUEUE){
2102 // This really should not happen
2103 printk(KERN_ERR"dpti: Could not get reply frame\n");
2104 goto out;
2105 }
2106 }
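// m is the bus address of the reply frame; translate it back to a
// kernel virtual address via its offset into the reply pool.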
2107 if (pHba->reply_pool_pa <= m &&
2108 m < pHba->reply_pool_pa +
2109 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2110 reply = (u8 *)pHba->reply_pool +
2111 (m - pHba->reply_pool_pa);
2112 } else {
2113 /* Ick, we should *never* be here */
2114 printk(KERN_ERR "dpti: reply frame not from pool\n");
2115 reply = (u8 *)bus_to_virt(m);
2116 }
2117
2118 if (readl(reply) & MSG_FAIL) {
2119 u32 old_m = readl(reply+28);
2120 void __iomem *msg;
2121 u32 old_context;
2122 PDEBUG("%s: Failed message\n",pHba->name);
2123 if(old_m >= 0x100000){
2124 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2125 writel(m,pHba->reply_port);
2126 continue;
2127 }
2128 // Transaction context is 0 in failed reply frame
2129 msg = pHba->msg_addr_virt + old_m;
2130 old_context = readl(msg+12);
2131 writel(old_context, reply+12);
2132 adpt_send_nop(pHba, old_m);
2133 }
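// Reply frame layout in 32-bit words: word 2 (offset 8) is the
// initiator context, word 3 (offset 12) the transaction context,
// word 4 (offset 16) the status. The top bits of the initiator
// context select the completion path: 0x40000000 = ioctl reply,
// 0x80000000 = post-wait message.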
2134 context = readl(reply+8);
2135 if(context & 0x40000000){ // IOCTL
2136 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2137 if( p != NULL) {
2138 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2139 }
2140 // All IOCTLs will also be post wait
2141 }
2142 if(context & 0x80000000){ // Post wait message
2143 status = readl(reply+16);
2144 if(status >> 24){
2145 status &= 0xffff; /* Get detail status */
2146 } else {
2147 status = I2O_POST_WAIT_OK;
2148 }
2149 if(!(context & 0x40000000)) {
2150 /*
2151 * The request tag is one less than the command tag
2152 * as the firmware might treat a 0 tag as invalid
2153 */
2154 cmd = scsi_host_find_tag(pHba->host,
2155 readl(reply + 12) - 1);
2156 if(cmd != NULL) {
2157 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2158 }
2159 }
2160 adpt_i2o_post_wait_complete(context, status);
2161 } else { // SCSI message
2162 /*
2163 * The request tag is one less than the command tag
2164 * as the firmware might treat a 0 tag as invalid
2165 */
2166 cmd = scsi_host_find_tag(pHba->host,
2167 readl(reply + 12) - 1);
2168 if(cmd != NULL){
2169 scsi_dma_unmap(cmd);
2170 adpt_i2o_scsi_complete(reply, cmd);
2171 }
2172 }
2173 writel(m, pHba->reply_port);
2174 wmb();
2175 rmb();
2176 }
2177 handled = 1;
2178 out: if(pHba->host)
2179 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2180 return IRQ_RETVAL(handled);
2181 }
2182
2183 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2184 {
2185 int i;
2186 u32 msg[MAX_MESSAGE_SIZE];
2187 u32* mptr;
2188 u32* lptr;
2189 u32 *lenptr;
2190 int direction;
2191 int scsidir;
2192 int nseg;
2193 u32 len;
2194 u32 reqlen;
2195 s32 rcode;
2196 dma_addr_t addr;
2197
2198 memset(msg, 0 , sizeof(msg));
2199 len = scsi_bufflen(cmd);
2200 direction = 0x00000000;
2201
2202 scsidir = 0x00000000; // DATA NO XFER
2203 if(len) {
2204 /*
2205 * Set SCBFlags to indicate if data is being transferred
2206 * in or out, or no data transfer
2207 * Note: Do not have to verify index is less than 0 since
2208 * cmd->cmnd[0] is an unsigned char
2209 */
2210 switch(cmd->sc_data_direction){
2211 case DMA_FROM_DEVICE:
2212 scsidir =0x40000000; // DATA IN (iop<--dev)
2213 break;
2214 case DMA_TO_DEVICE:
2215 direction=0x04000000; // SGL OUT
2216 scsidir =0x80000000; // DATA OUT (iop-->dev)
2217 break;
2218 case DMA_NONE:
2219 break;
2220 case DMA_BIDIRECTIONAL:
2221 scsidir =0x40000000; // DATA IN (iop<--dev)
2222 // Assume In - and continue;
2223 break;
2224 default:
2225 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2226 pHba->name, cmd->cmnd[0]);
2227 cmd->result = (DID_ERROR <<16);
2228 scsi_done(cmd);
2229 return 0;
2230 }
2231 }
2232 // msg[0] is set later
2233 // I2O_CMD_SCSI_EXEC
2234 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2235 msg[2] = 0;
2236 /* Add 1 to avoid firmware treating it as invalid command */
2237 msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
2238 // Our cards use the transaction context as the tag for queueing
2239 // Adaptec/DPT Private stuff
2240 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2241 msg[5] = d->tid;
2242 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2243 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2244 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2245 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2246 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2247
2248 mptr=msg+7;
2249
2250 // Write SCSI command into the message - always 16 byte block
2251 memset(mptr, 0, 16);
2252 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2253 mptr+=4;
2254 lenptr=mptr++; /* Remember me - fill in when we know */
2255 if (dpt_dma64(pHba)) {
2256 reqlen = 16; // SINGLE SGE
2257 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2258 *mptr++ = 1 << PAGE_SHIFT;
2259 } else {
2260 reqlen = 14; // SINGLE SGE
2261 }
2262 /* Now fill in the SGList and command */
2263
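	/*
	 * Each simple SG element is a flag_count word (flags in the top
	 * byte, byte count in the low 24 bits) followed by a 32-bit bus
	 * address, plus a high dword when 64-bit DMA is enabled:
	 *   0x10000000 = simple element
	 *   0xD0000000 = simple | end of buffer | last element
	 */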
2264 nseg = scsi_dma_map(cmd);
2265 BUG_ON(nseg < 0);
2266 if (nseg) {
2267 struct scatterlist *sg;
2268
2269 len = 0;
2270 scsi_for_each_sg(cmd, sg, nseg, i) {
2271 lptr = mptr;
2272 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2273 len+=sg_dma_len(sg);
2274 addr = sg_dma_address(sg);
2275 *mptr++ = dma_low(addr);
2276 if (dpt_dma64(pHba))
2277 *mptr++ = dma_high(addr);
2278 /* Make this an end of list */
2279 if (i == nseg - 1)
2280 *lptr = direction|0xD0000000|sg_dma_len(sg);
2281 }
2282 reqlen = mptr - msg;
2283 *lenptr = len;
2284
2285 if(cmd->underflow && len != cmd->underflow){
2286 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2287 len, cmd->underflow);
2288 }
2289 } else {
2290 *lenptr = len = 0;
2291 reqlen = 12;
2292 }
2293
2294 /* Stick the headers on */
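// msg[0] packs the message size (in 32-bit words) into the upper 16
// bits and the SGL offset into the lower bits.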
2295 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2296
2297 // Send it on its way
2298 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2299 if (rcode == 0) {
2300 return 0;
2301 }
2302 return rcode;
2303 }
2304
2305
2306 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2307 {
2308 struct Scsi_Host *host;
2309
2310 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2311 if (host == NULL) {
2312 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2313 return -1;
2314 }
2315 host->hostdata[0] = (unsigned long)pHba;
2316 pHba->host = host;
2317
2318 host->irq = pHba->pDev->irq;
2319 /* no IO ports, so don't have to set host->io_port and
2320 * host->n_io_port
2321 */
2322 host->io_port = 0;
2323 host->n_io_port = 0;
2324 /* see comments in scsi_host.h */
2325 host->max_id = 16;
2326 host->max_lun = 256;
2327 host->max_channel = pHba->top_scsi_channel + 1;
2328 host->cmd_per_lun = 1;
2329 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2330 host->sg_tablesize = pHba->sg_tablesize;
2331 host->can_queue = pHba->post_fifo_size;
2332
2333 return 0;
2334 }
2335
2336
2337 static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
2338 {
2339 adpt_hba* pHba;
2340 u32 hba_status;
2341 u32 dev_status;
2342 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2343 // I know this would look cleaner if I just read bytes
2344 // but the model I have been using for all the rest of the
2345 // I/O is 4-byte words - so I keep that model
2346 u16 detailed_status = readl(reply+16) &0xffff;
2347 dev_status = (detailed_status & 0xff);
2348 hba_status = detailed_status >> 8;
2349
2350 // calculate resid for sg
2351 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2352
2353 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2354
2355 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2356
2357 if(!(reply_flags & MSG_FAIL)) {
2358 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2359 case I2O_SCSI_DSC_SUCCESS:
2360 cmd->result = (DID_OK << 16);
2361 // handle underflow
2362 if (readl(reply+20) < cmd->underflow) {
2363 cmd->result = (DID_ERROR <<16);
2364 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2365 }
2366 break;
2367 case I2O_SCSI_DSC_REQUEST_ABORTED:
2368 cmd->result = (DID_ABORT << 16);
2369 break;
2370 case I2O_SCSI_DSC_PATH_INVALID:
2371 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2372 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2373 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2374 case I2O_SCSI_DSC_NO_ADAPTER:
2375 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2376 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2377 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2378 cmd->result = (DID_TIME_OUT << 16);
2379 break;
2380 case I2O_SCSI_DSC_ADAPTER_BUSY:
2381 case I2O_SCSI_DSC_BUS_BUSY:
2382 cmd->result = (DID_BUS_BUSY << 16);
2383 break;
2384 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2385 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2386 cmd->result = (DID_RESET << 16);
2387 break;
2388 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2389 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2390 cmd->result = (DID_PARITY << 16);
2391 break;
2392 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2393 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2394 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2395 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2396 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2397 case I2O_SCSI_DSC_DATA_OVERRUN:
2398 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2399 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2400 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2401 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2402 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2403 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2404 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2405 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2406 case I2O_SCSI_DSC_INVALID_CDB:
2407 case I2O_SCSI_DSC_LUN_INVALID:
2408 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2409 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2410 case I2O_SCSI_DSC_NO_NEXUS:
2411 case I2O_SCSI_DSC_CDB_RECEIVED:
2412 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2413 case I2O_SCSI_DSC_QUEUE_FROZEN:
2414 case I2O_SCSI_DSC_REQUEST_INVALID:
2415 default:
2416 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2417 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2418 hba_status, dev_status, cmd->cmnd[0]);
2419 cmd->result = (DID_ERROR << 16);
2420 break;
2421 }
2422
2423 // copy over the request sense data if it was a check
2424 // condition status
2425 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2426 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2427 // Copy over the sense data
2428 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2429 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2430 cmd->sense_buffer[2] == DATA_PROTECT ){
2431 /* This handles a failed array */
2432 cmd->result = (DID_TIME_OUT << 16);
2433 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2434 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2435 hba_status, dev_status, cmd->cmnd[0]);
2436
2437 }
2438 }
2439 } else {
2440 /* In this condition we could not talk to the tid;
2441 * the card rejected it. We should signal a retry
2442 * for a limited number of retries.
2443 */
2444 cmd->result = (DID_TIME_OUT << 16);
2445 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2446 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2447 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2448 }
2449
2450 cmd->result |= (dev_status);
2451
2452 scsi_done(cmd);
2453 }
2454
2455
2456 static s32 adpt_rescan(adpt_hba* pHba)
2457 {
2458 s32 rcode;
2459 ulong flags = 0;
2460
2461 if(pHba->host)
2462 spin_lock_irqsave(pHba->host->host_lock, flags);
2463 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2464 goto out;
2465 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2466 goto out;
2467 rcode = 0;
2468 out: if(pHba->host)
2469 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2470 return rcode;
2471 }
2472
2473
2474 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2475 {
2476 int i;
2477 int max;
2478 int tid;
2479 struct i2o_device *d;
2480 i2o_lct *lct = pHba->lct;
2481 u8 bus_no = 0;
2482 s16 scsi_id;
2483 u64 scsi_lun;
2484 u32 buf[10]; // at least 8 u32's
2485 struct adpt_device* pDev = NULL;
2486 struct i2o_device* pI2o_dev = NULL;
2487
2488 if (lct == NULL) {
2489 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2490 return -1;
2491 }
2492
2493 max = lct->table_size;
2494 max -= 3;
2495 max /= 9;
2496
2497 // Mark each drive as unscanned
2498 for (d = pHba->devices; d; d = d->next) {
2499 pDev =(struct adpt_device*) d->owner;
2500 if(!pDev){
2501 continue;
2502 }
2503 pDev->state |= DPTI_DEV_UNSCANNED;
2504 }
2505
2506 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2507
2508 for(i=0;i<max;i++) {
2509 if( lct->lct_entry[i].user_tid != 0xfff){
2510 continue;
2511 }
2512
2513 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2514 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2515 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2516 tid = lct->lct_entry[i].tid;
2517 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2518 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2519 continue;
2520 }
2521 bus_no = buf[0]>>16;
2522 if (bus_no >= MAX_CHANNEL) { /* Something is wrong - skip it */
2523 printk(KERN_WARNING
2524 "%s: Channel number %d out of range\n",
2525 pHba->name, bus_no);
2526 continue;
2527 }
2528
2529 scsi_id = buf[1];
2530 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2531 pDev = pHba->channel[bus_no].device[scsi_id];
2532 /* find the matching LUN */
2533 while(pDev) {
2534 if(pDev->scsi_lun == scsi_lun) {
2535 break;
2536 }
2537 pDev = pDev->next_lun;
2538 }
2539 if(!pDev ) { // Something new - add it
2540 d = kmalloc(sizeof(struct i2o_device),
2541 GFP_ATOMIC);
2542 if(d==NULL)
2543 {
2544 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2545 return -ENOMEM;
2546 }
2547
2548 d->controller = pHba;
2549 d->next = NULL;
2550
2551 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2552
2553 d->flags = 0;
2554 adpt_i2o_report_hba_unit(pHba, d);
2555 adpt_i2o_install_device(pHba, d);
2556
2557 pDev = pHba->channel[bus_no].device[scsi_id];
2558 if( pDev == NULL){
2559 pDev =
2560 kzalloc(sizeof(struct adpt_device),
2561 GFP_ATOMIC);
2562 if(pDev == NULL) {
2563 return -ENOMEM;
2564 }
2565 pHba->channel[bus_no].device[scsi_id] = pDev;
2566 } else {
2567 while (pDev->next_lun) {
2568 pDev = pDev->next_lun;
2569 }
2570 pDev = pDev->next_lun =
2571 kzalloc(sizeof(struct adpt_device),
2572 GFP_ATOMIC);
2573 if(pDev == NULL) {
2574 return -ENOMEM;
2575 }
2576 }
2577 pDev->tid = d->lct_data.tid;
2578 pDev->scsi_channel = bus_no;
2579 pDev->scsi_id = scsi_id;
2580 pDev->scsi_lun = scsi_lun;
2581 pDev->pI2o_dev = d;
2582 d->owner = pDev;
2583 pDev->type = (buf[0])&0xff;
2584 pDev->flags = (buf[0]>>8)&0xff;
2585 // Too late, the SCSI system has made up its mind, but what the hey ...
2586 if(scsi_id > pHba->top_scsi_id){
2587 pHba->top_scsi_id = scsi_id;
2588 }
2589 if(scsi_lun > pHba->top_scsi_lun){
2590 pHba->top_scsi_lun = scsi_lun;
2591 }
2592 continue;
2593 } // end of new i2o device
2594
2595 // We found an old device - check it
2596 while(pDev) {
2597 if(pDev->scsi_lun == scsi_lun) {
2598 if(!scsi_device_online(pDev->pScsi_dev)) {
2599 printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2600 pHba->name,bus_no,scsi_id,scsi_lun);
2601 if (pDev->pScsi_dev) {
2602 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2603 }
2604 }
2605 d = pDev->pI2o_dev;
2606 if(d->lct_data.tid != tid) { // something changed
2607 pDev->tid = tid;
2608 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2609 if (pDev->pScsi_dev) {
2610 pDev->pScsi_dev->changed = TRUE;
2611 pDev->pScsi_dev->removable = TRUE;
2612 }
2613 }
2614 // Found it - mark it scanned
2615 pDev->state = DPTI_DEV_ONLINE;
2616 break;
2617 }
2618 pDev = pDev->next_lun;
2619 }
2620 }
2621 }
2622 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2623 pDev =(struct adpt_device*) pI2o_dev->owner;
2624 if(!pDev){
2625 continue;
2626 }
2627 // Take offline any drives that previously existed but could not
2628 // be found in the LCT
2629 if (pDev->state & DPTI_DEV_UNSCANNED){
2630 pDev->state = DPTI_DEV_OFFLINE;
2631 printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2632 if (pDev->pScsi_dev) {
2633 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2634 }
2635 }
2636 }
2637 return 0;
2638 }
2639
2640 /*============================================================================
2641 * Routines from i2o subsystem
2642 *============================================================================
2643 */
2644
2645
2646
2647 /*
2648 * Bring an I2O controller into HOLD state. See the spec.
2649 */
2650 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2651 {
2652 int rcode;
2653
2654 if(pHba->initialized ) {
2655 if (adpt_i2o_status_get(pHba) < 0) {
2656 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2657 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2658 return rcode;
2659 }
2660 if (adpt_i2o_status_get(pHba) < 0) {
2661 printk(KERN_INFO "HBA not responding.\n");
2662 return -1;
2663 }
2664 }
2665
2666 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2667 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2668 return -1;
2669 }
2670
2671 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2672 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2673 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2674 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2675 adpt_i2o_reset_hba(pHba);
2676 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2677 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2678 return -1;
2679 }
2680 }
2681 } else {
2682 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2683 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2684 return rcode;
2685 }
2686
2687 }
2688
2689 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2690 return -1;
2691 }
2692
2693 /* In HOLD state */
2694
2695 if (adpt_i2o_hrt_get(pHba) < 0) {
2696 return -1;
2697 }
2698
2699 return 0;
2700 }
2701
2702 /*
2703 * Bring a controller online into OPERATIONAL state.
2704 */
2705
2706 static int adpt_i2o_online_hba(adpt_hba* pHba)
2707 {
2708 if (adpt_i2o_systab_send(pHba) < 0)
2709 return -1;
2710 /* In READY state */
2711
2712 if (adpt_i2o_enable_hba(pHba) < 0)
2713 return -1;
2714
2715 /* In OPERATIONAL state */
2716 return 0;
2717 }
2718
2719 static s32 adpt_send_nop(adpt_hba *pHba, u32 m)
2720 {
2721 u32 __iomem *msg;
2722 ulong timeout = jiffies + 5*HZ;
2723
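// Reading the post port returns the offset of a free inbound message
// frame, or EMPTY_QUEUE if the IOP has none available yet.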
2724 while(m == EMPTY_QUEUE){
2725 rmb();
2726 m = readl(pHba->post_port);
2727 if(m != EMPTY_QUEUE){
2728 break;
2729 }
2730 if(time_after(jiffies,timeout)){
2731 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2732 return 2;
2733 }
2734 schedule_timeout_uninterruptible(1);
2735 }
2736 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2737 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2738 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2739 writel( 0,&msg[2]);
2740 wmb();
2741
2742 writel(m, pHba->post_port);
2743 wmb();
2744 return 0;
2745 }
2746
2747 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2748 {
2749 u8 *status;
2750 dma_addr_t addr;
2751 u32 __iomem *msg = NULL;
2752 int i;
2753 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2754 u32 m;
2755
2756 do {
2757 rmb();
2758 m = readl(pHba->post_port);
2759 if (m != EMPTY_QUEUE) {
2760 break;
2761 }
2762
2763 if(time_after(jiffies,timeout)){
2764 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2765 return -ETIMEDOUT;
2766 }
2767 schedule_timeout_uninterruptible(1);
2768 } while(m == EMPTY_QUEUE);
2769
2770 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2771
2772 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2773 if (!status) {
2774 adpt_send_nop(pHba, m);
2775 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2776 pHba->name);
2777 return -ENOMEM;
2778 }
2779
2780 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2781 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2782 writel(0, &msg[2]);
2783 writel(0x0106, &msg[3]); /* Transaction context */
2784 writel(4096, &msg[4]); /* Host page frame size */
2785 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2786 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2787 writel((u32)addr, &msg[7]);
2788
2789 writel(m, pHba->post_port);
2790 wmb();
2791
2792 // Wait for the reply status to come back
2793 do {
2794 if (*status) {
2795 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2796 break;
2797 }
2798 }
2799 rmb();
2800 if(time_after(jiffies,timeout)){
2801 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2802 /* We lose 4 bytes of "status" here, but we
2803 cannot free them because the controller may
2804 wake up and corrupt those bytes at any time */
2805 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2806 return -ETIMEDOUT;
2807 }
2808 schedule_timeout_uninterruptible(1);
2809 } while (1);
2810
2811 // If the command was successful, fill the fifo with our reply
2812 // message packets
2813 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2814 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2815 return -2;
2816 }
2817 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2818
2819 if(pHba->reply_pool != NULL) {
2820 dma_free_coherent(&pHba->pDev->dev,
2821 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2822 pHba->reply_pool, pHba->reply_pool_pa);
2823 }
2824
2825 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2826 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2827 &pHba->reply_pool_pa, GFP_KERNEL);
2828 if (!pHba->reply_pool) {
2829 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2830 return -ENOMEM;
2831 }
2832
2833 for(i = 0; i < pHba->reply_fifo_size; i++) {
2834 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2835 pHba->reply_port);
2836 wmb();
2837 }
2838 adpt_i2o_status_get(pHba);
2839 return 0;
2840 }
2841
2842
2843 /*
2844 * I2O System Table. Contains information about
2845 * all the IOPs in the system. Used to inform IOPs
2846 * about each other's existence.
2847 *
2848 * sys_tbl_ver is the CurrentChangeIndicator that is
2849 * used by IOPs to track changes.
2850 */
2851
2852
2853
2854 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2855 {
2856 ulong timeout;
2857 u32 m;
2858 u32 __iomem *msg;
2859 u8 *status_block=NULL;
2860
2861 if(pHba->status_block == NULL) {
2862 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2863 sizeof(i2o_status_block),
2864 &pHba->status_block_pa, GFP_KERNEL);
2865 if(pHba->status_block == NULL) {
2866 printk(KERN_ERR
2867 "dpti%d: Get Status Block failed; Out of memory. \n",
2868 pHba->unit);
2869 return -ENOMEM;
2870 }
2871 }
2872 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2873 status_block = (u8*)(pHba->status_block);
2874 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2875 do {
2876 rmb();
2877 m = readl(pHba->post_port);
2878 if (m != EMPTY_QUEUE) {
2879 break;
2880 }
2881 if(time_after(jiffies,timeout)){
2882 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2883 pHba->name);
2884 return -ETIMEDOUT;
2885 }
2886 schedule_timeout_uninterruptible(1);
2887 } while(m==EMPTY_QUEUE);
2888
2889
2890 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2891
2892 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2893 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2894 writel(1, &msg[2]);
2895 writel(0, &msg[3]);
2896 writel(0, &msg[4]);
2897 writel(0, &msg[5]);
2898 writel( dma_low(pHba->status_block_pa), &msg[6]);
2899 writel( dma_high(pHba->status_block_pa), &msg[7]);
2900 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2901
2902 //post message
2903 writel(m, pHba->post_port);
2904 wmb();
2905
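// The IOP DMAs the 88-byte status block into host memory; poll the
// last byte, which flips to 0xff once the whole block has arrived.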
2906 while(status_block[87]!=0xff){
2907 if(time_after(jiffies,timeout)){
2908 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2909 pHba->unit);
2910 return -ETIMEDOUT;
2911 }
2912 rmb();
2913 schedule_timeout_uninterruptible(1);
2914 }
2915
2916 // Set up our number of outbound and inbound messages
2917 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2918 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2919 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2920 }
2921
2922 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2923 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2924 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2925 }
2926
2927 // Calculate the Scatter Gather list size
2928 if (dpt_dma64(pHba)) {
2929 pHba->sg_tablesize
2930 = ((pHba->status_block->inbound_frame_size * 4
2931 - 14 * sizeof(u32))
2932 / (sizeof(struct sg_simple_element) + sizeof(u32)));
2933 } else {
2934 pHba->sg_tablesize
2935 = ((pHba->status_block->inbound_frame_size * 4
2936 - 12 * sizeof(u32))
2937 / sizeof(struct sg_simple_element));
2938 }
2939 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2940 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2941 }
2942
2943
2944 #ifdef DEBUG
2945 printk("dpti%d: State = ",pHba->unit);
2946 switch(pHba->status_block->iop_state) {
2947 case 0x01:
2948 printk("INIT\n");
2949 break;
2950 case 0x02:
2951 printk("RESET\n");
2952 break;
2953 case 0x04:
2954 printk("HOLD\n");
2955 break;
2956 case 0x05:
2957 printk("READY\n");
2958 break;
2959 case 0x08:
2960 printk("OPERATIONAL\n");
2961 break;
2962 case 0x10:
2963 printk("FAILED\n");
2964 break;
2965 case 0x11:
2966 printk("FAULTED\n");
2967 break;
2968 default:
2969 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2970 }
2971 #endif
2972 return 0;
2973 }
2974
2975 /*
2976 * Get the IOP's Logical Configuration Table
2977 */
2978 static int adpt_i2o_lct_get(adpt_hba* pHba)
2979 {
2980 u32 msg[8];
2981 int ret;
2982 u32 buf[16];
2983
2984 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2985 pHba->lct_size = pHba->status_block->expected_lct_size;
2986 }
2987 do {
2988 if (pHba->lct == NULL) {
2989 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
2990 pHba->lct_size, &pHba->lct_pa,
2991 GFP_ATOMIC);
2992 if(pHba->lct == NULL) {
2993 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2994 pHba->name);
2995 return -ENOMEM;
2996 }
2997 }
2998 memset(pHba->lct, 0, pHba->lct_size);
2999
3000 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3001 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3002 msg[2] = 0;
3003 msg[3] = 0;
3004 msg[4] = 0xFFFFFFFF; /* All devices */
3005 msg[5] = 0x00000000; /* Report now */
3006 msg[6] = 0xD0000000|pHba->lct_size;
3007 msg[7] = (u32)pHba->lct_pa;
3008
3009 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3010 printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3011 pHba->name, ret);
3012 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3013 return ret;
3014 }
3015
3016 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3017 pHba->lct_size = pHba->lct->table_size << 2;
3018 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3019 pHba->lct, pHba->lct_pa);
3020 pHba->lct = NULL;
3021 }
3022 } while (pHba->lct == NULL);
3023
3024 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3025
3026
3027 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3028 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3029 pHba->FwDebugBufferSize = buf[1];
3030 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3031 pHba->FwDebugBufferSize);
3032 if (pHba->FwDebugBuffer_P) {
3033 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3034 FW_DEBUG_FLAGS_OFFSET;
3035 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3036 FW_DEBUG_BLED_OFFSET;
3037 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3038 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3039 FW_DEBUG_STR_LENGTH_OFFSET;
3040 pHba->FwDebugBuffer_P += buf[2];
3041 pHba->FwDebugFlags = 0;
3042 }
3043 }
3044
3045 return 0;
3046 }
3047
3048 static int adpt_i2o_build_sys_table(void)
3049 {
3050 adpt_hba* pHba = hba_chain;
3051 int count = 0;
3052
3053 if (sys_tbl)
3054 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3055 sys_tbl, sys_tbl_pa);
3056
3057 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3058 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3059
3060 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3061 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3062 if (!sys_tbl) {
3063 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3064 return -ENOMEM;
3065 }
3066
3067 sys_tbl->num_entries = hba_count;
3068 sys_tbl->version = I2OVERSION;
3069 sys_tbl->change_ind = sys_tbl_ind++;
3070
3071 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3072 u64 addr;
3073 // Get updated Status Block so we have the latest information
3074 if (adpt_i2o_status_get(pHba)) {
3075 sys_tbl->num_entries--;
3076 continue; // try next one
3077 }
3078
3079 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3080 sys_tbl->iops[count].iop_id = pHba->unit + 2;
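// IOP IDs 0 and 1 appear to be reserved, hence the offset of 2; this
// matches the IOP ID advertised in adpt_i2o_systab_send().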
3081 sys_tbl->iops[count].seg_num = 0;
3082 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3083 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3084 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3085 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3086 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3087 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3088 addr = pHba->base_addr_phys + 0x40;
3089 sys_tbl->iops[count].inbound_low = dma_low(addr);
3090 sys_tbl->iops[count].inbound_high = dma_high(addr);
3091
3092 count++;
3093 }
3094
3095 #ifdef DEBUG
3096 {
3097 u32 *table = (u32*)sys_tbl;
3098 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3099 for(count = 0; count < (sys_tbl_len >>2); count++) {
3100 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3101 count, table[count]);
3102 }
3103 }
3104 #endif
3105
3106 return 0;
3107 }
3108
3109
3110 /*
3111 * Dump the information block associated with a given unit (TID)
3112 */
3113
3114 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3115 {
3116 char buf[64];
3117 int unit = d->lct_data.tid;
3118
3119 printk(KERN_INFO "TID %3.3d ", unit);
3120
3121 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3122 {
3123 buf[16]=0;
3124 printk(" Vendor: %-12.12s", buf);
3125 }
3126 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3127 {
3128 buf[16]=0;
3129 printk(" Device: %-12.12s", buf);
3130 }
3131 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3132 {
3133 buf[8]=0;
3134 printk(" Rev: %-12.12s\n", buf);
3135 }
3136 #ifdef DEBUG
3137 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3138 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3139 printk(KERN_INFO "\tFlags: ");
3140
3141 if(d->lct_data.device_flags&(1<<0))
3142 printk("C"); // ConfigDialog requested
3143 if(d->lct_data.device_flags&(1<<1))
3144 printk("U"); // Multi-user capable
3145 if(!(d->lct_data.device_flags&(1<<4)))
3146 printk("P"); // Peer service enabled!
3147 if(!(d->lct_data.device_flags&(1<<5)))
3148 printk("M"); // Mgmt service enabled!
3149 printk("\n");
3150 #endif
3151 }
3152
3153 #ifdef DEBUG
3154 /*
3155 * Do i2o class name lookup
3156 */
3157 static const char *adpt_i2o_get_class_name(int class)
3158 {
3159 int idx = 16;
3160 static char *i2o_class_name[] = {
3161 "Executive",
3162 "Device Driver Module",
3163 "Block Device",
3164 "Tape Device",
3165 "LAN Interface",
3166 "WAN Interface",
3167 "Fibre Channel Port",
3168 "Fibre Channel Device",
3169 "SCSI Device",
3170 "ATE Port",
3171 "ATE Device",
3172 "Floppy Controller",
3173 "Floppy Device",
3174 "Secondary Bus Port",
3175 "Peer Transport Agent",
3176 "Peer Transport",
3177 "Unknown"
3178 };
3179
3180 switch(class&0xFFF) {
3181 case I2O_CLASS_EXECUTIVE:
3182 idx = 0; break;
3183 case I2O_CLASS_DDM:
3184 idx = 1; break;
3185 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3186 idx = 2; break;
3187 case I2O_CLASS_SEQUENTIAL_STORAGE:
3188 idx = 3; break;
3189 case I2O_CLASS_LAN:
3190 idx = 4; break;
3191 case I2O_CLASS_WAN:
3192 idx = 5; break;
3193 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3194 idx = 6; break;
3195 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3196 idx = 7; break;
3197 case I2O_CLASS_SCSI_PERIPHERAL:
3198 idx = 8; break;
3199 case I2O_CLASS_ATE_PORT:
3200 idx = 9; break;
3201 case I2O_CLASS_ATE_PERIPHERAL:
3202 idx = 10; break;
3203 case I2O_CLASS_FLOPPY_CONTROLLER:
3204 idx = 11; break;
3205 case I2O_CLASS_FLOPPY_DEVICE:
3206 idx = 12; break;
3207 case I2O_CLASS_BUS_ADAPTER_PORT:
3208 idx = 13; break;
3209 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3210 idx = 14; break;
3211 case I2O_CLASS_PEER_TRANSPORT:
3212 idx = 15; break;
3213 }
3214 return i2o_class_name[idx];
3215 }
3216 #endif
3217
3218
3219 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3220 {
3221 u32 msg[6];
3222 int ret, size = sizeof(i2o_hrt);
3223
3224 do {
3225 if (pHba->hrt == NULL) {
3226 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3227 size, &pHba->hrt_pa, GFP_KERNEL);
3228 if (pHba->hrt == NULL) {
3229 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3230 return -ENOMEM;
3231 }
3232 }
3233
3234 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3235 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3236 msg[2]= 0;
3237 msg[3]= 0;
3238 msg[4]= (0xD0000000 | size); /* Simple transaction */
3239 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3240
3241 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3242 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3243 return ret;
3244 }
3245
3246 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3247 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3248 dma_free_coherent(&pHba->pDev->dev, size,
3249 pHba->hrt, pHba->hrt_pa);
3250 size = newsize;
3251 pHba->hrt = NULL;
3252 }
3253 } while(pHba->hrt == NULL);
3254 return 0;
3255 }
3256
3257 /*
3258 * Query one scalar group value or a whole scalar group.
3259 */
3260 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3261 int group, int field, void *buf, int buflen)
3262 {
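/*
 * ParamsGet operation block, one 16-bit word per entry:
 * { OperationCount, Reserved, Operation, GroupNumber,
 *   FieldCount, FieldIdx }; setting FieldCount to -1 below
 * requests every field in the group.
 */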
3263 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3264 u8 *opblk_va;
3265 dma_addr_t opblk_pa;
3266 u8 *resblk_va;
3267 dma_addr_t resblk_pa;
3268
3269 int size;
3270
3271 /* 8 bytes for header */
3272 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3273 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3274 if (resblk_va == NULL) {
3275 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3276 return -ENOMEM;
3277 }
3278
3279 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3280 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3281 if (opblk_va == NULL) {
3282 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3283 resblk_va, resblk_pa);
3284 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3285 pHba->name);
3286 return -ENOMEM;
3287 }
3288 if (field == -1) /* whole group */
3289 opblk[4] = -1;
3290
3291 memcpy(opblk_va, opblk, sizeof(opblk));
3292 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3293 opblk_va, opblk_pa, sizeof(opblk),
3294 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3295 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3296 if (size == -ETIME) {
3297 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3298 resblk_va, resblk_pa);
3299 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3300 return -ETIME;
3301 } else if (size == -EINTR) {
3302 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3303 resblk_va, resblk_pa);
3304 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3305 return -EINTR;
3306 }
3307
3308 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3309
3310 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3311 resblk_va, resblk_pa);
3312 if (size < 0)
3313 return size;
3314
3315 return buflen;
3316 }
3317
3318
3319 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3320 *
3321 * This function can be used for all UtilParamsGet/Set operations.
3322 * The OperationBlock is given in opblk-buffer,
3323 * and results are returned in resblk-buffer.
3324 * Note that the minimum sized resblk is 8 bytes and contains
3325 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3326 */
3327 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3328 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3329 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3330 {
3331 u32 msg[9];
3332 u32 *res = (u32 *)resblk_va;
3333 int wait_status;
3334
3335 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3336 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3337 msg[2] = 0;
3338 msg[3] = 0;
3339 msg[4] = 0;
3340 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3341 msg[6] = (u32)opblk_pa;
3342 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3343 msg[8] = (u32)resblk_pa;
3344
3345 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3346 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3347 return wait_status; /* -DetailedStatus */
3348 }
3349
3350 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3351 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3352 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3353 pHba->name,
3354 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3355 : "PARAMS_GET",
3356 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3357 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3358 }
3359
3360 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3361 }
3362
3363
3364 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3365 {
3366 u32 msg[4];
3367 int ret;
3368
3369 adpt_i2o_status_get(pHba);
3370
3371 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3372
3373 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3374 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3375 return 0;
3376 }
3377
3378 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3379 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3380 msg[2] = 0;
3381 msg[3] = 0;
3382
3383 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3384 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3385 pHba->unit, -ret);
3386 } else {
3387 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3388 }
3389
3390 adpt_i2o_status_get(pHba);
3391 return ret;
3392 }
3393
3394
3395 /*
3396 * Enable IOP. Allows the IOP to resume external operations.
3397 */
3398 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3399 {
3400 u32 msg[4];
3401 int ret;
3402
3403 adpt_i2o_status_get(pHba);
3404 if(!pHba->status_block){
3405 return -ENOMEM;
3406 }
3407 /* Enable only allowed on READY state */
3408 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3409 return 0;
3410
3411 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3412 return -EINVAL;
3413
3414 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3415 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3416 msg[2]= 0;
3417 msg[3]= 0;
3418
3419 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3420 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3421 pHba->name, ret);
3422 } else {
3423 PDEBUG("%s: Enabled.\n", pHba->name);
3424 }
3425
3426 adpt_i2o_status_get(pHba);
3427 return ret;
3428 }
3429
3430
3431 static int adpt_i2o_systab_send(adpt_hba* pHba)
3432 {
3433 u32 msg[12];
3434 int ret;
3435
3436 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3437 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3438 msg[2] = 0;
3439 msg[3] = 0;
3440 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3441 msg[5] = 0; /* Segment 0 */
3442
3443 /*
3444 * Provide three SGL-elements:
3445 * System table (SysTab), Private memory space declaration and
3446 * Private i/o space declaration
3447 */
3448 msg[6] = 0x54000000 | sys_tbl_len;
3449 msg[7] = (u32)sys_tbl_pa;
3450 msg[8] = 0x54000000 | 0;
3451 msg[9] = 0;
3452 msg[10] = 0xD4000000 | 0;
3453 msg[11] = 0;
3454
3455 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3456 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3457 pHba->name, ret);
3458 }
3459 #ifdef DEBUG
3460 else {
3461 PINFO("%s: SysTab set.\n", pHba->name);
3462 }
3463 #endif
3464
3465 return ret;
3466 }
3467
3468
3469 /*============================================================================
3470 *
3471 *============================================================================
3472 */
3473
3474
3475 #ifdef UARTDELAY
3476
3477 static void adpt_delay(int millisec)
3478 {
3479 int i;
3480 for (i = 0; i < millisec; i++) {
3481 udelay(1000); /* delay for one millisecond */
3482 }
3483 }
3484
3485 #endif
3486
3487 static struct scsi_host_template driver_template = {
3488 .module = THIS_MODULE,
3489 .name = "dpt_i2o",
3490 .proc_name = "dpt_i2o",
3491 .show_info = adpt_show_info,
3492 .info = adpt_info,
3493 .queuecommand = adpt_queue,
3494 .eh_abort_handler = adpt_abort,
3495 .eh_device_reset_handler = adpt_device_reset,
3496 .eh_bus_reset_handler = adpt_bus_reset,
3497 .eh_host_reset_handler = adpt_reset,
3498 .bios_param = adpt_bios_param,
3499 .slave_configure = adpt_slave_configure,
3500 .can_queue = MAX_TO_IOP_MESSAGES,
3501 .this_id = 7,
3502 };
3503
3504 static int __init adpt_init(void)
3505 {
3506 int error;
3507 adpt_hba *pHba, *next;
3508
3509 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3510
3511 error = adpt_detect(&driver_template);
3512 if (error < 0)
3513 return error;
3514 if (hba_chain == NULL)
3515 return -ENODEV;
3516
3517 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3518 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3519 if (error)
3520 goto fail;
3521 scsi_scan_host(pHba->host);
3522 }
3523 return 0;
3524 fail:
3525 for (pHba = hba_chain; pHba; pHba = next) {
3526 next = pHba->next;
3527 scsi_remove_host(pHba->host);
3528 }
3529 return error;
3530 }
3531
3532 static void __exit adpt_exit(void)
3533 {
3534 adpt_hba *pHba, *next;
3535
3536 for (pHba = hba_chain; pHba; pHba = next) {
3537 next = pHba->next;
3538 adpt_release(pHba);
3539 }
3540 }
3541
3542 module_init(adpt_init);
3543 module_exit(adpt_exit);
3544
3545 MODULE_LICENSE("GPL");
3546