1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Linux MegaRAID device driver
5 *
6 * Copyright (c) 2002 LSI Logic Corporation.
7 *
8 * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
9 * - fixes
10 * - speed-ups (list handling fixes, issued_list, optimizations.)
11 * - lots of cleanups.
12 *
13 * Copyright (c) 2003 Christoph Hellwig <hch@lst.de>
14 * - new-style, hotplug-aware pci probing and scsi registration
15 *
16 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
17 * <Seokmann.Ju@lsil.com>
18 *
19 * Description: Linux device driver for LSI Logic MegaRAID controller
20 *
21 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
22 * 518, 520, 531, 532
23 *
24 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
25 * and others. Please send updates to the mailing list
26 * linux-scsi@vger.kernel.org .
27 */
28
29 #include <linux/mm.h>
30 #include <linux/fs.h>
31 #include <linux/blkdev.h>
32 #include <linux/uaccess.h>
33 #include <asm/io.h>
34 #include <linux/completion.h>
35 #include <linux/delay.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/reboot.h>
39 #include <linux/module.h>
40 #include <linux/list.h>
41 #include <linux/interrupt.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/mutex.h>
46 #include <linux/slab.h>
47
48 #include <scsi/scsi.h>
49 #include <scsi/scsi_cmnd.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_eh.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsi_tcq.h>
54 #include <scsi/scsicam.h>
55
56 #include "megaraid.h"
57
58 #define MEGARAID_MODULE_VERSION "2.00.4"
59
60 MODULE_AUTHOR ("sju@lsil.com");
61 MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
62 MODULE_LICENSE ("GPL");
63 MODULE_VERSION(MEGARAID_MODULE_VERSION);
64
65 static DEFINE_MUTEX(megadev_mutex);
66 static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
67 module_param(max_cmd_per_lun, uint, 0);
68 MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");
69
70 static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
71 module_param(max_sectors_per_io, ushort, 0);
72 MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");
73
74
75 static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
76 module_param(max_mbox_busy_wait, ushort, 0);
77 MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
78
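/*
 * Doorbell register accessors for memory-mapped controllers: the inbound
 * doorbell sits at offset 0x20 and the outbound doorbell at offset 0x2C
 * from the memory-mapped base, as the macros below show.
 */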
79 #define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
80 #define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
81 #define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
82 #define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
83
84 /*
85 * Global variables
86 */
87
88 static int hba_count;
89 static adapter_t *hba_soft_state[MAX_CONTROLLERS];
90 static struct proc_dir_entry *mega_proc_dir_entry;
91
92 /* For controller re-ordering */
93 static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
94
95 static long
96 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
97
98 /*
99 * The File Operations structure for the serial/ioctl interface of the driver
100 */
101 static const struct file_operations megadev_fops = {
102 .owner = THIS_MODULE,
103 .unlocked_ioctl = megadev_unlocked_ioctl,
104 .open = megadev_open,
105 .llseek = noop_llseek,
106 };
107
108 /*
109 * Array to structures for storing the information about the controllers. This
110 * information is sent to the user level applications, when they do an ioctl
111 * for this information.
112 */
113 static struct mcontroller mcontroller[MAX_CONTROLLERS];
114
115 /* The current driver version */
116 static u32 driver_ver = 0x02000000;
117
118 /* major number used by the device for character interface */
119 static int major;
120
121 #define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01)
122
123
124 /*
125 * Debug variable to print some diagnostic messages
126 */
127 static int trace_level;
128
129 /**
130 * mega_setup_mailbox()
131 * @adapter: pointer to our soft state
132 *
133 * Allocates memory for the handshake mailbox and aligns it on a 16-byte boundary.
134 */
135 static int
136 mega_setup_mailbox(adapter_t *adapter)
137 {
138 unsigned long align;
139
140 adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
141 sizeof(mbox64_t),
142 &adapter->una_mbox64_dma,
143 GFP_KERNEL);
144
145 if( !adapter->una_mbox64 ) return -1;
146
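	/*
	 * Round the embedded mailbox up to the next 16-byte boundary and
	 * place the 64-bit mailbox header (mbox64) in the 8 bytes just
	 * before it; the DMA address is adjusted by the same offset so
	 * that mbox_dma still points at the aligned mailbox.
	 */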
147 adapter->mbox = &adapter->una_mbox64->mbox;
148
149 adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
150 (~0UL ^ 0xFUL));
151
152 adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);
153
154 align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);
155
156 adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;
157
158 /*
159 * Register the mailbox if the controller is an io-mapped controller
160 */
161 if( adapter->flag & BOARD_IOMAP ) {
162
163 outb(adapter->mbox_dma & 0xFF,
164 adapter->host->io_port + MBOX_PORT0);
165
166 outb((adapter->mbox_dma >> 8) & 0xFF,
167 adapter->host->io_port + MBOX_PORT1);
168
169 outb((adapter->mbox_dma >> 16) & 0xFF,
170 adapter->host->io_port + MBOX_PORT2);
171
172 outb((adapter->mbox_dma >> 24) & 0xFF,
173 adapter->host->io_port + MBOX_PORT3);
174
175 outb(ENABLE_MBOX_BYTE,
176 adapter->host->io_port + ENABLE_MBOX_REGION);
177
178 irq_ack(adapter);
179
180 irq_enable(adapter);
181 }
182
183 return 0;
184 }
185
186
187 /*
188 * mega_query_adapter()
189 * @adapter: pointer to our soft state
190 *
191 * Issue the adapter inquiry commands to the controller and find out
192 * information and parameters about the attached devices.
193 */
194 static int
195 mega_query_adapter(adapter_t *adapter)
196 {
197 dma_addr_t prod_info_dma_handle;
198 mega_inquiry3 *inquiry3;
199 struct mbox_out mbox;
200 u8 *raw_mbox = (u8 *)&mbox;
201 int retval;
202
203 /* Initialize adapter inquiry mailbox */
204
205 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
206 memset(&mbox, 0, sizeof(mbox));
207
208 /*
209 * Try to issue the Inquiry3 command; if it fails, fall back to the
210 * older MEGA_MBOXCMD_ADPEXTINQ command and update the enquiry3
211 * structure from its results.
212 */
213 mbox.xferaddr = (u32)adapter->buf_dma_handle;
214
215 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
216
217 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
218 raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
219 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
220
221 /* Issue a blocking command to the card */
222 if ((retval = issue_scb_block(adapter, raw_mbox))) {
223 /* the adapter does not support 40ld */
224
225 mraid_ext_inquiry *ext_inq;
226 mraid_inquiry *inq;
227 dma_addr_t dma_handle;
228
229 ext_inq = dma_alloc_coherent(&adapter->dev->dev,
230 sizeof(mraid_ext_inquiry),
231 &dma_handle, GFP_KERNEL);
232
233 if( ext_inq == NULL ) return -1;
234
235 inq = &ext_inq->raid_inq;
236
237 mbox.xferaddr = (u32)dma_handle;
238
239 /*issue old 0x04 command to adapter */
240 mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ;
241
242 issue_scb_block(adapter, raw_mbox);
243
244 /*
245 * update Enquiry3 and ProductInfo structures with
246 * mraid_inquiry structure
247 */
248 mega_8_to_40ld(inq, inquiry3,
249 (mega_product_info *)&adapter->product_info);
250
251 dma_free_coherent(&adapter->dev->dev,
252 sizeof(mraid_ext_inquiry), ext_inq,
253 dma_handle);
254
255 } else { /*adapter supports 40ld */
256 adapter->flag |= BOARD_40LD;
257
258 /*
259 * get product_info, which is static information and will be
260 * unchanged
261 */
262 prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
263 (void *)&adapter->product_info,
264 sizeof(mega_product_info),
265 DMA_FROM_DEVICE);
266
267 mbox.xferaddr = prod_info_dma_handle;
268
269 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
270 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
271
272 if ((retval = issue_scb_block(adapter, raw_mbox)))
273 dev_warn(&adapter->dev->dev,
274 "Product_info cmd failed with error: %d\n",
275 retval);
276
277 dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
278 sizeof(mega_product_info), DMA_FROM_DEVICE);
279 }
280
281
282 /*
283 * the kernel scans channels from 0 up to and including max_channel
284 */
285 adapter->host->max_channel =
286 adapter->product_info.nchannels + NVIRT_CHAN -1;
287
288 adapter->host->max_id = 16; /* max targets per channel */
289
290 adapter->host->max_lun = 7; /* Up to 7 LUNs for non-disk devices */
291
292 adapter->host->cmd_per_lun = max_cmd_per_lun;
293
294 adapter->numldrv = inquiry3->num_ldrv;
295
296 adapter->max_cmds = adapter->product_info.max_commands;
297
298 if(adapter->max_cmds > MAX_COMMANDS)
299 adapter->max_cmds = MAX_COMMANDS;
300
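	/*
	 * Leave one command slot in reserve; presumably this is used for
	 * the driver's own internal commands (see adapter->int_scb).
	 */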
301 adapter->host->can_queue = adapter->max_cmds - 1;
302
303 /*
304 * Get the maximum number of scatter-gather elements supported by this
305 * firmware
306 */
307 mega_get_max_sgl(adapter);
308
309 adapter->host->sg_tablesize = adapter->sglen;
310
311 /* use HP firmware and bios version encoding
312 Note: fw_version[0|1] and bios_version[0|1] were originally shifted
313 right 8 bits making them zero. This 0 value was hardcoded to fix
314 sparse warnings. */
315 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
316 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
317 "%c%d%d.%d%d",
318 adapter->product_info.fw_version[2],
319 0,
320 adapter->product_info.fw_version[1] & 0x0f,
321 0,
322 adapter->product_info.fw_version[0] & 0x0f);
323 snprintf(adapter->bios_version, sizeof(adapter->fw_version),
324 "%c%d%d.%d%d",
325 adapter->product_info.bios_version[2],
326 0,
327 adapter->product_info.bios_version[1] & 0x0f,
328 0,
329 adapter->product_info.bios_version[0] & 0x0f);
330 } else {
331 memcpy(adapter->fw_version,
332 (char *)adapter->product_info.fw_version, 4);
333 adapter->fw_version[4] = 0;
334
335 memcpy(adapter->bios_version,
336 (char *)adapter->product_info.bios_version, 4);
337
338 adapter->bios_version[4] = 0;
339 }
340
341 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
342 adapter->fw_version, adapter->bios_version, adapter->numldrv);
343
344 /*
345 * Do we support extended (>10 bytes) cdbs
346 */
347 adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
348 if (adapter->support_ext_cdb)
349 dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
350
351
352 return 0;
353 }
354
355 /**
356 * mega_runpendq()
357 * @adapter: pointer to our soft state
358 *
359 * Runs through the list of pending requests.
360 */
361 static inline void
362 mega_runpendq(adapter_t *adapter)
363 {
364 if(!list_empty(&adapter->pending_list))
365 __mega_runpendq(adapter);
366 }
367
368 /*
369 * megaraid_queue()
370 * @scmd: Issue this scsi command
372 *
373 * The command queuing entry point for the mid-layer.
374 */
375 static int megaraid_queue_lck(struct scsi_cmnd *scmd)
376 {
377 adapter_t *adapter;
378 scb_t *scb;
379 int busy=0;
380 unsigned long flags;
381
382 adapter = (adapter_t *)scmd->device->host->hostdata;
383
384 /*
385 * Allocate and build an SCB request. The busy flag is set if
386 * mega_build_cmd() could not allocate an SCB; we return a non-zero
387 * status in that case.
388 * NOTE: scb can be NULL even though certain commands (e.g. MODE_SENSE
389 * and TEST_UNIT_READY) completed successfully; we return 0 in that
390 * case.
391 */
392
393 spin_lock_irqsave(&adapter->lock, flags);
394 scb = mega_build_cmd(adapter, scmd, &busy);
395 if (!scb)
396 goto out;
397
398 scb->state |= SCB_PENDQ;
399 list_add_tail(&scb->list, &adapter->pending_list);
400
401 /*
402 * Check if the HBA is in quiescent state, e.g., during a
403 * delete logical drive operation. If it is, don't run
404 * the pending_list.
405 */
406 if (atomic_read(&adapter->quiescent) == 0)
407 mega_runpendq(adapter);
408
409 busy = 0;
410 out:
411 spin_unlock_irqrestore(&adapter->lock, flags);
412 return busy;
413 }
414
415 static DEF_SCSI_QCMD(megaraid_queue)
416
417 /**
418 * mega_allocate_scb()
419 * @adapter: pointer to our soft state
420 * @cmd: scsi command from the mid-layer
421 *
422 * Allocate a SCB structure. This is the central structure for controller
423 * commands.
424 */
425 static inline scb_t *
426 mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
427 {
428 struct list_head *head = &adapter->free_list;
429 scb_t *scb;
430
431 /* Unlink command from Free List */
432 if( !list_empty(head) ) {
433
434 scb = list_entry(head->next, scb_t, list);
435
436 list_del_init(head->next);
437
438 scb->state = SCB_ACTIVE;
439 scb->cmd = cmd;
440 scb->dma_type = MEGA_DMA_TYPE_NONE;
441
442 return scb;
443 }
444
445 return NULL;
446 }
447
448 /**
449 * mega_get_ldrv_num()
450 * @adapter: pointer to our soft state
451 * @cmd: scsi mid layer command
452 * @channel: channel on the controller
453 *
454 * Calculate the logical drive number based on the information in scsi command
455 * and the channel number.
456 */
457 static inline int
458 mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
459 {
460 int tgt;
461 int ldrv_num;
462
463 tgt = cmd->device->id;
464
465 if ( tgt > adapter->this_id )
466 tgt--; /* we do not get inquiries for the initiator id */
467
468 ldrv_num = (channel * 15) + tgt;
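	/* e.g. channel 1 and (adjusted) target 2 map to logical drive (1 * 15) + 2 = 17 */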
469
470
471 /*
472 * If we have a logical drive with boot enabled, present it first
473 */
474 if( adapter->boot_ldrv_enabled ) {
475 if( ldrv_num == 0 ) {
476 ldrv_num = adapter->boot_ldrv;
477 }
478 else {
479 if( ldrv_num <= adapter->boot_ldrv ) {
480 ldrv_num--;
481 }
482 }
483 }
484
485 /*
486 * If "delete logical drive" feature is enabled on this controller.
487 * Do only if at least one delete logical drive operation was done.
488 *
489 * Also, after logical drive deletion, instead of logical drive number,
490 * the value returned should be 0x80+logical drive id.
491 *
492 * These is valid only for IO commands.
493 */
494
495 if (adapter->support_random_del && adapter->read_ldidmap )
496 switch (cmd->cmnd[0]) {
497 case READ_6:
498 case WRITE_6:
499 case READ_10:
500 case WRITE_10:
501 ldrv_num += 0x80;
502 }
503
504 return ldrv_num;
505 }
506
507 /**
508 * mega_build_cmd()
509 * @adapter: pointer to our soft state
510 * @cmd: Prepare using this scsi command
511 * @busy: busy flag if no resources
512 *
513 * Prepares a command and scatter-gather list for the controller. This routine
514 * also finds out if the command is intended for a logical drive or a
515 * physical device and prepares the controller command accordingly.
516 *
517 * We also re-order the logical drives and physical devices based on their
518 * boot settings.
519 */
520 static scb_t *
521 mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
522 {
523 mega_passthru *pthru;
524 scb_t *scb;
525 mbox_t *mbox;
526 u32 seg;
527 char islogical;
528 int max_ldrv_num;
529 int channel = 0;
530 int target = 0;
531 int ldrv_num = 0; /* logical drive number */
532
533 /*
534 * We know what channels our logical drives are on - mega_find_card()
535 */
536 islogical = adapter->logdrv_chan[cmd->device->channel];
537
538 /*
539 * The theory: if a physical drive is chosen for boot, all the physical
540 * devices are exported before the logical drives; otherwise the
541 * physical devices are pushed after the logical drives. In the latter
542 * case the kernel sees the physical devices on a virtual channel, which
543 * is converted to the actual channel on the HBA.
544 */
545 if( adapter->boot_pdrv_enabled ) {
546 if( islogical ) {
547 /* logical channel */
548 channel = cmd->device->channel -
549 adapter->product_info.nchannels;
550 }
551 else {
552 /* this is physical channel */
553 channel = cmd->device->channel;
554 target = cmd->device->id;
555
556 /*
557 * To boot from a physical disk, that disk needs to be
558 * exposed first. If both the channels are SCSI, then
559 * booting from the second channel is not allowed.
560 */
561 if( target == 0 ) {
562 target = adapter->boot_pdrv_tgt;
563 }
564 else if( target == adapter->boot_pdrv_tgt ) {
565 target = 0;
566 }
567 }
568 }
569 else {
570 if( islogical ) {
571 /* this is the logical channel */
572 channel = cmd->device->channel;
573 }
574 else {
575 /* physical channel */
576 channel = cmd->device->channel - NVIRT_CHAN;
577 target = cmd->device->id;
578 }
579 }
580
581
582 if(islogical) {
583
584 /* have just LUN 0 for each target on virtual channels */
585 if (cmd->device->lun) {
586 cmd->result = (DID_BAD_TARGET << 16);
587 scsi_done(cmd);
588 return NULL;
589 }
590
591 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);
592
593
594 max_ldrv_num = (adapter->flag & BOARD_40LD) ?
595 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;
596
597 /*
598 * max_ldrv_num increases by 0x80 if some logical drive was
599 * deleted.
600 */
601 if(adapter->read_ldidmap)
602 max_ldrv_num += 0x80;
603
604 if(ldrv_num > max_ldrv_num ) {
605 cmd->result = (DID_BAD_TARGET << 16);
606 scsi_done(cmd);
607 return NULL;
608 }
609
610 }
611 else {
612 if( cmd->device->lun > 7) {
613 /*
614 * Do not support lun >7 for physically accessed
615 * devices
616 */
617 cmd->result = (DID_BAD_TARGET << 16);
618 scsi_done(cmd);
619 return NULL;
620 }
621 }
622
623 /*
624 *
625 * Logical drive commands
626 *
627 */
628 if(islogical) {
629 switch (cmd->cmnd[0]) {
630 case TEST_UNIT_READY:
631 #if MEGA_HAVE_CLUSTERING
632 /*
633 * Do we support clustering and is the support enabled?
634 * If not, always return success.
635 */
636 if( !adapter->has_cluster ) {
637 cmd->result = (DID_OK << 16);
638 scsi_done(cmd);
639 return NULL;
640 }
641
642 if(!(scb = mega_allocate_scb(adapter, cmd))) {
643 *busy = 1;
644 return NULL;
645 }
646
647 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
648 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
649 scb->raw_mbox[3] = ldrv_num;
650
651 scb->dma_direction = DMA_NONE;
652
653 return scb;
654 #else
655 cmd->result = (DID_OK << 16);
656 scsi_done(cmd);
657 return NULL;
658 #endif
659
660 case MODE_SENSE: {
661 char *buf;
662 struct scatterlist *sg;
663
664 sg = scsi_sglist(cmd);
665 buf = kmap_atomic(sg_page(sg)) + sg->offset;
666
667 memset(buf, 0, cmd->cmnd[4]);
668 kunmap_atomic(buf - sg->offset);
669
670 cmd->result = (DID_OK << 16);
671 scsi_done(cmd);
672 return NULL;
673 }
674
675 case READ_CAPACITY:
676 case INQUIRY:
677
678 if(!(adapter->flag & (1L << cmd->device->channel))) {
679
680 dev_notice(&adapter->dev->dev,
681 "scsi%d: scanning scsi channel %d "
682 "for logical drives\n",
683 adapter->host->host_no,
684 cmd->device->channel);
685
686 adapter->flag |= (1L << cmd->device->channel);
687 }
688
689 /* Allocate a SCB and initialize passthru */
690 if(!(scb = mega_allocate_scb(adapter, cmd))) {
691 *busy = 1;
692 return NULL;
693 }
694 pthru = scb->pthru;
695
696 mbox = (mbox_t *)scb->raw_mbox;
697 memset(mbox, 0, sizeof(scb->raw_mbox));
698 memset(pthru, 0, sizeof(mega_passthru));
699
700 pthru->timeout = 0;
701 pthru->ars = 1;
702 pthru->reqsenselen = 14;
703 pthru->islogical = 1;
704 pthru->logdrv = ldrv_num;
705 pthru->cdblen = cmd->cmd_len;
706 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
707
708 if( adapter->has_64bit_addr ) {
709 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
710 }
711 else {
712 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
713 }
714
715 scb->dma_direction = DMA_FROM_DEVICE;
716
717 pthru->numsgelements = mega_build_sglist(adapter, scb,
718 &pthru->dataxferaddr, &pthru->dataxferlen);
719
720 mbox->m_out.xferaddr = scb->pthru_dma_addr;
721
722 return scb;
723
724 case READ_6:
725 case WRITE_6:
726 case READ_10:
727 case WRITE_10:
728 case READ_12:
729 case WRITE_12:
730
731 /* Allocate a SCB and initialize mailbox */
732 if(!(scb = mega_allocate_scb(adapter, cmd))) {
733 *busy = 1;
734 return NULL;
735 }
736 mbox = (mbox_t *)scb->raw_mbox;
737
738 memset(mbox, 0, sizeof(scb->raw_mbox));
739 mbox->m_out.logdrv = ldrv_num;
740
741 /*
742 * A little hack: 2nd bit is zero for all scsi read
743 * commands and is set for all scsi write commands
744 */
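			/* e.g. READ_10 = 0x28 vs WRITE_10 = 0x2A: only bit 1 differs */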
745 if( adapter->has_64bit_addr ) {
746 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
747 MEGA_MBOXCMD_LWRITE64:
748 MEGA_MBOXCMD_LREAD64 ;
749 }
750 else {
751 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
752 MEGA_MBOXCMD_LWRITE:
753 MEGA_MBOXCMD_LREAD ;
754 }
755
756 /*
757 * 6-byte READ(0x08) or WRITE(0x0A) cdb
758 */
759 if( cmd->cmd_len == 6 ) {
760 mbox->m_out.numsectors = (u32) cmd->cmnd[4];
761 mbox->m_out.lba =
762 ((u32)cmd->cmnd[1] << 16) |
763 ((u32)cmd->cmnd[2] << 8) |
764 (u32)cmd->cmnd[3];
765
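				/* a 6-byte CDB carries only a 21-bit LBA, so mask off the high bits */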
766 mbox->m_out.lba &= 0x1FFFFF;
767
768 #if MEGA_HAVE_STATS
769 /*
770 * Take modulo 0x80, since the logical drive
771 * number increases by 0x80 when a logical
772 * drive was deleted
773 */
774 if (*cmd->cmnd == READ_6) {
775 adapter->nreads[ldrv_num%0x80]++;
776 adapter->nreadblocks[ldrv_num%0x80] +=
777 mbox->m_out.numsectors;
778 } else {
779 adapter->nwrites[ldrv_num%0x80]++;
780 adapter->nwriteblocks[ldrv_num%0x80] +=
781 mbox->m_out.numsectors;
782 }
783 #endif
784 }
785
786 /*
787 * 10-byte READ(0x28) or WRITE(0x2A) cdb
788 */
789 if( cmd->cmd_len == 10 ) {
790 mbox->m_out.numsectors =
791 (u32)cmd->cmnd[8] |
792 ((u32)cmd->cmnd[7] << 8);
793 mbox->m_out.lba =
794 ((u32)cmd->cmnd[2] << 24) |
795 ((u32)cmd->cmnd[3] << 16) |
796 ((u32)cmd->cmnd[4] << 8) |
797 (u32)cmd->cmnd[5];
798
799 #if MEGA_HAVE_STATS
800 if (*cmd->cmnd == READ_10) {
801 adapter->nreads[ldrv_num%0x80]++;
802 adapter->nreadblocks[ldrv_num%0x80] +=
803 mbox->m_out.numsectors;
804 } else {
805 adapter->nwrites[ldrv_num%0x80]++;
806 adapter->nwriteblocks[ldrv_num%0x80] +=
807 mbox->m_out.numsectors;
808 }
809 #endif
810 }
811
812 /*
813 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
814 */
815 if( cmd->cmd_len == 12 ) {
816 mbox->m_out.lba =
817 ((u32)cmd->cmnd[2] << 24) |
818 ((u32)cmd->cmnd[3] << 16) |
819 ((u32)cmd->cmnd[4] << 8) |
820 (u32)cmd->cmnd[5];
821
822 mbox->m_out.numsectors =
823 ((u32)cmd->cmnd[6] << 24) |
824 ((u32)cmd->cmnd[7] << 16) |
825 ((u32)cmd->cmnd[8] << 8) |
826 (u32)cmd->cmnd[9];
827
828 #if MEGA_HAVE_STATS
829 if (*cmd->cmnd == READ_12) {
830 adapter->nreads[ldrv_num%0x80]++;
831 adapter->nreadblocks[ldrv_num%0x80] +=
832 mbox->m_out.numsectors;
833 } else {
834 adapter->nwrites[ldrv_num%0x80]++;
835 adapter->nwriteblocks[ldrv_num%0x80] +=
836 mbox->m_out.numsectors;
837 }
838 #endif
839 }
840
841 /*
842 * If it is a read command
843 */
844 if( (*cmd->cmnd & 0x0F) == 0x08 ) {
845 scb->dma_direction = DMA_FROM_DEVICE;
846 }
847 else {
848 scb->dma_direction = DMA_TO_DEVICE;
849 }
850
851 /* Calculate Scatter-Gather info */
852 mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
853 (u32 *)&mbox->m_out.xferaddr, &seg);
854
855 return scb;
856
857 #if MEGA_HAVE_CLUSTERING
858 case RESERVE:
859 case RELEASE:
860
861 /*
862 * Do we support clustering and is the support enabled?
863 */
864 if( ! adapter->has_cluster ) {
865
866 cmd->result = (DID_BAD_TARGET << 16);
867 scsi_done(cmd);
868 return NULL;
869 }
870
871 /* Allocate a SCB and initialize mailbox */
872 if(!(scb = mega_allocate_scb(adapter, cmd))) {
873 *busy = 1;
874 return NULL;
875 }
876
877 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
878 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
879 MEGA_RESERVE_LD : MEGA_RELEASE_LD;
880
881 scb->raw_mbox[3] = ldrv_num;
882
883 scb->dma_direction = DMA_NONE;
884
885 return scb;
886 #endif
887
888 default:
889 cmd->result = (DID_BAD_TARGET << 16);
890 scsi_done(cmd);
891 return NULL;
892 }
893 }
894
895 /*
896 * Passthru drive commands
897 */
898 else {
899 /* Allocate a SCB and initialize passthru */
900 if(!(scb = mega_allocate_scb(adapter, cmd))) {
901 *busy = 1;
902 return NULL;
903 }
904
905 mbox = (mbox_t *)scb->raw_mbox;
906 memset(mbox, 0, sizeof(scb->raw_mbox));
907
908 if( adapter->support_ext_cdb ) {
909
910 mega_prepare_extpassthru(adapter, scb, cmd,
911 channel, target);
912
913 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
914
915 mbox->m_out.xferaddr = scb->epthru_dma_addr;
916
917 }
918 else {
919
920 pthru = mega_prepare_passthru(adapter, scb, cmd,
921 channel, target);
922
923 /* Initialize mailbox */
924 if( adapter->has_64bit_addr ) {
925 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
926 }
927 else {
928 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
929 }
930
931 mbox->m_out.xferaddr = scb->pthru_dma_addr;
932
933 }
934 return scb;
935 }
936 return NULL;
937 }
938
939
940 /**
941 * mega_prepare_passthru()
942 * @adapter: pointer to our soft state
943 * @scb: our scsi control block
944 * @cmd: scsi command from the mid-layer
945 * @channel: actual channel on the controller
946 * @target: actual id on the controller.
947 *
948 * prepare a command for the scsi physical devices.
949 */
950 static mega_passthru *
951 mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
952 int channel, int target)
953 {
954 mega_passthru *pthru;
955
956 pthru = scb->pthru;
957 memset(pthru, 0, sizeof (mega_passthru));
958
959 /* 0=6sec/1=60sec/2=10min/3=3hrs */
960 pthru->timeout = 2;
961
962 pthru->ars = 1;
963 pthru->reqsenselen = 14;
964 pthru->islogical = 0;
965
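	/*
	 * On 40LD firmware the channel is folded into the target field
	 * (upper nibble = channel, lower nibble = target) and the channel
	 * field itself is left as zero.
	 */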
966 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
967
968 pthru->target = (adapter->flag & BOARD_40LD) ?
969 (channel << 4) | target : target;
970
971 pthru->cdblen = cmd->cmd_len;
972 pthru->logdrv = cmd->device->lun;
973
974 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
975
976 /* Not sure about the direction */
977 scb->dma_direction = DMA_BIDIRECTIONAL;
978
979 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
980 switch (cmd->cmnd[0]) {
981 case INQUIRY:
982 case READ_CAPACITY:
983 if(!(adapter->flag & (1L << cmd->device->channel))) {
984
985 dev_notice(&adapter->dev->dev,
986 "scsi%d: scanning scsi channel %d [P%d] "
987 "for physical devices\n",
988 adapter->host->host_no,
989 cmd->device->channel, channel);
990
991 adapter->flag |= (1L << cmd->device->channel);
992 }
993 fallthrough;
994 default:
995 pthru->numsgelements = mega_build_sglist(adapter, scb,
996 &pthru->dataxferaddr, &pthru->dataxferlen);
997 break;
998 }
999 return pthru;
1000 }
1001
1002
1003 /**
1004 * mega_prepare_extpassthru()
1005 * @adapter: pointer to our soft state
1006 * @scb: our scsi control block
1007 * @cmd: scsi command from the mid-layer
1008 * @channel: actual channel on the controller
1009 * @target: actual id on the controller.
1010 *
1011 * prepare a command for the scsi physical devices. This routine prepares
1012 * commands for devices which can take extended CDBs (>10 bytes)
1013 */
1014 static mega_ext_passthru *
1015 mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
1016 struct scsi_cmnd *cmd,
1017 int channel, int target)
1018 {
1019 mega_ext_passthru *epthru;
1020
1021 epthru = scb->epthru;
1022 memset(epthru, 0, sizeof(mega_ext_passthru));
1023
1024 /* 0=6sec/1=60sec/2=10min/3=3hrs */
1025 epthru->timeout = 2;
1026
1027 epthru->ars = 1;
1028 epthru->reqsenselen = 14;
1029 epthru->islogical = 0;
1030
1031 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
1032 epthru->target = (adapter->flag & BOARD_40LD) ?
1033 (channel << 4) | target : target;
1034
1035 epthru->cdblen = cmd->cmd_len;
1036 epthru->logdrv = cmd->device->lun;
1037
1038 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
1039
1040 /* Not sure about the direction */
1041 scb->dma_direction = DMA_BIDIRECTIONAL;
1042
1043 switch(cmd->cmnd[0]) {
1044 case INQUIRY:
1045 case READ_CAPACITY:
1046 if(!(adapter->flag & (1L << cmd->device->channel))) {
1047
1048 dev_notice(&adapter->dev->dev,
1049 "scsi%d: scanning scsi channel %d [P%d] "
1050 "for physical devices\n",
1051 adapter->host->host_no,
1052 cmd->device->channel, channel);
1053
1054 adapter->flag |= (1L << cmd->device->channel);
1055 }
1056 fallthrough;
1057 default:
1058 epthru->numsgelements = mega_build_sglist(adapter, scb,
1059 &epthru->dataxferaddr, &epthru->dataxferlen);
1060 break;
1061 }
1062
1063 return epthru;
1064 }
1065
1066 static void
1067 __mega_runpendq(adapter_t *adapter)
1068 {
1069 scb_t *scb;
1070 struct list_head *pos, *next;
1071
1072 /* Issue any pending commands to the card */
1073 list_for_each_safe(pos, next, &adapter->pending_list) {
1074
1075 scb = list_entry(pos, scb_t, list);
1076
1077 if( !(scb->state & SCB_ISSUED) ) {
1078
1079 if( issue_scb(adapter, scb) != 0 )
1080 return;
1081 }
1082 }
1083
1084 return;
1085 }
1086
1087
1088 /**
1089 * issue_scb()
1090 * @adapter: pointer to our soft state
1091 * @scb: scsi control block
1092 *
1093 * Post a command to the card if the mailbox is available, otherwise return
1094 * busy. We also take the scb from the pending list if the mailbox is
1095 * available.
1096 */
1097 static int
1098 issue_scb(adapter_t *adapter, scb_t *scb)
1099 {
1100 volatile mbox64_t *mbox64 = adapter->mbox64;
1101 volatile mbox_t *mbox = adapter->mbox;
1102 unsigned int i = 0;
1103
1104 if(unlikely(mbox->m_in.busy)) {
1105 do {
1106 udelay(1);
1107 i++;
1108 } while( mbox->m_in.busy && (i < max_mbox_busy_wait) );
1109
1110 if(mbox->m_in.busy) return -1;
1111 }
1112
1113 /* Copy mailbox data into host structure */
1114 memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
1115 sizeof(struct mbox_out));
1116
1117 mbox->m_out.cmdid = scb->idx; /* Set cmdid */
1118 mbox->m_in.busy = 1; /* Set busy */
1119
1120
1121 /*
1122 * Increment the pending queue counter
1123 */
1124 atomic_inc(&adapter->pend_cmds);
1125
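	/*
	 * 64-bit capable commands pass the data address through the extended
	 * mailbox (mbox64); the legacy xferaddr field is set to 0xFFFFFFFF,
	 * apparently to tell the firmware to use the 64-bit segment instead.
	 */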
1126 switch (mbox->m_out.cmd) {
1127 case MEGA_MBOXCMD_LREAD64:
1128 case MEGA_MBOXCMD_LWRITE64:
1129 case MEGA_MBOXCMD_PASSTHRU64:
1130 case MEGA_MBOXCMD_EXTPTHRU:
1131 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1132 mbox64->xfer_segment_hi = 0;
1133 mbox->m_out.xferaddr = 0xFFFFFFFF;
1134 break;
1135 default:
1136 mbox64->xfer_segment_lo = 0;
1137 mbox64->xfer_segment_hi = 0;
1138 }
1139
1140 /*
1141 * post the command
1142 */
1143 scb->state |= SCB_ISSUED;
1144
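	/*
	 * Memory-mapped boards are kicked via the inbound doorbell (the low
	 * bit posts a new mailbox command); io-mapped boards go through
	 * issue_command() with interrupts enabled.
	 */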
1145 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1146 mbox->m_in.poll = 0;
1147 mbox->m_in.ack = 0;
1148 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1149 }
1150 else {
1151 irq_enable(adapter);
1152 issue_command(adapter);
1153 }
1154
1155 return 0;
1156 }
1157
1158 /*
1159 * Wait until the controller's mailbox is available
1160 */
1161 static inline int
1162 mega_busywait_mbox (adapter_t *adapter)
1163 {
1164 if (adapter->mbox->m_in.busy)
1165 return __mega_busywait_mbox(adapter);
1166 return 0;
1167 }
1168
1169 /**
1170 * issue_scb_block()
1171 * @adapter: pointer to our soft state
1172 * @raw_mbox: the mailbox
1173 *
1174 * Issue a scb in synchronous and non-interrupt mode
1175 */
1176 static int
1177 issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
1178 {
1179 volatile mbox64_t *mbox64 = adapter->mbox64;
1180 volatile mbox_t *mbox = adapter->mbox;
1181 u8 byte;
1182
1183 /* Wait until mailbox is free */
1184 if(mega_busywait_mbox (adapter))
1185 goto bug_blocked_mailbox;
1186
1187 /* Copy mailbox data into host structure */
1188 memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
1189 mbox->m_out.cmdid = 0xFE;
1190 mbox->m_in.busy = 1;
1191
1192 switch (raw_mbox[0]) {
1193 case MEGA_MBOXCMD_LREAD64:
1194 case MEGA_MBOXCMD_LWRITE64:
1195 case MEGA_MBOXCMD_PASSTHRU64:
1196 case MEGA_MBOXCMD_EXTPTHRU:
1197 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1198 mbox64->xfer_segment_hi = 0;
1199 mbox->m_out.xferaddr = 0xFFFFFFFF;
1200 break;
1201 default:
1202 mbox64->xfer_segment_lo = 0;
1203 mbox64->xfer_segment_hi = 0;
1204 }
1205
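	/*
	 * Memory-mapped boards are polled directly: numstatus/status are
	 * primed with the 0xFF sentinel, poll/ack use a 0x77 handshake, and
	 * the doorbell's low bit posts the command while bit 1 acknowledges
	 * its completion.
	 */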
1206 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1207 mbox->m_in.poll = 0;
1208 mbox->m_in.ack = 0;
1209 mbox->m_in.numstatus = 0xFF;
1210 mbox->m_in.status = 0xFF;
1211 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1212
1213 while((volatile u8)mbox->m_in.numstatus == 0xFF)
1214 cpu_relax();
1215
1216 mbox->m_in.numstatus = 0xFF;
1217
1218 while( (volatile u8)mbox->m_in.poll != 0x77 )
1219 cpu_relax();
1220
1221 mbox->m_in.poll = 0;
1222 mbox->m_in.ack = 0x77;
1223
1224 WRINDOOR(adapter, adapter->mbox_dma | 0x2);
1225
1226 while(RDINDOOR(adapter) & 0x2)
1227 cpu_relax();
1228 }
1229 else {
1230 irq_disable(adapter);
1231 issue_command(adapter);
1232
1233 while (!((byte = irq_state(adapter)) & INTR_VALID))
1234 cpu_relax();
1235
1236 set_irq_state(adapter, byte);
1237 irq_enable(adapter);
1238 irq_ack(adapter);
1239 }
1240
1241 return mbox->m_in.status;
1242
1243 bug_blocked_mailbox:
1244 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
1245 udelay (1000);
1246 return -1;
1247 }
1248
1249
1250 /**
1251 * megaraid_isr_iomapped()
1252 * @irq: irq
1253 * @devp: pointer to our soft state
1254 *
1255 * Interrupt service routine for io-mapped controllers.
1256 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1257 * and service the completed commands.
1258 */
1259 static irqreturn_t
1260 megaraid_isr_iomapped(int irq, void *devp)
1261 {
1262 adapter_t *adapter = devp;
1263 unsigned long flags;
1264 u8 status;
1265 u8 nstatus;
1266 u8 completed[MAX_FIRMWARE_STATUS];
1267 u8 byte;
1268 int handled = 0;
1269
1270
1271 /*
1272 * Loop as long as the firmware has more completed commands for us.
1273 */
1274 spin_lock_irqsave(&adapter->lock, flags);
1275
1276 do {
1277 /* Check if a valid interrupt is pending */
1278 byte = irq_state(adapter);
1279 if( (byte & VALID_INTR_BYTE) == 0 ) {
1280 /*
1281 * No more pending commands
1282 */
1283 goto out_unlock;
1284 }
1285 set_irq_state(adapter, byte);
1286
1287 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1288 == 0xFF)
1289 cpu_relax();
1290 adapter->mbox->m_in.numstatus = 0xFF;
1291
1292 status = adapter->mbox->m_in.status;
1293
1294 /*
1295 * decrement the pending queue counter
1296 */
1297 atomic_sub(nstatus, &adapter->pend_cmds);
1298
1299 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1300 nstatus);
1301
1302 /* Acknowledge interrupt */
1303 irq_ack(adapter);
1304
1305 mega_cmd_done(adapter, completed, nstatus, status);
1306
1307 mega_rundoneq(adapter);
1308
1309 handled = 1;
1310
1311 /* Loop through any pending requests */
1312 if(atomic_read(&adapter->quiescent) == 0) {
1313 mega_runpendq(adapter);
1314 }
1315
1316 } while(1);
1317
1318 out_unlock:
1319
1320 spin_unlock_irqrestore(&adapter->lock, flags);
1321
1322 return IRQ_RETVAL(handled);
1323 }
1324
1325
1326 /**
1327 * megaraid_isr_memmapped()
1328 * @irq: irq
1329 * @devp: pointer to our soft state
1330 *
1331 * Interrupt service routine for memory-mapped controllers.
1332 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1333 * and service the completed commands.
1334 */
1335 static irqreturn_t
1336 megaraid_isr_memmapped(int irq, void *devp)
1337 {
1338 adapter_t *adapter = devp;
1339 unsigned long flags;
1340 u8 status;
1341 u32 dword = 0;
1342 u8 nstatus;
1343 u8 completed[MAX_FIRMWARE_STATUS];
1344 int handled = 0;
1345
1346
1347 /*
1348 * Loop as long as the firmware has more completed commands for us.
1349 */
1350 spin_lock_irqsave(&adapter->lock, flags);
1351
1352 do {
1353 /* Check if a valid interrupt is pending */
1354 dword = RDOUTDOOR(adapter);
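		/*
		 * The firmware signals completions by writing 0x10001234 to
		 * the outbound doorbell; writing the same value back
		 * acknowledges it.
		 */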
1355 if(dword != 0x10001234) {
1356 /*
1357 * No more pending commands
1358 */
1359 goto out_unlock;
1360 }
1361 WROUTDOOR(adapter, 0x10001234);
1362
1363 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1364 == 0xFF) {
1365 cpu_relax();
1366 }
1367 adapter->mbox->m_in.numstatus = 0xFF;
1368
1369 status = adapter->mbox->m_in.status;
1370
1371 /*
1372 * decrement the pending queue counter
1373 */
1374 atomic_sub(nstatus, &adapter->pend_cmds);
1375
1376 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1377 nstatus);
1378
1379 /* Acknowledge interrupt */
1380 WRINDOOR(adapter, 0x2);
1381
1382 handled = 1;
1383
1384 while( RDINDOOR(adapter) & 0x02 )
1385 cpu_relax();
1386
1387 mega_cmd_done(adapter, completed, nstatus, status);
1388
1389 mega_rundoneq(adapter);
1390
1391 /* Loop through any pending requests */
1392 if(atomic_read(&adapter->quiescent) == 0) {
1393 mega_runpendq(adapter);
1394 }
1395
1396 } while(1);
1397
1398 out_unlock:
1399
1400 spin_unlock_irqrestore(&adapter->lock, flags);
1401
1402 return IRQ_RETVAL(handled);
1403 }
1404 /**
1405 * mega_cmd_done()
1406 * @adapter: pointer to our soft state
1407 * @completed: array of ids of completed commands
1408 * @nstatus: number of completed commands
1409 * @status: status of the last command completed
1410 *
1411 * Complete the commands and call the scsi mid-layer callback hooks.
1412 */
1413 static void
1414 mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1415 {
1416 mega_ext_passthru *epthru = NULL;
1417 struct scatterlist *sgl;
1418 struct scsi_cmnd *cmd = NULL;
1419 mega_passthru *pthru = NULL;
1420 mbox_t *mbox = NULL;
1421 u8 c;
1422 scb_t *scb;
1423 int islogical;
1424 int cmdid;
1425 int i;
1426
1427 /*
1428 * for all the commands completed, call the mid-layer callback routine
1429 * and free the scb.
1430 */
1431 for( i = 0; i < nstatus; i++ ) {
1432
1433 cmdid = completed[i];
1434
1435 /*
1436 * Only free SCBs for the commands coming down from the
1437 * mid-layer, not for those which were issued internally.
1438 *
1439 * For an internal command, restore the status returned by the
1440 * firmware so that the user can interpret it.
1441 */
1442 if (cmdid == CMDID_INT_CMDS) {
1443 scb = &adapter->int_scb;
1444 cmd = scb->cmd;
1445
1446 list_del_init(&scb->list);
1447 scb->state = SCB_FREE;
1448
1449 adapter->int_status = status;
1450 complete(&adapter->int_waitq);
1451 } else {
1452 scb = &adapter->scb_list[cmdid];
1453
1454 /*
1455 * Make sure f/w has completed a valid command
1456 */
1457 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
1458 dev_crit(&adapter->dev->dev, "invalid command "
1459 "Id %d, scb->state:%x, scsi cmd:%p\n",
1460 cmdid, scb->state, scb->cmd);
1461
1462 continue;
1463 }
1464
1465 /*
1466 * Was an abort issued for this command
1467 */
1468 if( scb->state & SCB_ABORT ) {
1469
1470 dev_warn(&adapter->dev->dev,
1471 "aborted cmd [%x] complete\n",
1472 scb->idx);
1473
1474 scb->cmd->result = (DID_ABORT << 16);
1475
1476 list_add_tail(SCSI_LIST(scb->cmd),
1477 &adapter->completed_list);
1478
1479 mega_free_scb(adapter, scb);
1480
1481 continue;
1482 }
1483
1484 /*
1485 * Was a reset issued for this command
1486 */
1487 if( scb->state & SCB_RESET ) {
1488
1489 dev_warn(&adapter->dev->dev,
1490 "reset cmd [%x] complete\n",
1491 scb->idx);
1492
1493 scb->cmd->result = (DID_RESET << 16);
1494
1495 list_add_tail(SCSI_LIST(scb->cmd),
1496 &adapter->completed_list);
1497
1498 mega_free_scb (adapter, scb);
1499
1500 continue;
1501 }
1502
1503 cmd = scb->cmd;
1504 pthru = scb->pthru;
1505 epthru = scb->epthru;
1506 mbox = (mbox_t *)scb->raw_mbox;
1507
1508 #if MEGA_HAVE_STATS
1509 {
1510
1511 int logdrv = mbox->m_out.logdrv;
1512
1513 islogical = adapter->logdrv_chan[cmd->channel];
1514 /*
1515 * Maintain an error counter for the logical drive.
1516 * Some application like SNMP agent need such
1517 * statistics
1518 */
1519 if( status && islogical && (cmd->cmnd[0] == READ_6 ||
1520 cmd->cmnd[0] == READ_10 ||
1521 cmd->cmnd[0] == READ_12)) {
1522 /*
1523 * Logical drive number increases by 0x80 when
1524 * a logical drive is deleted
1525 */
1526 adapter->rd_errors[logdrv%0x80]++;
1527 }
1528
1529 if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
1530 cmd->cmnd[0] == WRITE_10 ||
1531 cmd->cmnd[0] == WRITE_12)) {
1532 /*
1533 * Logical drive number increases by 0x80 when
1534 * a logical drive is deleted
1535 */
1536 adapter->wr_errors[logdrv%0x80]++;
1537 }
1538
1539 }
1540 #endif
1541 }
1542
1543 /*
1544 * Do not report the presence of hard disks on the channel: if an
1545 * INQUIRY was sent and the returned data identifies a hard disk or
1546 * removable hard disk rather than a logical drive, the request
1547 * should return failure! - PJ
1548 */
1549 islogical = adapter->logdrv_chan[cmd->device->channel];
1550 if( cmd->cmnd[0] == INQUIRY && !islogical ) {
1551
1552 sgl = scsi_sglist(cmd);
1553 if( sg_page(sgl) ) {
1554 c = *(unsigned char *) sg_virt(&sgl[0]);
1555 } else {
1556 dev_warn(&adapter->dev->dev, "invalid sg\n");
1557 c = 0;
1558 }
1559
1560 if(IS_RAID_CH(adapter, cmd->device->channel) &&
1561 ((c & 0x1F ) == TYPE_DISK)) {
1562 status = 0xF0;
1563 }
1564 }
1565
1566 /* clear result; otherwise, success returns corrupt value */
1567 cmd->result = 0;
1568
1569 /* Convert MegaRAID status to Linux error code */
1570 switch (status) {
1571 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
1572 cmd->result |= (DID_OK << 16);
1573 break;
1574
1575 case 0x02: /* ERROR_ABORTED, i.e.
1576 SCSI_STATUS_CHECK_CONDITION */
1577
1578 /* set sense_buffer and result fields */
1579 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
1580 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {
1581
1582 memcpy(cmd->sense_buffer, pthru->reqsensearea,
1583 14);
1584
1585 cmd->result = SAM_STAT_CHECK_CONDITION;
1586 }
1587 else {
1588 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
1589
1590 memcpy(cmd->sense_buffer,
1591 epthru->reqsensearea, 14);
1592
1593 cmd->result = SAM_STAT_CHECK_CONDITION;
1594 } else
1595 scsi_build_sense(cmd, 0,
1596 ABORTED_COMMAND, 0, 0);
1597 }
1598 break;
1599
1600 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e.
1601 SCSI_STATUS_BUSY */
1602 cmd->result |= (DID_BUS_BUSY << 16) | status;
1603 break;
1604
1605 default:
1606 #if MEGA_HAVE_CLUSTERING
1607 /*
1608 * If TEST_UNIT_READY fails, we know
1609 * MEGA_RESERVATION_STATUS failed
1610 */
1611 if( cmd->cmnd[0] == TEST_UNIT_READY ) {
1612 cmd->result |= (DID_ERROR << 16) |
1613 SAM_STAT_RESERVATION_CONFLICT;
1614 }
1615 else
1616 /*
1617 * Error code returned is 1 if Reserve or Release
1618 * failed or the input parameter is invalid
1619 */
1620 if( status == 1 &&
1621 (cmd->cmnd[0] == RESERVE ||
1622 cmd->cmnd[0] == RELEASE) ) {
1623
1624 cmd->result |= (DID_ERROR << 16) |
1625 SAM_STAT_RESERVATION_CONFLICT;
1626 }
1627 else
1628 #endif
1629 cmd->result |= (DID_BAD_TARGET << 16)|status;
1630 }
1631
1632 mega_free_scb(adapter, scb);
1633
1634 /* Add Scsi_Command to end of completed queue */
1635 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
1636 }
1637 }
1638
1639
1640 /*
1641 * mega_rundoneq()
1642 *
1643 * Run through the list of completed requests and finish them.
1644 */
1645 static void
1646 mega_rundoneq (adapter_t *adapter)
1647 {
1648 struct megaraid_cmd_priv *cmd_priv;
1649
1650 list_for_each_entry(cmd_priv, &adapter->completed_list, entry)
1651 scsi_done(megaraid_to_scsi_cmd(cmd_priv));
1652
1653 INIT_LIST_HEAD(&adapter->completed_list);
1654 }
1655
1656
1657 /*
1658 * Free an SCB structure
1659 * Note: we assume the scsi command associated with this scb has not been freed yet.
1660 */
1661 static void
1662 mega_free_scb(adapter_t *adapter, scb_t *scb)
1663 {
1664 switch( scb->dma_type ) {
1665
1666 case MEGA_DMA_TYPE_NONE:
1667 break;
1668
1669 case MEGA_SGLIST:
1670 scsi_dma_unmap(scb->cmd);
1671 break;
1672 default:
1673 break;
1674 }
1675
1676 /*
1677 * Remove from the pending list
1678 */
1679 list_del_init(&scb->list);
1680
1681 /* Link the scb back into free list */
1682 scb->state = SCB_FREE;
1683 scb->cmd = NULL;
1684
1685 list_add(&scb->list, &adapter->free_list);
1686 }
1687
1688
1689 static int
1690 __mega_busywait_mbox (adapter_t *adapter)
1691 {
1692 volatile mbox_t *mbox = adapter->mbox;
1693 long counter;
1694
1695 for (counter = 0; counter < 10000; counter++) {
1696 if (!mbox->m_in.busy)
1697 return 0;
1698 udelay(100);
1699 cond_resched();
1700 }
1701 return -1; /* give up after 1 second */
1702 }
1703
1704 /*
1705 * Copies data to SGLIST
1706 * Note: For 64 bit cards, we need a minimum of one SG element for read/write
1707 */
1708 static int
1709 mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1710 {
1711 struct scatterlist *sg;
1712 struct scsi_cmnd *cmd;
1713 int sgcnt;
1714 int idx;
1715
1716 cmd = scb->cmd;
1717
1718 /*
1719 * Copy Scatter-Gather list info into controller structure.
1720 *
1721 * The number of sg elements returned must not exceed our limit
1722 */
1723 sgcnt = scsi_dma_map(cmd);
1724
1725 scb->dma_type = MEGA_SGLIST;
1726
1727 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
1728
1729 *len = 0;
1730
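	/*
	 * Single-segment fast path for 32-bit adapters: hand the buffer
	 * address and length to the firmware directly, without an SG list.
	 */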
1731 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
1732 sg = scsi_sglist(cmd);
1733 scb->dma_h_bulkdata = sg_dma_address(sg);
1734 *buf = (u32)scb->dma_h_bulkdata;
1735 *len = sg_dma_len(sg);
1736 return 0;
1737 }
1738
1739 scsi_for_each_sg(cmd, sg, sgcnt, idx) {
1740 if (adapter->has_64bit_addr) {
1741 scb->sgl64[idx].address = sg_dma_address(sg);
1742 *len += scb->sgl64[idx].length = sg_dma_len(sg);
1743 } else {
1744 scb->sgl[idx].address = sg_dma_address(sg);
1745 *len += scb->sgl[idx].length = sg_dma_len(sg);
1746 }
1747 }
1748
1749 /* Reset pointer and length fields */
1750 *buf = scb->sgl_dma_addr;
1751
1752 /* Return count of SG requests */
1753 return sgcnt;
1754 }
1755
1756
1757 /*
1758 * mega_8_to_40ld()
1759 *
1760 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
1761 * Enquiry3 structures for later use
1762 */
1763 static void
1764 mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
1765 mega_product_info *product_info)
1766 {
1767 int i;
1768
1769 product_info->max_commands = inquiry->adapter_info.max_commands;
1770 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
1771 product_info->nchannels = inquiry->adapter_info.nchannels;
1772
1773 for (i = 0; i < 4; i++) {
1774 product_info->fw_version[i] =
1775 inquiry->adapter_info.fw_version[i];
1776
1777 product_info->bios_version[i] =
1778 inquiry->adapter_info.bios_version[i];
1779 }
1780 enquiry3->cache_flush_interval =
1781 inquiry->adapter_info.cache_flush_interval;
1782
1783 product_info->dram_size = inquiry->adapter_info.dram_size;
1784
1785 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;
1786
1787 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
1788 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
1789 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
1790 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
1791 }
1792
1793 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
1794 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
1795 }
1796
1797 static inline void
1798 mega_free_sgl(adapter_t *adapter)
1799 {
1800 scb_t *scb;
1801 int i;
1802
1803 for(i = 0; i < adapter->max_cmds; i++) {
1804
1805 scb = &adapter->scb_list[i];
1806
1807 if( scb->sgl64 ) {
1808 dma_free_coherent(&adapter->dev->dev,
1809 sizeof(mega_sgl64) * adapter->sglen,
1810 scb->sgl64, scb->sgl_dma_addr);
1811
1812 scb->sgl64 = NULL;
1813 }
1814
1815 if( scb->pthru ) {
1816 dma_free_coherent(&adapter->dev->dev,
1817 sizeof(mega_passthru), scb->pthru,
1818 scb->pthru_dma_addr);
1819
1820 scb->pthru = NULL;
1821 }
1822
1823 if( scb->epthru ) {
1824 dma_free_coherent(&adapter->dev->dev,
1825 sizeof(mega_ext_passthru),
1826 scb->epthru, scb->epthru_dma_addr);
1827
1828 scb->epthru = NULL;
1829 }
1830
1831 }
1832 }
1833
1834
1835 /*
1836 * Get information about the card/driver
1837 */
1838 const char *
1839 megaraid_info(struct Scsi_Host *host)
1840 {
1841 static char buffer[512];
1842 adapter_t *adapter;
1843
1844 adapter = (adapter_t *)host->hostdata;
1845
1846 sprintf (buffer,
1847 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
1848 adapter->fw_version, adapter->product_info.max_commands,
1849 adapter->host->max_id, adapter->host->max_channel,
1850 (u32)adapter->host->max_lun);
1851 return buffer;
1852 }
1853
1854 /*
1855 * Abort a previous SCSI request. Only commands on the pending list can be
1856 * aborted. All the commands issued to the F/W must complete.
1857 */
1858 static int
1859 megaraid_abort(struct scsi_cmnd *cmd)
1860 {
1861 adapter_t *adapter;
1862 int rval;
1863
1864 adapter = (adapter_t *)cmd->device->host->hostdata;
1865
1866 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);
1867
1868 /*
1869 * This is required here to complete any completed requests
1870 * to be communicated over to the mid layer.
1871 */
1872 mega_rundoneq(adapter);
1873
1874 return rval;
1875 }
1876
1877
1878 static int
1879 megaraid_reset(struct scsi_cmnd *cmd)
1880 {
1881 adapter_t *adapter;
1882 megacmd_t mc;
1883 int rval;
1884
1885 adapter = (adapter_t *)cmd->device->host->hostdata;
1886
1887 #if MEGA_HAVE_CLUSTERING
1888 mc.cmd = MEGA_CLUSTER_CMD;
1889 mc.opcode = MEGA_RESET_RESERVATIONS;
1890
1891 if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
1892 dev_warn(&adapter->dev->dev, "reservation reset failed\n");
1893 }
1894 else {
1895 dev_info(&adapter->dev->dev, "reservation reset\n");
1896 }
1897 #endif
1898
1899 spin_lock_irq(&adapter->lock);
1900
1901 rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);
1902
1903 /*
1904 * This is required here to complete any completed requests
1905 * to be communicated over to the mid layer.
1906 */
1907 mega_rundoneq(adapter);
1908 spin_unlock_irq(&adapter->lock);
1909
1910 return rval;
1911 }
1912
1913 /**
1914 * megaraid_abort_and_reset()
1915 * @adapter: megaraid soft state
1916 * @cmd: scsi command to be aborted or reset
1917 * @aor: abort or reset flag
1918 *
1919 * Try to locate the scsi command in the pending queue. If it is found and
1920 * has not been issued to the controller, abort/reset it. Otherwise return failure.
1921 */
1922 static int
1923 megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
1924 {
1925 struct list_head *pos, *next;
1926 scb_t *scb;
1927
1928 dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
1929 (aor == SCB_ABORT)? "ABORTING":"RESET",
1930 cmd->cmnd[0], cmd->device->channel,
1931 cmd->device->id, (u32)cmd->device->lun);
1932
1933 if(list_empty(&adapter->pending_list))
1934 return FAILED;
1935
1936 list_for_each_safe(pos, next, &adapter->pending_list) {
1937
1938 scb = list_entry(pos, scb_t, list);
1939
1940 if (scb->cmd == cmd) { /* Found command */
1941
1942 scb->state |= aor;
1943
1944 /*
1945 * Check if this command has firmware ownership. If
1946 * yes, we cannot reset this command. Whenever f/w
1947 * completes this command, we will return appropriate
1948 * status from ISR.
1949 */
1950 if( scb->state & SCB_ISSUED ) {
1951
1952 dev_warn(&adapter->dev->dev,
1953 "%s[%x], fw owner\n",
1954 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1955 scb->idx);
1956
1957 return FAILED;
1958 }
1959 else {
1960
1961 /*
1962 * Not yet issued! Remove from the pending
1963 * list
1964 */
1965 dev_warn(&adapter->dev->dev,
1966 "%s-[%x], driver owner\n",
1967 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1968 scb->idx);
1969
1970 mega_free_scb(adapter, scb);
1971
1972 if( aor == SCB_ABORT ) {
1973 cmd->result = (DID_ABORT << 16);
1974 }
1975 else {
1976 cmd->result = (DID_RESET << 16);
1977 }
1978
1979 list_add_tail(SCSI_LIST(cmd),
1980 &adapter->completed_list);
1981
1982 return SUCCESS;
1983 }
1984 }
1985 }
1986
1987 return FAILED;
1988 }
1989
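/*
 * make_local_pdev() clones the adapter's pci_dev and forces a 32-bit DMA
 * mask on the copy, presumably so that buffers allocated against it stay
 * within 32-bit addressable memory.
 */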
1990 static inline int
1991 make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
1992 {
1993 *pdev = pci_alloc_dev(NULL);
1994
1995 if( *pdev == NULL ) return -1;
1996
1997 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
1998
1999 if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
2000 kfree(*pdev);
2001 return -1;
2002 }
2003
2004 return 0;
2005 }
2006
2007 static inline void
2008 free_local_pdev(struct pci_dev *pdev)
2009 {
2010 kfree(pdev);
2011 }
2012
2013 /**
2014 * mega_allocate_inquiry()
2015 * @dma_handle: handle returned for dma address
2016 * @pdev: handle to pci device
2017 *
2018 * allocates memory for inquiry structure
2019 */
2020 static inline void *
2021 mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
2022 {
2023 return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
2024 dma_handle, GFP_KERNEL);
2025 }
2026
2027
2028 static inline void
2029 mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
2030 {
2031 dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
2032 dma_handle);
2033 }
2034
2035
2036 #ifdef CONFIG_PROC_FS
2037 /* Following code handles /proc fs */
2038
2039 /**
2040 * proc_show_config()
2041 * @m: Synthetic file construction data
2042 * @v: File iterator
2043 *
2044 * Display configuration information about the controller.
2045 */
2046 static int
2047 proc_show_config(struct seq_file *m, void *v)
2048 {
2049
2050 adapter_t *adapter = m->private;
2051
2052 seq_puts(m, MEGARAID_VERSION);
2053 if(adapter->product_info.product_name[0])
2054 seq_printf(m, "%s\n", adapter->product_info.product_name);
2055
2056 seq_puts(m, "Controller Type: ");
2057
2058 if( adapter->flag & BOARD_MEMMAP )
2059 seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
2060 else
2061 seq_puts(m, "418/428/434\n");
2062
2063 if(adapter->flag & BOARD_40LD)
2064 seq_puts(m, "Controller Supports 40 Logical Drives\n");
2065
2066 if(adapter->flag & BOARD_64BIT)
2067 seq_puts(m, "Controller capable of 64-bit memory addressing\n");
2068 if( adapter->has_64bit_addr )
2069 seq_puts(m, "Controller using 64-bit memory addressing\n");
2070 else
2071 seq_puts(m, "Controller is not using 64-bit memory addressing\n");
2072
2073 seq_printf(m, "Base = %08lx, Irq = %d, ",
2074 adapter->base, adapter->host->irq);
2075
2076 seq_printf(m, "Logical Drives = %d, Channels = %d\n",
2077 adapter->numldrv, adapter->product_info.nchannels);
2078
2079 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
2080 adapter->fw_version, adapter->bios_version,
2081 adapter->product_info.dram_size);
2082
2083 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
2084 adapter->product_info.max_commands, adapter->max_cmds);
2085
2086 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
2087 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
2088 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
2089 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
2090 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
2091 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
2092 seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
2093 seq_printf(m, "quiescent = %d\n",
2094 atomic_read(&adapter->quiescent));
2095 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
2096
2097 seq_puts(m, "\nModule Parameters:\n");
2098 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
2099 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
2100 return 0;
2101 }
2102
2103 /**
2104 * proc_show_stat()
2105 * @m: Synthetic file construction data
2106 * @v: File iterator
2107 *
2108 * Display statistical information about the I/O activity.
2109 */
2110 static int
2111 proc_show_stat(struct seq_file *m, void *v)
2112 {
2113 adapter_t *adapter = m->private;
2114 #if MEGA_HAVE_STATS
2115 int i;
2116 #endif
2117
2118 seq_puts(m, "Statistical Information for this controller\n");
2119 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
2120 #if MEGA_HAVE_STATS
2121 for(i = 0; i < adapter->numldrv; i++) {
2122 seq_printf(m, "Logical Drive %d:\n", i);
2123 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
2124 adapter->nreads[i], adapter->nwrites[i]);
2125 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
2126 adapter->nreadblocks[i], adapter->nwriteblocks[i]);
2127 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
2128 adapter->rd_errors[i], adapter->wr_errors[i]);
2129 }
2130 #else
2131 seq_puts(m, "IO and error counters not compiled in driver.\n");
2132 #endif
2133 return 0;
2134 }
2135
2136
2137 /**
2138 * proc_show_mbox()
2139 * @m: Synthetic file construction data
2140 * @v: File iterator
2141 *
2142 * Display mailbox information for the last command issued. This information
2143 * is good for debugging.
2144 */
2145 static int
2146 proc_show_mbox(struct seq_file *m, void *v)
2147 {
2148 adapter_t *adapter = m->private;
2149 volatile mbox_t *mbox = adapter->mbox;
2150
2151 seq_puts(m, "Contents of Mail Box Structure\n");
2152 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
2153 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
2154 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
2155 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
2156 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
2157 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
2158 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
2159 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
2160 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
2161 return 0;
2162 }
2163
2164
2165 /**
2166 * proc_show_rebuild_rate()
2167 * @m: Synthetic file construction data
2168 * @v: File iterator
2169 *
2170 * Display current rebuild rate
2171 */
2172 static int
2173 proc_show_rebuild_rate(struct seq_file *m, void *v)
2174 {
2175 adapter_t *adapter = m->private;
2176 dma_addr_t dma_handle;
2177 caddr_t inquiry;
2178 struct pci_dev *pdev;
2179
2180 if( make_local_pdev(adapter, &pdev) != 0 )
2181 return 0;
2182
2183 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2184 goto free_pdev;
2185
2186 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2187 seq_puts(m, "Adapter inquiry failed.\n");
2188 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2189 goto free_inquiry;
2190 }
2191
2192 if( adapter->flag & BOARD_40LD )
2193 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2194 ((mega_inquiry3 *)inquiry)->rebuild_rate);
2195 else
2196 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2197 ((mraid_ext_inquiry *)
2198 inquiry)->raid_inq.adapter_info.rebuild_rate);
2199
2200 free_inquiry:
2201 mega_free_inquiry(inquiry, dma_handle, pdev);
2202 free_pdev:
2203 free_local_pdev(pdev);
2204 return 0;
2205 }
2206
2207
2208 /**
2209 * proc_show_battery()
2210 * @m: Synthetic file construction data
2211 * @v: File iterator
2212 *
2213 * Display information about the battery module on the controller.
2214 */
2215 static int
2216 proc_show_battery(struct seq_file *m, void *v)
2217 {
2218 adapter_t *adapter = m->private;
2219 dma_addr_t dma_handle;
2220 caddr_t inquiry;
2221 struct pci_dev *pdev;
2222 u8 battery_status;
2223
2224 if( make_local_pdev(adapter, &pdev) != 0 )
2225 return 0;
2226
2227 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2228 goto free_pdev;
2229
2230 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2231 seq_puts(m, "Adapter inquiry failed.\n");
2232 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2233 goto free_inquiry;
2234 }
2235
2236 if( adapter->flag & BOARD_40LD ) {
2237 battery_status = ((mega_inquiry3 *)inquiry)->battery_status;
2238 }
2239 else {
2240 battery_status = ((mraid_ext_inquiry *)inquiry)->
2241 raid_inq.adapter_info.battery_status;
2242 }
2243
2244 /*
2245 * Decode the battery status
2246 */
2247 seq_printf(m, "Battery Status:[%d]", battery_status);
2248
2249 if(battery_status == MEGA_BATT_CHARGE_DONE)
2250 seq_puts(m, " Charge Done");
2251
2252 if(battery_status & MEGA_BATT_MODULE_MISSING)
2253 seq_puts(m, " Module Missing");
2254
2255 if(battery_status & MEGA_BATT_LOW_VOLTAGE)
2256 seq_puts(m, " Low Voltage");
2257
2258 if(battery_status & MEGA_BATT_TEMP_HIGH)
2259 seq_puts(m, " Temperature High");
2260
2261 if(battery_status & MEGA_BATT_PACK_MISSING)
2262 seq_puts(m, " Pack Missing");
2263
2264 if(battery_status & MEGA_BATT_CHARGE_INPROG)
2265 seq_puts(m, " Charge In-progress");
2266
2267 if(battery_status & MEGA_BATT_CHARGE_FAIL)
2268 seq_puts(m, " Charge Fail");
2269
2270 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
2271 seq_puts(m, " Cycles Exceeded");
2272
2273 seq_putc(m, '\n');
2274
2275 free_inquiry:
2276 mega_free_inquiry(inquiry, dma_handle, pdev);
2277 free_pdev:
2278 free_local_pdev(pdev);
2279 return 0;
2280 }
2281
2282
2283 /*
2284 * Display scsi inquiry
2285 */
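/*
 * The buffer holds standard SCSI INQUIRY data: bytes 8-15 carry the vendor
 * identification, bytes 16-31 the product identification, bytes 32-35 the
 * product revision level, the low five bits of byte 0 the peripheral device
 * type and the low three bits of byte 2 the ANSI version.
 */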
2286 static void
2287 mega_print_inquiry(struct seq_file *m, char *scsi_inq)
2288 {
2289 int i;
2290
2291 seq_puts(m, " Vendor: ");
2292 seq_write(m, scsi_inq + 8, 8);
2293 seq_puts(m, " Model: ");
2294 seq_write(m, scsi_inq + 16, 16);
2295 seq_puts(m, " Rev: ");
2296 seq_write(m, scsi_inq + 32, 4);
2297 seq_putc(m, '\n');
2298
2299 i = scsi_inq[0] & 0x1f;
2300 seq_printf(m, " Type: %s ", scsi_device_type(i));
2301
2302 seq_printf(m, " ANSI SCSI revision: %02x",
2303 scsi_inq[2] & 0x07);
2304
2305 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
2306 seq_puts(m, " CCS\n");
2307 else
2308 seq_putc(m, '\n');
2309 }
2310
2311 /**
2312 * proc_show_pdrv()
2313 * @m: Synthetic file construction data
2314 * @adapter: pointer to our soft state
2315 * @channel: channel
2316 *
2317 * Display information about the physical drives.
2318 */
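/*
 * The firmware returns one state byte per physical device, indexed by
 * (channel * 16 + target); the low nibble holds the PDRV_* state that is
 * decoded below.
 */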
2319 static int
2320 proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
2321 {
2322 dma_addr_t dma_handle;
2323 char *scsi_inq;
2324 dma_addr_t scsi_inq_dma_handle;
2325 caddr_t inquiry;
2326 struct pci_dev *pdev;
2327 u8 *pdrv_state;
2328 u8 state;
2329 int tgt;
2330 int max_channels;
2331 int i;
2332
2333 if( make_local_pdev(adapter, &pdev) != 0 )
2334 return 0;
2335
2336 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2337 goto free_pdev;
2338
2339 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2340 seq_puts(m, "Adapter inquiry failed.\n");
2341 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2342 goto free_inquiry;
2343 }
2344
2345
2346 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
2347 GFP_KERNEL);
2348 if( scsi_inq == NULL ) {
2349 seq_puts(m, "memory not available for scsi inq.\n");
2350 goto free_inquiry;
2351 }
2352
2353 if( adapter->flag & BOARD_40LD ) {
2354 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state;
2355 }
2356 else {
2357 pdrv_state = ((mraid_ext_inquiry *)inquiry)->
2358 raid_inq.pdrv_info.pdrv_state;
2359 }
2360
2361 max_channels = adapter->product_info.nchannels;
2362
2363 if( channel >= max_channels ) {
2364 goto free_pci;
2365 }
2366
2367 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
2368
2369 i = channel*16 + tgt;
2370
2371 state = *(pdrv_state + i);
2372 switch( state & 0x0F ) {
2373 case PDRV_ONLINE:
2374 seq_printf(m, "Channel:%2d Id:%2d State: Online",
2375 channel, tgt);
2376 break;
2377
2378 case PDRV_FAILED:
2379 seq_printf(m, "Channel:%2d Id:%2d State: Failed",
2380 channel, tgt);
2381 break;
2382
2383 case PDRV_RBLD:
2384 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
2385 channel, tgt);
2386 break;
2387
2388 case PDRV_HOTSPARE:
2389 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
2390 channel, tgt);
2391 break;
2392
2393 default:
2394 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
2395 channel, tgt);
2396 break;
2397 }
2398
2399 /*
2400 * This interface displays inquiries for disk drives
2401 * only. Inquiries for logical drives and non-disk
2402 * devices are available through /proc/scsi/scsi
2403 */
2404 memset(scsi_inq, 0, 256);
2405 if( mega_internal_dev_inquiry(adapter, channel, tgt,
2406 scsi_inq_dma_handle) ||
2407 (scsi_inq[0] & 0x1F) != TYPE_DISK ) {
2408 continue;
2409 }
2410
2411 /*
2412 * Check for overflow. We print less than 240
2413 * characters for inquiry
2414 */
2415 seq_puts(m, ".\n");
2416 mega_print_inquiry(m, scsi_inq);
2417 }
2418
2419 free_pci:
2420 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
2421 free_inquiry:
2422 mega_free_inquiry(inquiry, dma_handle, pdev);
2423 free_pdev:
2424 free_local_pdev(pdev);
2425 return 0;
2426 }
2427
2428 /**
2429 * proc_show_pdrv_ch0()
2430 * @m: Synthetic file construction data
2431 * @v: File iterator
2432 *
2433 * Display information about the physical drives on physical channel 0.
2434 */
2435 static int
2436 proc_show_pdrv_ch0(struct seq_file *m, void *v)
2437 {
2438 return proc_show_pdrv(m, m->private, 0);
2439 }
2440
2441
2442 /**
2443 * proc_show_pdrv_ch1()
2444 * @m: Synthetic file construction data
2445 * @v: File iterator
2446 *
2447 * Display information about the physical drives on physical channel 1.
2448 */
2449 static int
2450 proc_show_pdrv_ch1(struct seq_file *m, void *v)
2451 {
2452 return proc_show_pdrv(m, m->private, 1);
2453 }
2454
2455
2456 /**
2457 * proc_show_pdrv_ch2()
2458 * @m: Synthetic file construction data
2459 * @v: File iterator
2460 *
2461 * Display information about the physical drives on physical channel 2.
2462 */
2463 static int
2464 proc_show_pdrv_ch2(struct seq_file *m, void *v)
2465 {
2466 return proc_show_pdrv(m, m->private, 2);
2467 }
2468
2469
2470 /**
2471 * proc_show_pdrv_ch3()
2472 * @m: Synthetic file construction data
2473 * @v: File iterator
2474 *
2475 * Display information about the physical drives on physical channel 3.
2476 */
2477 static int
2478 proc_show_pdrv_ch3(struct seq_file *m, void *v)
2479 {
2480 return proc_show_pdrv(m, m->private, 3);
2481 }
2482
2483
2484 /**
2485 * proc_show_rdrv()
2486 * @m: Synthetic file construction data
2487 * @adapter: pointer to our soft state
2488 * @start: starting logical drive to display
2489 * @end: ending logical drive to display
2490 *
2491 * We do not print the inquiry information since it's already available through
2492 * /proc/scsi/scsi interface
2493 */
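/*
 * ldrv_state[] carries one byte per logical drive: the low nibble is the
 * RDRV_* state, while a high nibble of 0x2 or 0x1 means a check-consistency
 * or an initialization, respectively, is in progress.
 */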
2494 static int
2495 proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end)
2496 {
2497 dma_addr_t dma_handle;
2498 logdrv_param *lparam;
2499 megacmd_t mc;
2500 char *disk_array;
2501 dma_addr_t disk_array_dma_handle;
2502 caddr_t inquiry;
2503 struct pci_dev *pdev;
2504 u8 *rdrv_state;
2505 int num_ldrv;
2506 u32 array_sz;
2507 int i;
2508
2509 if( make_local_pdev(adapter, &pdev) != 0 )
2510 return 0;
2511
2512 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2513 goto free_pdev;
2514
2515 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2516 seq_puts(m, "Adapter inquiry failed.\n");
2517 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2518 goto free_inquiry;
2519 }
2520
2521 memset(&mc, 0, sizeof(megacmd_t));
2522
2523 if( adapter->flag & BOARD_40LD ) {
2524 array_sz = sizeof(disk_array_40ld);
2525
2526 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
2527
2528 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
2529 }
2530 else {
2531 array_sz = sizeof(disk_array_8ld);
2532
2533 rdrv_state = ((mraid_ext_inquiry *)inquiry)->
2534 raid_inq.logdrv_info.ldrv_state;
2535
2536 num_ldrv = ((mraid_ext_inquiry *)inquiry)->
2537 raid_inq.logdrv_info.num_ldrv;
2538 }
2539
2540 disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
2541 &disk_array_dma_handle, GFP_KERNEL);
2542
2543 if( disk_array == NULL ) {
2544 seq_puts(m, "memory not available.\n");
2545 goto free_inquiry;
2546 }
2547
2548 mc.xferaddr = (u32)disk_array_dma_handle;
2549
2550 if( adapter->flag & BOARD_40LD ) {
2551 mc.cmd = FC_NEW_CONFIG;
2552 mc.opcode = OP_DCMD_READ_CONFIG;
2553
2554 if( mega_internal_command(adapter, &mc, NULL) ) {
2555 seq_puts(m, "40LD read config failed.\n");
2556 goto free_pci;
2557 }
2558
2559 }
2560 else {
2561 mc.cmd = NEW_READ_CONFIG_8LD;
2562
2563 if( mega_internal_command(adapter, &mc, NULL) ) {
2564 mc.cmd = READ_CONFIG_8LD;
2565 if( mega_internal_command(adapter, &mc, NULL) ) {
2566 seq_puts(m, "8LD read config failed.\n");
2567 goto free_pci;
2568 }
2569 }
2570 }
2571
2572 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) {
2573
2574 if( adapter->flag & BOARD_40LD ) {
2575 lparam =
2576 &((disk_array_40ld *)disk_array)->ldrv[i].lparam;
2577 }
2578 else {
2579 lparam =
2580 &((disk_array_8ld *)disk_array)->ldrv[i].lparam;
2581 }
2582
2583 /*
2584 * Check for overflow. We print less than 240 characters for
2585 * information about each logical drive.
2586 */
2587 seq_printf(m, "Logical drive:%2d:, ", i);
2588
2589 switch( rdrv_state[i] & 0x0F ) {
2590 case RDRV_OFFLINE:
2591 seq_puts(m, "state: offline");
2592 break;
2593 case RDRV_DEGRADED:
2594 seq_puts(m, "state: degraded");
2595 break;
2596 case RDRV_OPTIMAL:
2597 seq_puts(m, "state: optimal");
2598 break;
2599 case RDRV_DELETED:
2600 seq_puts(m, "state: deleted");
2601 break;
2602 default:
2603 seq_puts(m, "state: unknown");
2604 break;
2605 }
2606
2607 /*
2608 * Check if check consistency or initialization is going on
2609 * for this logical drive.
2610 */
2611 if( (rdrv_state[i] & 0xF0) == 0x20 )
2612 seq_puts(m, ", check-consistency in progress");
2613 else if( (rdrv_state[i] & 0xF0) == 0x10 )
2614 seq_puts(m, ", initialization in progress");
2615
2616 seq_putc(m, '\n');
2617
2618 seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
2619 seq_printf(m, "RAID level:%3d, ", lparam->level);
2620 seq_printf(m, "Stripe size:%3d, ",
2621 lparam->stripe_sz ? lparam->stripe_sz/2: 128);
2622 seq_printf(m, "Row size:%3d\n", lparam->row_size);
2623
2624 seq_puts(m, "Read Policy: ");
2625 switch(lparam->read_ahead) {
2626 case NO_READ_AHEAD:
2627 seq_puts(m, "No read ahead, ");
2628 break;
2629 case READ_AHEAD:
2630 seq_puts(m, "Read ahead, ");
2631 break;
2632 case ADAP_READ_AHEAD:
2633 seq_puts(m, "Adaptive, ");
2634 break;
2635
2636 }
2637
2638 seq_puts(m, "Write Policy: ");
2639 switch(lparam->write_mode) {
2640 case WRMODE_WRITE_THRU:
2641 seq_puts(m, "Write thru, ");
2642 break;
2643 case WRMODE_WRITE_BACK:
2644 seq_puts(m, "Write back, ");
2645 break;
2646 }
2647
2648 seq_puts(m, "Cache Policy: ");
2649 switch(lparam->direct_io) {
2650 case CACHED_IO:
2651 seq_puts(m, "Cached IO\n\n");
2652 break;
2653 case DIRECT_IO:
2654 seq_puts(m, "Direct IO\n\n");
2655 break;
2656 }
2657 }
2658
2659 free_pci:
2660 dma_free_coherent(&pdev->dev, array_sz, disk_array,
2661 disk_array_dma_handle);
2662 free_inquiry:
2663 mega_free_inquiry(inquiry, dma_handle, pdev);
2664 free_pdev:
2665 free_local_pdev(pdev);
2666 return 0;
2667 }
2668
2669 /**
2670 * proc_show_rdrv_10()
2671 * @m: Synthetic file construction data
2672 * @v: File iterator
2673 *
2674 * Display real time information about the logical drives 0 through 9.
2675 */
2676 static int
2677 proc_show_rdrv_10(struct seq_file *m, void *v)
2678 {
2679 return proc_show_rdrv(m, m->private, 0, 9);
2680 }
2681
2682
2683 /**
2684 * proc_show_rdrv_20()
2685 * @m: Synthetic file construction data
2686 * @v: File iterator
2687 *
2688 * Display real time information about the logical drives 10 through 19.
2689 */
2690 static int
2691 proc_show_rdrv_20(struct seq_file *m, void *v)
2692 {
2693 return proc_show_rdrv(m, m->private, 10, 19);
2694 }
2695
2696
2697 /**
2698 * proc_show_rdrv_30()
2699 * @m: Synthetic file construction data
2700 * @v: File iterator
2701 *
2702 * Display real time information about the logical drives 20 through 29.
2703 */
2704 static int
2705 proc_show_rdrv_30(struct seq_file *m, void *v)
2706 {
2707 return proc_show_rdrv(m, m->private, 20, 29);
2708 }
2709
2710
2711 /**
2712 * proc_show_rdrv_40()
2713 * @m: Synthetic file construction data
2714 * @v: File iterator
2715 *
2716 * Display real time information about the logical drives 30 through 39.
2717 */
2718 static int
2719 proc_show_rdrv_40(struct seq_file *m, void *v)
2720 {
2721 return proc_show_rdrv(m, m->private, 30, 39);
2722 }
2723
2724 /**
2725 * mega_create_proc_entry()
2726 * @index: index in soft state array
2727 * @parent: parent node for this /proc entry
2728 *
2729 * Creates /proc entries for our controllers.
2730 */
2731 static void
2732 mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2733 {
2734 adapter_t *adapter = hba_soft_state[index];
2735 struct proc_dir_entry *dir;
2736 u8 string[16];
2737
2738 sprintf(string, "hba%d", adapter->host->host_no);
2739 dir = proc_mkdir_data(string, 0, parent, adapter);
2740 if (!dir) {
2741 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
2742 return;
2743 }
2744
2745 proc_create_single_data("config", S_IRUSR, dir,
2746 proc_show_config, adapter);
2747 proc_create_single_data("stat", S_IRUSR, dir,
2748 proc_show_stat, adapter);
2749 proc_create_single_data("mailbox", S_IRUSR, dir,
2750 proc_show_mbox, adapter);
2751 #if MEGA_HAVE_ENH_PROC
2752 proc_create_single_data("rebuild-rate", S_IRUSR, dir,
2753 proc_show_rebuild_rate, adapter);
2754 proc_create_single_data("battery-status", S_IRUSR, dir,
2755 proc_show_battery, adapter);
2756 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir,
2757 proc_show_pdrv_ch0, adapter);
2758 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir,
2759 proc_show_pdrv_ch1, adapter);
2760 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir,
2761 proc_show_pdrv_ch2, adapter);
2762 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir,
2763 proc_show_pdrv_ch3, adapter);
2764 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir,
2765 proc_show_rdrv_10, adapter);
2766 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir,
2767 proc_show_rdrv_20, adapter);
2768 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir,
2769 proc_show_rdrv_30, adapter);
2770 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir,
2771 proc_show_rdrv_40, adapter);
2772 #endif
2773 }
2774
2775 #else
2776 static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2777 {
2778 }
2779 #endif
2780
2781
2782 /*
2783 * megaraid_biosparam()
2784 *
2785 * Return the disk geometry for a particular disk
2786 */
2787 static int
2788 megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
2789 sector_t capacity, int geom[])
2790 {
2791 adapter_t *adapter;
2792 int heads;
2793 int sectors;
2794 int cylinders;
2795
2796 /* Get pointer to host config structure */
2797 adapter = (adapter_t *)sdev->host->hostdata;
2798
2799 if (IS_RAID_CH(adapter, sdev->channel)) {
2800 /* Default heads (64) & sectors (32) */
2801 heads = 64;
2802 sectors = 32;
2803 cylinders = (ulong)capacity / (heads * sectors);
2804
2805 /*
2806 * Handle extended translation size for logical drives
2807 * > 1Gb
2808 */
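/*
 * For example, a 4GB logical drive (0x800000 512-byte
 * sectors) gets heads = 255 and sectors = 63, giving
 * 0x800000 / (255 * 63) = 522 cylinders.
 */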
2809 if ((ulong)capacity >= 0x200000) {
2810 heads = 255;
2811 sectors = 63;
2812 cylinders = (ulong)capacity / (heads * sectors);
2813 }
2814
2815 /* return result */
2816 geom[0] = heads;
2817 geom[1] = sectors;
2818 geom[2] = cylinders;
2819 }
2820 else {
2821 if (scsi_partsize(bdev, capacity, geom))
2822 return 0;
2823
2824 dev_info(&adapter->dev->dev,
2825 "invalid partition on this disk on channel %d\n",
2826 sdev->channel);
2827
2828 /* Default heads (64) & sectors (32) */
2829 heads = 64;
2830 sectors = 32;
2831 cylinders = (ulong)capacity / (heads * sectors);
2832
2833 /* Handle extended translation size for logical drives > 1Gb */
2834 if ((ulong)capacity >= 0x200000) {
2835 heads = 255;
2836 sectors = 63;
2837 cylinders = (ulong)capacity / (heads * sectors);
2838 }
2839
2840 /* return result */
2841 geom[0] = heads;
2842 geom[1] = sectors;
2843 geom[2] = cylinders;
2844 }
2845
2846 return 0;
2847 }
2848
2849 /**
2850 * mega_init_scb()
2851 * @adapter: pointer to our soft state
2852 *
2853 * Allocate memory for the various pointers in the scb structures:
2854 * scatter-gather list pointer, passthru and extended passthru structure
2855 * pointers.
2856 */
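/*
 * Each of the adapter->max_cmds SCBs therefore owns three coherent DMA
 * buffers: a scatter-gather list of adapter->sglen mega_sgl64 entries, a
 * mega_passthru and a mega_ext_passthru. All of them are released via
 * mega_free_sgl() if any allocation fails.
 */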
2857 static int
2858 mega_init_scb(adapter_t *adapter)
2859 {
2860 scb_t *scb;
2861 int i;
2862
2863 for( i = 0; i < adapter->max_cmds; i++ ) {
2864
2865 scb = &adapter->scb_list[i];
2866
2867 scb->sgl64 = NULL;
2868 scb->sgl = NULL;
2869 scb->pthru = NULL;
2870 scb->epthru = NULL;
2871 }
2872
2873 for( i = 0; i < adapter->max_cmds; i++ ) {
2874
2875 scb = &adapter->scb_list[i];
2876
2877 scb->idx = i;
2878
2879 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
2880 sizeof(mega_sgl64) * adapter->sglen,
2881 &scb->sgl_dma_addr, GFP_KERNEL);
2882
2883 scb->sgl = (mega_sglist *)scb->sgl64;
2884
2885 if( !scb->sgl ) {
2886 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
2887 mega_free_sgl(adapter);
2888 return -1;
2889 }
2890
2891 scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
2892 sizeof(mega_passthru),
2893 &scb->pthru_dma_addr, GFP_KERNEL);
2894
2895 if( !scb->pthru ) {
2896 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
2897 mega_free_sgl(adapter);
2898 return -1;
2899 }
2900
2901 scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
2902 sizeof(mega_ext_passthru),
2903 &scb->epthru_dma_addr, GFP_KERNEL);
2904
2905 if( !scb->epthru ) {
2906 dev_warn(&adapter->dev->dev,
2907 "Can't allocate extended passthru\n");
2908 mega_free_sgl(adapter);
2909 return -1;
2910 }
2911
2912
2913 scb->dma_type = MEGA_DMA_TYPE_NONE;
2914
2915 /*
2916 * Link to the free list. No lock is required since we are
2917 * loading the driver, so no commands are possible right
2918 * now.
2919 */
2920 scb->state = SCB_FREE;
2921 scb->cmd = NULL;
2922 list_add(&scb->list, &adapter->free_list);
2923 }
2924
2925 return 0;
2926 }
2927
2928
2929 /**
2930 * megadev_open()
2931 * @inode: unused
2932 * @filep: unused
2933 *
2934 * Routines for the character/ioctl interface to the driver. Find out if this
2935 * is a valid open.
2936 */
2937 static int
2938 megadev_open (struct inode *inode, struct file *filep)
2939 {
2940 /*
2941 * Only allow superuser to access private ioctl interface
2942 */
2943 if( !capable(CAP_SYS_ADMIN) ) return -EACCES;
2944
2945 return 0;
2946 }
2947
2948
2949 /**
2950 * megadev_ioctl()
2951 * @filep: Our device file
2952 * @cmd: ioctl command
2953 * @arg: user buffer
2954 *
2955 * ioctl entry point for our private ioctl interface. We move the data in from
2956 * the user space, prepare the command (if necessary, convert the old MIMD
2957 * ioctl to new ioctl command), and issue a synchronous command to the
2958 * controller.
2959 */
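/*
 * Rough flow: mega_m_to_n() converts the user buffer into a nitioctl_t;
 * GET_DRIVER_VER, GET_N_ADAP and GET_ADAP_INFO (plus GET_STATS when
 * compiled in) are answered directly, while MBOX_CMD allocates bounce
 * buffers below 4GB, runs mega_internal_command() and copies status and
 * data back through mega_n_to_m().
 */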
2960 static int
2961 megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2962 {
2963 adapter_t *adapter;
2964 nitioctl_t uioc;
2965 int adapno;
2966 int rval;
2967 mega_passthru __user *upthru; /* user address for passthru */
2968 mega_passthru *pthru; /* copy user passthru here */
2969 dma_addr_t pthru_dma_hndl;
2970 void *data = NULL; /* data to be transferred */
2971 dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
2972 megacmd_t mc;
2973 #if MEGA_HAVE_STATS
2974 megastat_t __user *ustats = NULL;
2975 int num_ldrv = 0;
2976 #endif
2977 u32 uxferaddr = 0;
2978 struct pci_dev *pdev;
2979
2980 /*
2981 * Make sure only USCSICMD is issued through this interface.
2982 * A MIMD application would still fire different commands.
2983 */
2984 if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
2985 return -EINVAL;
2986 }
2987
2988 /*
2989 * Check and convert a possible MIMD command to NIT command.
2990 * mega_m_to_n() copies the data from the user space, so we do not
2991 * have to do it here.
2992 * NOTE: We will need some user address to copy out the data; therefore
2993 * the interface layer will also provide us with the required user
2994 * addresses.
2995 */
2996 memset(&uioc, 0, sizeof(nitioctl_t));
2997 if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
2998 return rval;
2999
3000
3001 switch( uioc.opcode ) {
3002
3003 case GET_DRIVER_VER:
3004 if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
3005 return (-EFAULT);
3006
3007 break;
3008
3009 case GET_N_ADAP:
3010 if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
3011 return (-EFAULT);
3012
3013 /*
3014 * Shucks. The MIMD interface returns a positive value for the number
3015 * of adapters. TODO: Change it to return 0 when there is no
3016 * application using the MIMD interface.
3017 */
3018 return hba_count;
3019
3020 case GET_ADAP_INFO:
3021
3022 /*
3023 * Which adapter
3024 */
3025 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3026 return (-ENODEV);
3027
3028 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
3029 sizeof(struct mcontroller)) )
3030 return (-EFAULT);
3031 break;
3032
3033 #if MEGA_HAVE_STATS
3034
3035 case GET_STATS:
3036 /*
3037 * Which adapter
3038 */
3039 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3040 return (-ENODEV);
3041
3042 adapter = hba_soft_state[adapno];
3043
3044 ustats = uioc.uioc_uaddr;
3045
3046 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
3047 return (-EFAULT);
3048
3049 /*
3050 * Check for the validity of the logical drive number
3051 */
3052 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
3053
3054 if( copy_to_user(ustats->nreads, adapter->nreads,
3055 num_ldrv*sizeof(u32)) )
3056 return -EFAULT;
3057
3058 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
3059 num_ldrv*sizeof(u32)) )
3060 return -EFAULT;
3061
3062 if( copy_to_user(ustats->nwrites, adapter->nwrites,
3063 num_ldrv*sizeof(u32)) )
3064 return -EFAULT;
3065
3066 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
3067 num_ldrv*sizeof(u32)) )
3068 return -EFAULT;
3069
3070 if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
3071 num_ldrv*sizeof(u32)) )
3072 return -EFAULT;
3073
3074 if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
3075 num_ldrv*sizeof(u32)) )
3076 return -EFAULT;
3077
3078 return 0;
3079
3080 #endif
3081 case MBOX_CMD:
3082
3083 /*
3084 * Which adapter
3085 */
3086 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3087 return (-ENODEV);
3088
3089 adapter = hba_soft_state[adapno];
3090
3091 /*
3092 * Deletion of logical drive is a special case. The adapter
3093 * should be quiescent before this command is issued.
3094 */
3095 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
3096 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
3097
3098 /*
3099 * Do we support this feature
3100 */
3101 if( !adapter->support_random_del ) {
3102 dev_warn(&adapter->dev->dev, "logdrv "
3103 "delete on non-supporting F/W\n");
3104
3105 return (-EINVAL);
3106 }
3107
3108 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
3109
3110 if( rval == 0 ) {
3111 memset(&mc, 0, sizeof(megacmd_t));
3112
3113 mc.status = rval;
3114
3115 rval = mega_n_to_m((void __user *)arg, &mc);
3116 }
3117
3118 return rval;
3119 }
3120 /*
3121 * This interface only supports the regular passthru commands.
3122 * Reject extended passthru and 64-bit passthru
3123 */
3124 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
3125 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
3126
3127 dev_warn(&adapter->dev->dev, "rejected passthru\n");
3128
3129 return (-EINVAL);
3130 }
3131
3132 /*
3133 * For all internal commands, the buffer must be allocated in
3134 * <4GB address range
3135 */
3136 if( make_local_pdev(adapter, &pdev) != 0 )
3137 return -EIO;
3138
3139 /* Is it a passthru command or a DCMD */
3140 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
3141 /* Passthru commands */
3142
3143 pthru = dma_alloc_coherent(&pdev->dev,
3144 sizeof(mega_passthru),
3145 &pthru_dma_hndl, GFP_KERNEL);
3146
3147 if( pthru == NULL ) {
3148 free_local_pdev(pdev);
3149 return (-ENOMEM);
3150 }
3151
3152 /*
3153 * The user passthru structure
3154 */
3155 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
3156
3157 /*
3158 * Copy in the user passthru here.
3159 */
3160 if( copy_from_user(pthru, upthru,
3161 sizeof(mega_passthru)) ) {
3162
3163 dma_free_coherent(&pdev->dev,
3164 sizeof(mega_passthru),
3165 pthru, pthru_dma_hndl);
3166
3167 free_local_pdev(pdev);
3168
3169 return (-EFAULT);
3170 }
3171
3172 /*
3173 * Is there a data transfer
3174 */
3175 if( pthru->dataxferlen ) {
3176 data = dma_alloc_coherent(&pdev->dev,
3177 pthru->dataxferlen,
3178 &data_dma_hndl,
3179 GFP_KERNEL);
3180
3181 if( data == NULL ) {
3182 dma_free_coherent(&pdev->dev,
3183 sizeof(mega_passthru),
3184 pthru,
3185 pthru_dma_hndl);
3186
3187 free_local_pdev(pdev);
3188
3189 return (-ENOMEM);
3190 }
3191
3192 /*
3193 * Save the user address and point the kernel
3194 * address at just allocated memory
3195 */
3196 uxferaddr = pthru->dataxferaddr;
3197 pthru->dataxferaddr = data_dma_hndl;
3198 }
3199
3200
3201 /*
3202 * Is data coming down-stream
3203 */
3204 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) {
3205 /*
3206 * Get the user data
3207 */
3208 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3209 pthru->dataxferlen) ) {
3210 rval = (-EFAULT);
3211 goto freemem_and_return;
3212 }
3213 }
3214
3215 memset(&mc, 0, sizeof(megacmd_t));
3216
3217 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
3218 mc.xferaddr = (u32)pthru_dma_hndl;
3219
3220 /*
3221 * Issue the command
3222 */
3223 mega_internal_command(adapter, &mc, pthru);
3224
3225 rval = mega_n_to_m((void __user *)arg, &mc);
3226
3227 if( rval ) goto freemem_and_return;
3228
3229
3230 /*
3231 * Is data going up-stream
3232 */
3233 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
3234 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3235 pthru->dataxferlen) ) {
3236 rval = (-EFAULT);
3237 }
3238 }
3239
3240 /*
3241 * Send the request sense data also, irrespective of
3242 * whether the user has asked for it or not.
3243 */
3244 if (copy_to_user(upthru->reqsensearea,
3245 pthru->reqsensearea, 14))
3246 rval = -EFAULT;
3247
3248 freemem_and_return:
3249 if( pthru->dataxferlen ) {
3250 dma_free_coherent(&pdev->dev,
3251 pthru->dataxferlen, data,
3252 data_dma_hndl);
3253 }
3254
3255 dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
3256 pthru, pthru_dma_hndl);
3257
3258 free_local_pdev(pdev);
3259
3260 return rval;
3261 }
3262 else {
3263 /* DCMD commands */
3264
3265 /*
3266 * Is there a data transfer
3267 */
3268 if( uioc.xferlen ) {
3269 data = dma_alloc_coherent(&pdev->dev,
3270 uioc.xferlen,
3271 &data_dma_hndl,
3272 GFP_KERNEL);
3273
3274 if( data == NULL ) {
3275 free_local_pdev(pdev);
3276 return (-ENOMEM);
3277 }
3278
3279 uxferaddr = MBOX(uioc)->xferaddr;
3280 }
3281
3282 /*
3283 * Is data coming down-stream
3284 */
3285 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
3286 /*
3287 * Get the user data
3288 */
3289 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3290 uioc.xferlen) ) {
3291
3292 dma_free_coherent(&pdev->dev,
3293 uioc.xferlen, data,
3294 data_dma_hndl);
3295
3296 free_local_pdev(pdev);
3297
3298 return (-EFAULT);
3299 }
3300 }
3301
3302 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
3303
3304 mc.xferaddr = (u32)data_dma_hndl;
3305
3306 /*
3307 * Issue the command
3308 */
3309 mega_internal_command(adapter, &mc, NULL);
3310
3311 rval = mega_n_to_m((void __user *)arg, &mc);
3312
3313 if( rval ) {
3314 if( uioc.xferlen ) {
3315 dma_free_coherent(&pdev->dev,
3316 uioc.xferlen, data,
3317 data_dma_hndl);
3318 }
3319
3320 free_local_pdev(pdev);
3321
3322 return rval;
3323 }
3324
3325 /*
3326 * Is data going up-stream
3327 */
3328 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
3329 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3330 uioc.xferlen) ) {
3331
3332 rval = (-EFAULT);
3333 }
3334 }
3335
3336 if( uioc.xferlen ) {
3337 dma_free_coherent(&pdev->dev, uioc.xferlen,
3338 data, data_dma_hndl);
3339 }
3340
3341 free_local_pdev(pdev);
3342
3343 return rval;
3344 }
3345
3346 default:
3347 return (-EINVAL);
3348 }
3349
3350 return 0;
3351 }
3352
3353 static long
3354 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3355 {
3356 int ret;
3357
3358 mutex_lock(&megadev_mutex);
3359 ret = megadev_ioctl(filep, cmd, arg);
3360 mutex_unlock(&megadev_mutex);
3361
3362 return ret;
3363 }
3364
3365 /**
3366 * mega_m_to_n()
3367 * @arg: user address
3368 * @uioc: new ioctl structure
3369 *
3370 * A thin layer that converts the older MIMD interface ioctl structure
3371 * to the newer NIT ioctl structure.
3372 *
3373 * It also copies in the user data needed for the conversion.
3374 */
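/*
 * MIMD opcode 0x82 maps the informational sub-opcodes (driver version,
 * adapter count, adapter info); opcodes 0x81 and 0x80 become MBOX_CMD with
 * the 18-byte mailbox copied verbatim and the UIOC_RD/UIOC_WR flags derived
 * from the outlen/inlen fields.
 */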
3375 static int
3376 mega_m_to_n(void __user *arg, nitioctl_t *uioc)
3377 {
3378 struct uioctl_t uioc_mimd;
3379 char signature[8] = {0};
3380 u8 opcode;
3381 u8 subopcode;
3382
3383
3384 /*
3385 * Check if the application conforms to NIT. We do not have to do much
3386 * in that case.
3387 * We exploit the fact that the signature is stored in the very
3388 * beginning of the structure.
3389 */
3390
3391 if( copy_from_user(signature, arg, 7) )
3392 return (-EFAULT);
3393
3394 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3395
3396 /*
3397 * NOTE NOTE: The NIT ioctl is still in flux because of a
3398 * change of mailbox definition in HPE. No applications use
3399 * this interface yet, so let's not have applications use it
3400 * till the new specifications are in place.
3401 */
3402 return -EINVAL;
3403 #if 0
3404 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) )
3405 return (-EFAULT);
3406 return 0;
3407 #endif
3408 }
3409
3410 /*
3411 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t
3412 *
3413 * Get the user ioctl structure
3414 */
3415 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) )
3416 return (-EFAULT);
3417
3418
3419 /*
3420 * Get the opcode and subopcode for the commands
3421 */
3422 opcode = uioc_mimd.ui.fcs.opcode;
3423 subopcode = uioc_mimd.ui.fcs.subopcode;
3424
3425 switch (opcode) {
3426 case 0x82:
3427
3428 switch (subopcode) {
3429
3430 case MEGAIOC_QDRVRVER: /* Query driver version */
3431 uioc->opcode = GET_DRIVER_VER;
3432 uioc->uioc_uaddr = uioc_mimd.data;
3433 break;
3434
3435 case MEGAIOC_QNADAP: /* Get # of adapters */
3436 uioc->opcode = GET_N_ADAP;
3437 uioc->uioc_uaddr = uioc_mimd.data;
3438 break;
3439
3440 case MEGAIOC_QADAPINFO: /* Get adapter information */
3441 uioc->opcode = GET_ADAP_INFO;
3442 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3443 uioc->uioc_uaddr = uioc_mimd.data;
3444 break;
3445
3446 default:
3447 return(-EINVAL);
3448 }
3449
3450 break;
3451
3452
3453 case 0x81:
3454
3455 uioc->opcode = MBOX_CMD;
3456 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3457
3458 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3459
3460 uioc->xferlen = uioc_mimd.ui.fcs.length;
3461
3462 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3463 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3464
3465 break;
3466
3467 case 0x80:
3468
3469 uioc->opcode = MBOX_CMD;
3470 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3471
3472 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3473
3474 /*
3475 * Choose the xferlen bigger of input and output data
3476 */
3477 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
3478 uioc_mimd.outlen : uioc_mimd.inlen;
3479
3480 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3481 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3482
3483 break;
3484
3485 default:
3486 return (-EINVAL);
3487
3488 }
3489
3490 return 0;
3491 }
3492
3493 /*
3494 * mega_n_to_m()
3495 * @arg: user address
3496 * @mc: mailbox command
3497 *
3498 * Updates the status information to the application, depending on application
3499 * conforms to older mimd ioctl interface or newer NIT ioctl interface
3500 */
3501 static int
3502 mega_n_to_m(void __user *arg, megacmd_t *mc)
3503 {
3504 nitioctl_t __user *uiocp;
3505 megacmd_t __user *umc;
3506 mega_passthru __user *upthru;
3507 struct uioctl_t __user *uioc_mimd;
3508 char signature[8] = {0};
3509
3510 /*
3511 * Check if the application conforms to NIT.
3512 */
3513 if( copy_from_user(signature, arg, 7) )
3514 return -EFAULT;
3515
3516 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3517
3518 uiocp = arg;
3519
3520 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) )
3521 return (-EFAULT);
3522
3523 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3524
3525 umc = MBOX_P(uiocp);
3526
3527 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3528 return -EFAULT;
3529
3530 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus))
3531 return (-EFAULT);
3532 }
3533 }
3534 else {
3535 uioc_mimd = arg;
3536
3537 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) )
3538 return (-EFAULT);
3539
3540 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3541
3542 umc = (megacmd_t __user *)uioc_mimd->mbox;
3543
3544 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3545 return (-EFAULT);
3546
3547 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) )
3548 return (-EFAULT);
3549 }
3550 }
3551
3552 return 0;
3553 }
3554
3555
3556 /*
3557 * MEGARAID 'FW' commands.
3558 */
3559
3560 /**
3561 * mega_is_bios_enabled()
3562 * @adapter: pointer to our soft state
3563 *
3564 * issue command to find out if the BIOS is enabled for this controller
3565 */
3566 static int
3567 mega_is_bios_enabled(adapter_t *adapter)
3568 {
3569 struct mbox_out mbox;
3570 unsigned char *raw_mbox = (u8 *)&mbox;
3571
3572 memset(&mbox, 0, sizeof(mbox));
3573
3574 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3575
3576 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3577
3578 raw_mbox[0] = IS_BIOS_ENABLED;
3579 raw_mbox[2] = GET_BIOS;
3580
3581 issue_scb_block(adapter, raw_mbox);
3582
3583 return *(char *)adapter->mega_buffer;
3584 }
3585
3586
3587 /**
3588 * mega_enum_raid_scsi()
3589 * @adapter: pointer to our soft state
3590 *
3591 * Find out what channels are RAID/SCSI. This information is used to
3592 * differentiate the virtual channels and physical channels and to support
3593 * ROMB feature and non-disk devices.
3594 */
3595 static void
3596 mega_enum_raid_scsi(adapter_t *adapter)
3597 {
3598 struct mbox_out mbox;
3599 unsigned char *raw_mbox = (u8 *)&mbox;
3600 int i;
3601
3602 memset(&mbox, 0, sizeof(mbox));
3603
3604 /*
3605 * issue command to find out what channels are raid/scsi
3606 */
3607 raw_mbox[0] = CHNL_CLASS;
3608 raw_mbox[2] = GET_CHNL_CLASS;
3609
3610 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3611
3612 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3613
3614 /*
3615 * Non-ROMB firmware fails this command, so all channels
3616 * must be treated as RAID
3617 */
3618 adapter->mega_ch_class = 0xFF;
3619
3620 if(!issue_scb_block(adapter, raw_mbox)) {
3621 adapter->mega_ch_class = *((char *)adapter->mega_buffer);
3622
3623 }
3624
3625 for( i = 0; i < adapter->product_info.nchannels; i++ ) {
3626 if( (adapter->mega_ch_class >> i) & 0x01 ) {
3627 dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
3628 i);
3629 }
3630 else {
3631 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
3632 i);
3633 }
3634 }
3635
3636 return;
3637 }
3638
3639
3640 /**
3641 * mega_get_boot_drv()
3642 * @adapter: pointer to our soft state
3643 *
3644 * Find out which device is the boot device. Note, any logical drive or any
3645 * physical device (e.g., a CDROM) can be designated as a boot device.
3646 */
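/*
 * The private BIOS data is trusted only if the stored checksum equals the
 * two's complement of the sum of its first 14 bytes. Bit 7 of boot_drv then
 * selects a physical boot device (channel = id / 16, target = id % 16)
 * instead of a logical drive.
 */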
3647 static void
3648 mega_get_boot_drv(adapter_t *adapter)
3649 {
3650 struct private_bios_data *prv_bios_data;
3651 struct mbox_out mbox;
3652 unsigned char *raw_mbox = (u8 *)&mbox;
3653 u16 cksum = 0;
3654 u8 *cksum_p;
3655 u8 boot_pdrv;
3656 int i;
3657
3658 memset(&mbox, 0, sizeof(mbox));
3659
3660 raw_mbox[0] = BIOS_PVT_DATA;
3661 raw_mbox[2] = GET_BIOS_PVT_DATA;
3662
3663 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3664
3665 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3666
3667 adapter->boot_ldrv_enabled = 0;
3668 adapter->boot_ldrv = 0;
3669
3670 adapter->boot_pdrv_enabled = 0;
3671 adapter->boot_pdrv_ch = 0;
3672 adapter->boot_pdrv_tgt = 0;
3673
3674 if(issue_scb_block(adapter, raw_mbox) == 0) {
3675 prv_bios_data =
3676 (struct private_bios_data *)adapter->mega_buffer;
3677
3678 cksum = 0;
3679 cksum_p = (char *)prv_bios_data;
3680 for (i = 0; i < 14; i++ ) {
3681 cksum += (u16)(*cksum_p++);
3682 }
3683
3684 if (prv_bios_data->cksum == (u16)(0-cksum) ) {
3685
3686 /*
3687 * If MSB is set, a physical drive is set as boot
3688 * device
3689 */
3690 if( prv_bios_data->boot_drv & 0x80 ) {
3691 adapter->boot_pdrv_enabled = 1;
3692 boot_pdrv = prv_bios_data->boot_drv & 0x7F;
3693 adapter->boot_pdrv_ch = boot_pdrv / 16;
3694 adapter->boot_pdrv_tgt = boot_pdrv % 16;
3695 }
3696 else {
3697 adapter->boot_ldrv_enabled = 1;
3698 adapter->boot_ldrv = prv_bios_data->boot_drv;
3699 }
3700 }
3701 }
3702
3703 }
3704
3705 /**
3706 * mega_support_random_del()
3707 * @adapter: pointer to our soft state
3708 *
3709 * Find out if this controller supports random deletion and addition of
3710 * logical drives
3711 */
3712 static int
3713 mega_support_random_del(adapter_t *adapter)
3714 {
3715 struct mbox_out mbox;
3716 unsigned char *raw_mbox = (u8 *)&mbox;
3717 int rval;
3718
3719 memset(&mbox, 0, sizeof(mbox));
3720
3721 /*
3722 * issue command
3723 */
3724 raw_mbox[0] = FC_DEL_LOGDRV;
3725 raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3726
3727 rval = issue_scb_block(adapter, raw_mbox);
3728
3729 return !rval;
3730 }
3731
3732
3733 /**
3734 * mega_support_ext_cdb()
3735 * @adapter: pointer to our soft state
3736 *
3737 * Find out if this firmware supports cdblen > 10
3738 */
3739 static int
3740 mega_support_ext_cdb(adapter_t *adapter)
3741 {
3742 struct mbox_out mbox;
3743 unsigned char *raw_mbox = (u8 *)&mbox;
3744 int rval;
3745
3746 memset(&mbox, 0, sizeof(mbox));
3747 /*
3748 * issue command to find out if controller supports extended CDBs.
3749 */
3750 raw_mbox[0] = 0xA4;
3751 raw_mbox[2] = 0x16;
3752
3753 rval = issue_scb_block(adapter, raw_mbox);
3754
3755 return !rval;
3756 }
3757
3758
3759 /**
3760 * mega_del_logdrv()
3761 * @adapter: pointer to our soft state
3762 * @logdrv: logical drive to be deleted
3763 *
3764 * Delete the specified logical drive. It is the responsibility of the user
3765 * app to let the OS know about this operation.
3766 */
3767 static int
3768 mega_del_logdrv(adapter_t *adapter, int logdrv)
3769 {
3770 unsigned long flags;
3771 scb_t *scb;
3772 int rval;
3773
3774 /*
3775 * Stop sending commands to the controller, queue them internally.
3776 * When deletion is complete, ISR will flush the queue.
3777 */
3778 atomic_set(&adapter->quiescent, 1);
3779
3780 /*
3781 * Wait till all the issued commands are complete and there are no
3782 * commands in the pending queue
3783 */
3784 while (atomic_read(&adapter->pend_cmds) > 0 ||
3785 !list_empty(&adapter->pending_list))
3786 msleep(1000); /* sleep for 1s */
3787
3788 rval = mega_do_del_logdrv(adapter, logdrv);
3789
3790 spin_lock_irqsave(&adapter->lock, flags);
3791
3792 /*
3793 * If delete operation was successful, add 0x80 to the logical drive
3794 * ids for commands in the pending queue.
3795 */
3796 if (adapter->read_ldidmap) {
3797 struct list_head *pos;
3798 list_for_each(pos, &adapter->pending_list) {
3799 scb = list_entry(pos, scb_t, list);
3800 if (scb->pthru->logdrv < 0x80 )
3801 scb->pthru->logdrv += 0x80;
3802 }
3803 }
3804
3805 atomic_set(&adapter->quiescent, 0);
3806
3807 mega_runpendq(adapter);
3808
3809 spin_unlock_irqrestore(&adapter->lock, flags);
3810
3811 return rval;
3812 }
3813
3814
3815 static int
3816 mega_do_del_logdrv(adapter_t *adapter, int logdrv)
3817 {
3818 megacmd_t mc;
3819 int rval;
3820
3821 memset( &mc, 0, sizeof(megacmd_t));
3822
3823 mc.cmd = FC_DEL_LOGDRV;
3824 mc.opcode = OP_DEL_LOGDRV;
3825 mc.subopcode = logdrv;
3826
3827 rval = mega_internal_command(adapter, &mc, NULL);
3828
3829 /* log this event */
3830 if(rval) {
3831 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
3832 return rval;
3833 }
3834
3835 /*
3836 * After deleting first logical drive, the logical drives must be
3837 * addressed by adding 0x80 to the logical drive id.
3838 */
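/*
 * For instance, once the first logical drive has been deleted,
 * logical drive 3 must be addressed as 0x83 in subsequent
 * mailbox commands.
 */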
3839 adapter->read_ldidmap = 1;
3840
3841 return rval;
3842 }
3843
3844
3845 /**
3846 * mega_get_max_sgl()
3847 * @adapter: pointer to our soft state
3848 *
3849 * Find out the maximum number of scatter-gather elements supported by this
3850 * version of the firmware
3851 */
3852 static void
3853 mega_get_max_sgl(adapter_t *adapter)
3854 {
3855 struct mbox_out mbox;
3856 unsigned char *raw_mbox = (u8 *)&mbox;
3857
3858 memset(&mbox, 0, sizeof(mbox));
3859
3860 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3861
3862 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3863
3864 raw_mbox[0] = MAIN_MISC_OPCODE;
3865 raw_mbox[2] = GET_MAX_SG_SUPPORT;
3866
3867
3868 if( issue_scb_block(adapter, raw_mbox) ) {
3869 /*
3870 * f/w does not support this command. Choose the default value
3871 */
3872 adapter->sglen = MIN_SGLIST;
3873 }
3874 else {
3875 adapter->sglen = *((char *)adapter->mega_buffer);
3876
3877 /*
3878 * Make sure this is not more than the resources we are
3879 * planning to allocate
3880 */
3881 if ( adapter->sglen > MAX_SGLIST )
3882 adapter->sglen = MAX_SGLIST;
3883 }
3884
3885 return;
3886 }
3887
3888
3889 /**
3890 * mega_support_cluster()
3891 * @adapter: pointer to our soft state
3892 *
3893 * Find out if this firmware supports cluster calls.
3894 */
3895 static int
3896 mega_support_cluster(adapter_t *adapter)
3897 {
3898 struct mbox_out mbox;
3899 unsigned char *raw_mbox = (u8 *)&mbox;
3900
3901 memset(&mbox, 0, sizeof(mbox));
3902
3903 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3904
3905 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3906
3907 /*
3908 * Try to get the initiator id. This command will succeed iff the
3909 * clustering is available on this HBA.
3910 */
3911 raw_mbox[0] = MEGA_GET_TARGET_ID;
3912
3913 if( issue_scb_block(adapter, raw_mbox) == 0 ) {
3914
3915 /*
3916 * Cluster support available. Get the initiator target id.
3917 * Tell our id to mid-layer too.
3918 */
3919 adapter->this_id = *(u32 *)adapter->mega_buffer;
3920 adapter->host->this_id = adapter->this_id;
3921
3922 return 1;
3923 }
3924
3925 return 0;
3926 }
3927
3928 #ifdef CONFIG_PROC_FS
3929 /**
3930 * mega_adapinq()
3931 * @adapter: pointer to our soft state
3932 * @dma_handle: DMA address of the buffer
3933 *
3934 * Issue internal commands while interrupts are available.
3935 * We only issue direct mailbox commands from within the driver; the
3936 * ioctl() interface, which uses these routines, can issue passthru commands.
3937 */
3938 static int
3939 mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
3940 {
3941 megacmd_t mc;
3942
3943 memset(&mc, 0, sizeof(megacmd_t));
3944
3945 if( adapter->flag & BOARD_40LD ) {
3946 mc.cmd = FC_NEW_CONFIG;
3947 mc.opcode = NC_SUBOP_ENQUIRY3;
3948 mc.subopcode = ENQ3_GET_SOLICITED_FULL;
3949 }
3950 else {
3951 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ;
3952 }
3953
3954 mc.xferaddr = (u32)dma_handle;
3955
3956 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) {
3957 return -1;
3958 }
3959
3960 return 0;
3961 }
3962
3963
3964 /**
3965 * mega_internal_dev_inquiry()
3966 * @adapter: pointer to our soft state
3967 * @ch: channel for this device
3968 * @tgt: ID of this device
3969 * @buf_dma_handle: DMA address of the buffer
3970 *
3971 * Issue the scsi inquiry for the specified device.
3972 */
3973 static int
3974 mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
3975 dma_addr_t buf_dma_handle)
3976 {
3977 mega_passthru *pthru;
3978 dma_addr_t pthru_dma_handle;
3979 megacmd_t mc;
3980 int rval;
3981 struct pci_dev *pdev;
3982
3983
3984 /*
3985 * For all internal commands, the buffer must be allocated in <4GB
3986 * address range
3987 */
3988 if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
3989
3990 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
3991 &pthru_dma_handle, GFP_KERNEL);
3992
3993 if( pthru == NULL ) {
3994 free_local_pdev(pdev);
3995 return -1;
3996 }
3997
3998 pthru->timeout = 2;
3999 pthru->ars = 1;
4000 pthru->reqsenselen = 14;
4001 pthru->islogical = 0;
4002
4003 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch;
4004
4005 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
4006
4007 pthru->cdblen = 6;
4008
4009 pthru->cdb[0] = INQUIRY;
4010 pthru->cdb[1] = 0;
4011 pthru->cdb[2] = 0;
4012 pthru->cdb[3] = 0;
4013 pthru->cdb[4] = 255;
4014 pthru->cdb[5] = 0;
4015
4016
4017 pthru->dataxferaddr = (u32)buf_dma_handle;
4018 pthru->dataxferlen = 256;
4019
4020 memset(&mc, 0, sizeof(megacmd_t));
4021
4022 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
4023 mc.xferaddr = (u32)pthru_dma_handle;
4024
4025 rval = mega_internal_command(adapter, &mc, pthru);
4026
4027 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
4028 pthru_dma_handle);
4029
4030 free_local_pdev(pdev);
4031
4032 return rval;
4033 }
4034 #endif
4035
4036 /**
4037 * mega_internal_command()
4038 * @adapter: pointer to our soft state
4039 * @mc: the mailbox command
4040 * @pthru: Passthru structure for DCDB commands
4041 *
4042 * Issue the internal commands in interrupt mode.
4043 * The last argument is the address of the passthru structure if the command
4044 * to be fired is a passthru command
4045 *
4046 * Note: parameter 'pthru' is null for non-passthru commands.
4047 */
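/*
 * Callers fill in a megacmd_t (and, for MEGA_MBOXCMD_PASSTHRU, a passthru
 * structure); the command is queued on the pending list under
 * CMDID_INT_CMDS, completion is signalled through adapter->int_waitq, and
 * the firmware status is returned both in mc->status and as the return
 * value.
 */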
4048 static int
4049 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4050 {
4051 unsigned long flags;
4052 scb_t *scb;
4053 int rval;
4054
4055 /*
4056 * The internal commands share one command id and hence are
4057 * serialized. This is so because we want to reserve maximum number of
4058 * available command ids for the I/O commands.
4059 */
4060 mutex_lock(&adapter->int_mtx);
4061
4062 scb = &adapter->int_scb;
4063 memset(scb, 0, sizeof(scb_t));
4064
4065 scb->idx = CMDID_INT_CMDS;
4066 scb->state |= SCB_ACTIVE | SCB_PENDQ;
4067
4068 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
4069
4070 /*
4071 * Is it a passthru command
4072 */
4073 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
4074 scb->pthru = pthru;
4075
4076 spin_lock_irqsave(&adapter->lock, flags);
4077 list_add_tail(&scb->list, &adapter->pending_list);
4078 /*
4079 * Check if the HBA is in quiescent state, e.g., during a
4080 * delete logical drive operation. If it is, don't run
4081 * the pending_list.
4082 */
4083 if (atomic_read(&adapter->quiescent) == 0)
4084 mega_runpendq(adapter);
4085 spin_unlock_irqrestore(&adapter->lock, flags);
4086
4087 wait_for_completion(&adapter->int_waitq);
4088
4089 mc->status = rval = adapter->int_status;
4090
4091 /*
4092 * Print a debug message for all failed commands. Applications can use
4093 * this information.
4094 */
4095 if (rval && trace_level) {
4096 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
4097 mc->cmd, mc->opcode, mc->subopcode, rval);
4098 }
4099
4100 mutex_unlock(&adapter->int_mtx);
4101 return rval;
4102 }
4103
4104 static const struct scsi_host_template megaraid_template = {
4105 .module = THIS_MODULE,
4106 .name = "MegaRAID",
4107 .proc_name = "megaraid_legacy",
4108 .info = megaraid_info,
4109 .queuecommand = megaraid_queue,
4110 .bios_param = megaraid_biosparam,
4111 .max_sectors = MAX_SECTORS_PER_IO,
4112 .can_queue = MAX_COMMANDS,
4113 .this_id = DEFAULT_INITIATOR_ID,
4114 .sg_tablesize = MAX_SGLIST,
4115 .cmd_per_lun = DEF_CMD_PER_LUN,
4116 .eh_abort_handler = megaraid_abort,
4117 .eh_device_reset_handler = megaraid_reset,
4118 .eh_bus_reset_handler = megaraid_reset,
4119 .eh_host_reset_handler = megaraid_reset,
4120 .no_write_same = 1,
4121 .cmd_size = sizeof(struct megaraid_cmd_priv),
4122 };
4123
4124 static int
4125 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4126 {
4127 struct Scsi_Host *host;
4128 adapter_t *adapter;
4129 unsigned long mega_baseport, tbase, flag = 0;
4130 u16 subsysid, subsysvid;
4131 u8 pci_bus, pci_dev_func;
4132 int irq, i, j;
4133 int error = -ENODEV;
4134
4135 if (hba_count >= MAX_CONTROLLERS)
4136 goto out;
4137
4138 if (pci_enable_device(pdev))
4139 goto out;
4140 pci_set_master(pdev);
4141
4142 pci_bus = pdev->bus->number;
4143 pci_dev_func = pdev->devfn;
4144
4145 /*
4146 * The megaraid3 stuff reports the ID of the Intel part which is not
4147 * remotely specific to the megaraid
4148 */
4149 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
4150 u16 magic;
4151 /*
4152 * Don't fall over the Compaq management cards using the same
4153 * PCI identifier
4154 */
4155 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
4156 pdev->subsystem_device == 0xC000)
4157 goto out_disable_device;
4158 /* Now check the magic signature byte */
4159 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
4160 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
4161 goto out_disable_device;
4162 /* Ok it is probably a megaraid */
4163 }
4164
4165 /*
4166 * For these vendor and device ids, signature offsets are not
4167 * valid and 64 bit is implicit
4168 */
4169 if (id->driver_data & BOARD_64BIT)
4170 flag |= BOARD_64BIT;
4171 else {
4172 u32 magic64;
4173
4174 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);
4175 if (magic64 == HBA_SIGNATURE_64BIT)
4176 flag |= BOARD_64BIT;
4177 }
4178
4179 subsysvid = pdev->subsystem_vendor;
4180 subsysid = pdev->subsystem_device;
4181
4182 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
4183 id->vendor, id->device);
4184
4185 /* Read the base port and IRQ from PCI */
4186 mega_baseport = pci_resource_start(pdev, 0);
4187 irq = pdev->irq;
4188
4189 tbase = mega_baseport;
4190 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
4191 flag |= BOARD_MEMMAP;
4192
4193 if (!request_mem_region(mega_baseport, 128, "megaraid")) {
4194 dev_warn(&pdev->dev, "mem region busy!\n");
4195 goto out_disable_device;
4196 }
4197
4198 mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
4199 if (!mega_baseport) {
4200 dev_warn(&pdev->dev, "could not map hba memory\n");
4201 goto out_release_region;
4202 }
4203 } else {
4204 flag |= BOARD_IOMAP;
4205 mega_baseport += 0x10;
4206
4207 if (!request_region(mega_baseport, 16, "megaraid"))
4208 goto out_disable_device;
4209 }
4210
4211 /* Initialize SCSI Host structure */
4212 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t));
4213 if (!host)
4214 goto out_iounmap;
4215
4216 adapter = (adapter_t *)host->hostdata;
4217 memset(adapter, 0, sizeof(adapter_t));
4218
4219 dev_notice(&pdev->dev,
4220 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
4221 host->host_no, mega_baseport, irq);
4222
4223 adapter->base = mega_baseport;
4224 if (flag & BOARD_MEMMAP)
4225 adapter->mmio_base = (void __iomem *) mega_baseport;
4226
4227 INIT_LIST_HEAD(&adapter->free_list);
4228 INIT_LIST_HEAD(&adapter->pending_list);
4229 INIT_LIST_HEAD(&adapter->completed_list);
4230
4231 adapter->flag = flag;
4232 spin_lock_init(&adapter->lock);
4233
4234 host->cmd_per_lun = max_cmd_per_lun;
4235 host->max_sectors = max_sectors_per_io;
4236
4237 adapter->dev = pdev;
4238 adapter->host = host;
4239
4240 adapter->host->irq = irq;
4241
4242 if (flag & BOARD_MEMMAP)
4243 adapter->host->base = tbase;
4244 else {
4245 adapter->host->io_port = tbase;
4246 adapter->host->n_io_port = 16;
4247 }
4248
4249 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func;
4250
4251 /*
4252 * Allocate buffer to issue internal commands.
4253 */
4254 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
4255 MEGA_BUFFER_SIZE,
4256 &adapter->buf_dma_handle,
4257 GFP_KERNEL);
4258 if (!adapter->mega_buffer) {
4259 dev_warn(&pdev->dev, "out of RAM\n");
4260 goto out_host_put;
4261 }
4262
4263 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t),
4264 GFP_KERNEL);
4265 if (!adapter->scb_list) {
4266 dev_warn(&pdev->dev, "out of RAM\n");
4267 goto out_free_cmd_buffer;
4268 }
4269
4270 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
4271 megaraid_isr_memmapped : megaraid_isr_iomapped,
4272 IRQF_SHARED, "megaraid", adapter)) {
4273 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
4274 goto out_free_scb_list;
4275 }
4276
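	/* Allocate the DMA-able mailbox used to hand commands to the firmware */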
4277 if (mega_setup_mailbox(adapter))
4278 goto out_free_irq;
4279
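	/*
	 * Read the adapter inquiry data: firmware version, number of
	 * logical drives, channels, etc.
	 */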
4280 if (mega_query_adapter(adapter))
4281 goto out_free_mbox;
4282
4283 	/*
4284 	 * Check for some known-buggy firmware revisions
4285 	 */
4286 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) {
4287 /*
4288 * Which firmware
4289 */
4290 if (!strcmp(adapter->fw_version, "3.00") ||
4291 !strcmp(adapter->fw_version, "3.01")) {
4292
4293 dev_warn(&pdev->dev,
4294 "Your card is a Dell PERC "
4295 "2/SC RAID controller with "
4296 "firmware\nmegaraid: 3.00 or 3.01. "
4297 "This driver is known to have "
4298 "corruption issues\nmegaraid: with "
4299 "those firmware versions on this "
4300 "specific card. In order\nmegaraid: "
4301 "to protect your data, please upgrade "
4302 "your firmware to version\nmegaraid: "
4303 "3.10 or later, available from the "
4304 "Dell Technical Support web\n"
4305 "megaraid: site at\nhttp://support."
4306 "dell.com/us/en/filelib/download/"
4307 "index.asp?fileid=2940\n"
4308 );
4309 }
4310 }
4311
4312 	/*
4313 	 * If we have an HP 1M (0x60E7) / 2M (0x60E8) controller
4314 	 * running firmware H.01.07, H.01.08 or H.01.09, disable
4315 	 * 64-bit support, since these firmware revisions cannot
4316 	 * handle 64-bit addressing
4317 	 */
4318 if ((subsysvid == PCI_VENDOR_ID_HP) &&
4319 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) {
4320 /*
4321 * which firmware
4322 */
4323 if (!strcmp(adapter->fw_version, "H01.07") ||
4324 !strcmp(adapter->fw_version, "H01.08") ||
4325 !strcmp(adapter->fw_version, "H01.09") ) {
4326 dev_warn(&pdev->dev,
4327 "Firmware H.01.07, "
4328 "H.01.08, and H.01.09 on 1M/2M "
4329 "controllers\n"
4330 "do not support 64 bit "
4331 "addressing.\nDISABLING "
4332 "64 bit support.\n");
4333 adapter->flag &= ~BOARD_64BIT;
4334 }
4335 }
4336
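	/*
	 * Record the BIOS state and soft state of this adapter; used
	 * for controller re-ordering.
	 */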
4337 if (mega_is_bios_enabled(adapter))
4338 mega_hbas[hba_count].is_bios_enabled = 1;
4339 mega_hbas[hba_count].hostdata_addr = adapter;
4340
4341 /*
4342 * Find out which channel is raid and which is scsi. This is
4343 * for ROMB support.
4344 */
4345 mega_enum_raid_scsi(adapter);
4346
4347 	/*
4348 	 * Find out if a logical drive is set as the boot drive. If
4349 	 * there is one, make it the first logical drive.
4350 	 * ROMB: if we must boot from a physical drive, all the
4351 	 * physical drives are exported to the mid-layer before the
4352 	 * logical drives; otherwise the physical drives are exported
4353 	 * after the logical drives.
4354 	 */
4355 mega_get_boot_drv(adapter);
4356
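	/*
	 * logdrv_chan[i] is set for the NVIRT_CHAN virtual channels
	 * that carry logical drives and clear for physical SCSI
	 * channels.
	 */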
4357 if (adapter->boot_pdrv_enabled) {
4358 j = adapter->product_info.nchannels;
4359 for( i = 0; i < j; i++ )
4360 adapter->logdrv_chan[i] = 0;
4361 for( i = j; i < NVIRT_CHAN + j; i++ )
4362 adapter->logdrv_chan[i] = 1;
4363 } else {
4364 for (i = 0; i < NVIRT_CHAN; i++)
4365 adapter->logdrv_chan[i] = 1;
4366 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++)
4367 adapter->logdrv_chan[i] = 0;
4368 adapter->mega_ch_class <<= NVIRT_CHAN;
4369 }
4370
4371 	/*
4372 	 * Find out whether the firmware supports random deletion
4373 	 * and addition of logical drives
4374 	 */
4375 	adapter->read_ldidmap = 0;	/* set after the first logical
4376 					   drive delete command */
4377 adapter->support_random_del = mega_support_random_del(adapter);
4378
4379 /* Initialize SCBs */
4380 if (mega_init_scb(adapter))
4381 goto out_free_mbox;
4382
4383 /*
4384 * Reset the pending commands counter
4385 */
4386 atomic_set(&adapter->pend_cmds, 0);
4387
4388 /*
4389 * Reset the adapter quiescent flag
4390 */
4391 atomic_set(&adapter->quiescent, 0);
4392
4393 hba_soft_state[hba_count] = adapter;
4394
4395 	/*
4396 	 * Fill in the structure that is passed back to the
4397 	 * application when it issues an ioctl() for
4398 	 * controller-related information.
4399 	 */
4400 i = hba_count;
4401
4402 mcontroller[i].base = mega_baseport;
4403 mcontroller[i].irq = irq;
4404 mcontroller[i].numldrv = adapter->numldrv;
4405 mcontroller[i].pcibus = pci_bus;
4406 mcontroller[i].pcidev = id->device;
4407 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
4408 mcontroller[i].pciid = -1;
4409 mcontroller[i].pcivendor = id->vendor;
4410 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func);
4411 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
4412
4413
4414 /* Set the Mode of addressing to 64 bit if we can */
4415 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
4416 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4417 adapter->has_64bit_addr = 1;
4418 } else {
4419 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4420 adapter->has_64bit_addr = 0;
4421 }
4422
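	/* Used to serialize internal commands and wait for their completion */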
4423 mutex_init(&adapter->int_mtx);
4424 init_completion(&adapter->int_waitq);
4425
4426 adapter->this_id = DEFAULT_INITIATOR_ID;
4427 adapter->host->this_id = DEFAULT_INITIATOR_ID;
4428
4429 #if MEGA_HAVE_CLUSTERING
4430 	/*
4431 	 * Is cluster support enabled on this controller?
4432 	 * Note: in a cluster the HBAs (the initiators) have
4433 	 * different target IDs, so we cannot assume the ID is 7.
4434 	 * mega_support_cluster() also retrieves the target ID when
4435 	 * cluster support is available
4436 	 */
4437 adapter->has_cluster = mega_support_cluster(adapter);
4438 if (adapter->has_cluster) {
4439 dev_notice(&pdev->dev,
4440 "Cluster driver, initiator id:%d\n",
4441 adapter->this_id);
4442 }
4443 #endif
4444
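	/* Save the Scsi_Host pointer for the remove() and shutdown() paths */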
4445 pci_set_drvdata(pdev, host);
4446
4447 mega_create_proc_entry(hba_count, mega_proc_dir_entry);
4448
4449 error = scsi_add_host(host, &pdev->dev);
4450 if (error)
4451 goto out_free_mbox;
4452
4453 scsi_scan_host(host);
4454 hba_count++;
4455 return 0;
4456
4457 out_free_mbox:
4458 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4459 adapter->una_mbox64, adapter->una_mbox64_dma);
4460 out_free_irq:
4461 free_irq(adapter->host->irq, adapter);
4462 out_free_scb_list:
4463 kfree(adapter->scb_list);
4464 out_free_cmd_buffer:
4465 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4466 adapter->mega_buffer, adapter->buf_dma_handle);
4467 out_host_put:
4468 scsi_host_put(host);
4469 out_iounmap:
4470 if (flag & BOARD_MEMMAP)
4471 iounmap((void *)mega_baseport);
4472 out_release_region:
4473 if (flag & BOARD_MEMMAP)
4474 release_mem_region(tbase, 128);
4475 else
4476 release_region(mega_baseport, 16);
4477 out_disable_device:
4478 pci_disable_device(pdev);
4479 out:
4480 return error;
4481 }
4482
4483 static void
4484 __megaraid_shutdown(adapter_t *adapter)
4485 {
4486 u_char raw_mbox[sizeof(struct mbox_out)];
4487 mbox_t *mbox = (mbox_t *)raw_mbox;
4488 int i;
4489
4490 /* Flush adapter cache */
4491 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4492 raw_mbox[0] = FLUSH_ADAPTER;
4493
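	/*
	 * Release the IRQ first; the flush commands below are issued
	 * in polled (blocking) mode.
	 */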
4494 free_irq(adapter->host->irq, adapter);
4495
4496 /* Issue a blocking (interrupts disabled) command to the card */
4497 issue_scb_block(adapter, raw_mbox);
4498
4499 /* Flush disks cache */
4500 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4501 raw_mbox[0] = FLUSH_SYSTEM;
4502
4503 /* Issue a blocking (interrupts disabled) command to the card */
4504 issue_scb_block(adapter, raw_mbox);
4505
4506 if (atomic_read(&adapter->pend_cmds) > 0)
4507 dev_warn(&adapter->dev->dev, "pending commands!!\n");
4508
4509 	/*
4510 	 * Add a deliberate delay to make sure all the caches are
4511 	 * actually flushed.
4512 	 */
4513 for (i = 0; i <= 10; i++)
4514 mdelay(1000);
4515 }
4516
4517 static void
4518 megaraid_remove_one(struct pci_dev *pdev)
4519 {
4520 struct Scsi_Host *host = pci_get_drvdata(pdev);
4521 adapter_t *adapter = (adapter_t *)host->hostdata;
4522 char buf[12] = { 0 };
4523
4524 scsi_remove_host(host);
4525
4526 __megaraid_shutdown(adapter);
4527
4528 /* Free our resources */
4529 if (adapter->flag & BOARD_MEMMAP) {
4530 iounmap((void *)adapter->base);
4531 release_mem_region(adapter->host->base, 128);
4532 } else
4533 release_region(adapter->base, 16);
4534
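	/* Free the scatter-gather lists allocated for the SCBs */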
4535 mega_free_sgl(adapter);
4536
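	/* Remove this adapter's /proc/megaraid/hbaN subtree */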
4537 sprintf(buf, "hba%d", adapter->host->host_no);
4538 remove_proc_subtree(buf, mega_proc_dir_entry);
4539
4540 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4541 adapter->mega_buffer, adapter->buf_dma_handle);
4542 kfree(adapter->scb_list);
4543 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4544 adapter->una_mbox64, adapter->una_mbox64_dma);
4545
4546 scsi_host_put(host);
4547 pci_disable_device(pdev);
4548
4549 hba_count--;
4550 }
4551
4552 static void
4553 megaraid_shutdown(struct pci_dev *pdev)
4554 {
4555 struct Scsi_Host *host = pci_get_drvdata(pdev);
4556 adapter_t *adapter = (adapter_t *)host->hostdata;
4557
4558 __megaraid_shutdown(adapter);
4559 }
4560
4561 static struct pci_device_id megaraid_pci_tbl[] = {
4562 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
4563 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4564 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
4565 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4566 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
4567 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4568 {0,}
4569 };
4570 MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
4571
4572 static struct pci_driver megaraid_pci_driver = {
4573 .name = "megaraid_legacy",
4574 .id_table = megaraid_pci_tbl,
4575 .probe = megaraid_probe_one,
4576 .remove = megaraid_remove_one,
4577 .shutdown = megaraid_shutdown,
4578 };
4579
4580 static int __init megaraid_init(void)
4581 {
4582 int error;
4583
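	/* Clamp the module parameters to sane limits */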
4584 if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
4585 max_cmd_per_lun = MAX_CMD_PER_LUN;
4586 if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
4587 max_mbox_busy_wait = MBOX_BUSY_WAIT;
4588
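	/*
	 * Create the /proc/megaraid root; per-adapter entries are
	 * added at probe time. Failure here is not fatal.
	 */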
4589 #ifdef CONFIG_PROC_FS
4590 mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
4591 if (!mega_proc_dir_entry) {
4592 printk(KERN_WARNING
4593 "megaraid: failed to create megaraid root\n");
4594 }
4595 #endif
4596 error = pci_register_driver(&megaraid_pci_driver);
4597 if (error) {
4598 #ifdef CONFIG_PROC_FS
4599 remove_proc_entry("megaraid", NULL);
4600 #endif
4601 return error;
4602 }
4603
4604 	/*
4605 	 * Register the driver as a character device, for applications
4606 	 * to access it for ioctls.
4607 	 * Passing 0 as the first (major) argument to register_chrdev
4608 	 * requests dynamic major number allocation.
4609 	 */
4610 major = register_chrdev(0, "megadev_legacy", &megadev_fops);
4611 if (major < 0) {
4612 printk(KERN_WARNING
4613 "megaraid: failed to register char device\n");
4614 }
4615
4616 return 0;
4617 }
4618
4619 static void __exit megaraid_exit(void)
4620 {
4621 /*
4622 * Unregister the character device interface to the driver.
4623 */
4624 unregister_chrdev(major, "megadev_legacy");
4625
4626 pci_unregister_driver(&megaraid_pci_driver);
4627
4628 #ifdef CONFIG_PROC_FS
4629 remove_proc_entry("megaraid", NULL);
4630 #endif
4631 }
4632
4633 module_init(megaraid_init);
4634 module_exit(megaraid_exit);
4635
4636 /* vi: set ts=8 sw=8 tw=78: */
4637