1 /*
2 * Adaptec AIC79xx device driver for Linux.
3 *
4 * Copyright (c) 2000-2001 Adaptec Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.h#135 $
40 *
41 */
42 #ifndef _AIC79XX_LINUX_H_
43 #define _AIC79XX_LINUX_H_
44
45 #include <linux/types.h>
46 #include <linux/blk.h>
47 #include <linux/blkdev.h>
48 #include <linux/delay.h>
49 #include <linux/ioport.h>
50 #include <linux/pci.h>
51 #include <linux/smp_lock.h>
52 #include <linux/version.h>
53 #include <linux/module.h>
54 #include <asm/byteorder.h>
55 #include <asm/io.h>
56
57 #ifndef KERNEL_VERSION
58 #define KERNEL_VERSION(x,y,z) (((x)<<16)+((y)<<8)+(z))
59 #endif
60
61 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
62 #include <linux/interrupt.h> /* For tasklet support. */
63 #include <linux/config.h>
64 #include <linux/slab.h>
65 #else
66 #include <linux/malloc.h>
67 #endif
68
69 /* Core SCSI definitions */
70 #define AIC_LIB_PREFIX ahd
71 #include "scsi.h"
72 #include "hosts.h"
73
74 /* Name space conflict with BSD queue macros */
75 #ifdef LIST_HEAD
76 #undef LIST_HEAD
77 #endif
78
79 #include "cam.h"
80 #include "queue.h"
81 #include "scsi_message.h"
82 #include "scsi_iu.h"
83 #include "aiclib.h"
84
85 /*********************************** Debugging ********************************/
86 #ifdef CONFIG_AIC79XX_DEBUG_ENABLE
87 #ifdef CONFIG_AIC79XX_DEBUG_MASK
88 #define AHD_DEBUG 1
89 #define AHD_DEBUG_OPTS CONFIG_AIC79XX_DEBUG_MASK
90 #else
91 /*
92 * Compile in debugging code, but do not enable any printfs.
93 */
94 #define AHD_DEBUG 1
95 #define AHD_DEBUG_OPTS 0
96 #endif
97 /* No debugging code. */
98 #endif
99
100 /********************************** Misc Macros *******************************/
101 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
102 #define powerof2(x) ((((x)-1)&(x))==0)
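/*
 * For reference only (illustrative, not used by the driver itself):
 * roundup(13, 8) evaluates to 16 and powerof2(64) to true.  Note that
 * powerof2(0) also evaluates to true, so callers are expected to pass
 * non-zero values.
 */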
103
104 /************************* Forward Declarations *******************************/
105 struct ahd_softc;
106 typedef struct pci_dev *ahd_dev_softc_t;
107 typedef Scsi_Cmnd *ahd_io_ctx_t;
108
109 /******************************* Byte Order ***********************************/
110 #define ahd_htobe16(x) cpu_to_be16(x)
111 #define ahd_htobe32(x) cpu_to_be32(x)
112 #define ahd_htobe64(x) cpu_to_be64(x)
113 #define ahd_htole16(x) cpu_to_le16(x)
114 #define ahd_htole32(x) cpu_to_le32(x)
115 #define ahd_htole64(x) cpu_to_le64(x)
116
117 #define ahd_be16toh(x) be16_to_cpu(x)
118 #define ahd_be32toh(x) be32_to_cpu(x)
119 #define ahd_be64toh(x) be64_to_cpu(x)
120 #define ahd_le16toh(x) le16_to_cpu(x)
121 #define ahd_le32toh(x) le32_to_cpu(x)
122 #define ahd_le64toh(x) le64_to_cpu(x)
123
124 #ifndef LITTLE_ENDIAN
125 #define LITTLE_ENDIAN 1234
126 #endif
127
128 #ifndef BIG_ENDIAN
129 #define BIG_ENDIAN 4321
130 #endif
131
132 #ifndef BYTE_ORDER
133 #if defined(__BIG_ENDIAN)
134 #define BYTE_ORDER BIG_ENDIAN
135 #endif
136 #if defined(__LITTLE_ENDIAN)
137 #define BYTE_ORDER LITTLE_ENDIAN
138 #endif
139 #endif /* BYTE_ORDER */
140
141 /************************* Configuration Data *********************************/
142 extern uint32_t aic79xx_allow_memio;
143 extern int aic79xx_detect_complete;
144 extern Scsi_Host_Template aic79xx_driver_template;
145
146 /***************************** Bus Space/DMA **********************************/
147
148 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,2,17)
149 typedef dma_addr_t bus_addr_t;
150 #else
151 typedef uint32_t bus_addr_t;
152 #endif
153 typedef uint32_t bus_size_t;
154
155 typedef enum {
156 BUS_SPACE_MEMIO,
157 BUS_SPACE_PIO
158 } bus_space_tag_t;
159
160 typedef union {
161 u_long ioport;
162 volatile uint8_t *maddr;
163 } bus_space_handle_t;
164
165 typedef struct bus_dma_segment
166 {
167 bus_addr_t ds_addr;
168 bus_size_t ds_len;
169 } bus_dma_segment_t;
170
171 struct ahd_linux_dma_tag
172 {
173 bus_size_t alignment;
174 bus_size_t boundary;
175 bus_size_t maxsize;
176 };
177 typedef struct ahd_linux_dma_tag* bus_dma_tag_t;
178
179 struct ahd_linux_dmamap
180 {
181 bus_addr_t bus_addr;
182 };
183 typedef struct ahd_linux_dmamap* bus_dmamap_t;
184
185 typedef int bus_dma_filter_t(void*, bus_addr_t);
186 typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
187
188 #define BUS_DMA_WAITOK 0x0
189 #define BUS_DMA_NOWAIT 0x1
190 #define BUS_DMA_ALLOCNOW 0x2
191 #define BUS_DMA_LOAD_SEGS 0x4 /*
192 * Argument is an S/G list not
193 * a single buffer.
194 */
195
196 #define BUS_SPACE_MAXADDR 0xFFFFFFFF
197 #define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
198 #define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
199
200 int ahd_dma_tag_create(struct ahd_softc *, bus_dma_tag_t /*parent*/,
201 bus_size_t /*alignment*/, bus_size_t /*boundary*/,
202 bus_addr_t /*lowaddr*/, bus_addr_t /*highaddr*/,
203 bus_dma_filter_t*/*filter*/, void */*filterarg*/,
204 bus_size_t /*maxsize*/, int /*nsegments*/,
205 bus_size_t /*maxsegsz*/, int /*flags*/,
206 bus_dma_tag_t */*dma_tagp*/);
207
208 void ahd_dma_tag_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/);
209
210 int ahd_dmamem_alloc(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
211 void** /*vaddr*/, int /*flags*/,
212 bus_dmamap_t* /*mapp*/);
213
214 void ahd_dmamem_free(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
215 void* /*vaddr*/, bus_dmamap_t /*map*/);
216
217 void ahd_dmamap_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/,
218 bus_dmamap_t /*map*/);
219
220 int ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t /*dmat*/,
221 bus_dmamap_t /*map*/, void * /*buf*/,
222 bus_size_t /*buflen*/, bus_dmamap_callback_t *,
223 void */*callback_arg*/, int /*flags*/);
224
225 int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
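/*
 * Illustrative sketch of how these FreeBSD-style busdma wrappers fit
 * together.  The variable names and the single-segment tag parameters
 * below are hypothetical, and error handling is omitted.
 *
 *	static void
 *	example_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_dma_tag_t	tag;
 *	bus_dmamap_t	map;
 *	bus_addr_t	busaddr;
 *	void		*vaddr;
 *
 *	ahd_dma_tag_create(ahd, NULL, 1, 0,
 *			   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *			   NULL, NULL, PAGE_SIZE, 1,
 *			   BUS_SPACE_MAXSIZE_32BIT, 0, &tag);
 *	ahd_dmamem_alloc(ahd, tag, &vaddr, BUS_DMA_NOWAIT, &map);
 *	ahd_dmamap_load(ahd, tag, map, vaddr, PAGE_SIZE,
 *			example_cb, &busaddr, 0);
 */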
226
227 /*
228 * Operations performed by ahd_dmamap_sync().
229 */
230 #define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
231 #define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
232 #define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
233 #define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
234
235 /*
236 * XXX
237 * ahd_dmamap_sync is only used on buffers allocated with
238 * the pci_alloc_consistent() API. Although I'm not sure how
239 * this works on architectures with a write buffer, Linux does
240 * not have an API to sync "coherent" memory. Perhaps we need
241 * to do an mb()?
242 */
243 #define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op)
244
/************************* Timer Data Structures ******************************/
246 typedef struct timer_list ahd_timer_t;
247
248 /********************************** Includes **********************************/
249 #ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT
250 #define AIC_DEBUG_REGISTERS 1
251 #else
252 #define AIC_DEBUG_REGISTERS 0
253 #endif
254 #include "aic79xx.h"
255
256 /***************************** Timer Facilities *******************************/
257 #define ahd_timer_init init_timer
258 #define ahd_timer_stop del_timer_sync
259 typedef void ahd_linux_callback_t (u_long);
260 static __inline void ahd_timer_reset(ahd_timer_t *timer, u_int usec,
261 ahd_callback_t *func, void *arg);
262 static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec);
263
264 static __inline void
ahd_timer_reset(ahd_timer_t *timer, u_int usec, ahd_callback_t *func, void *arg)
266 {
267 struct ahd_softc *ahd;
268
269 ahd = (struct ahd_softc *)arg;
270 del_timer(timer);
271 timer->data = (u_long)arg;
272 timer->expires = jiffies + (usec * HZ)/1000000;
273 timer->function = (ahd_linux_callback_t*)func;
274 add_timer(timer);
275 }
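/*
 * Illustrative sketch only: arm a timer embedded in the softc so that it
 * fires in one second with the softc as its argument.  The callback name
 * "ahd_example_timeout" is hypothetical and must have the ahd_callback_t
 * signature.
 *
 *	ahd_timer_init(&ahd->platform_data->completeq_timer);
 *	ahd_timer_reset(&ahd->platform_data->completeq_timer, 1000000,
 *			ahd_example_timeout, ahd);
 */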
276
277 static __inline void
ahd_scb_timer_reset(struct scb *scb, u_int usec)
279 {
280 mod_timer(&scb->io_ctx->eh_timeout, jiffies + (usec * HZ)/1000000);
281 }
282
283 /***************************** SMP support ************************************/
284 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,17)
285 #include <linux/spinlock.h>
286 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,93)
287 #include <linux/smp.h>
288 #endif
289
290 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
291 #define AHD_SCSI_HAS_HOST_LOCK 1
292 #else
293 #define AHD_SCSI_HAS_HOST_LOCK 0
294 #endif
295
296 #define AIC79XX_DRIVER_VERSION "1.3.10"
297
298 /**************************** Front End Queues ********************************/
/*
 * Data structure used to cast the Linux struct scsi_cmnd to something
 * that allows us to use the queue macros.  The Linux structure has
 * plenty of space to hold the links fields as required by the queue
 * macros, but the queue macros require them to have the correct type.
 */
305 struct ahd_cmd_internal {
306 /* Area owned by the Linux scsi layer. */
307 uint8_t private[offsetof(struct scsi_cmnd, SCp.Status)];
308 union {
309 STAILQ_ENTRY(ahd_cmd) ste;
310 LIST_ENTRY(ahd_cmd) le;
311 TAILQ_ENTRY(ahd_cmd) tqe;
312 } links;
313 uint32_t end;
314 };
315
316 struct ahd_cmd {
317 union {
318 struct ahd_cmd_internal icmd;
319 struct scsi_cmnd scsi_cmd;
320 } un;
321 };
322
323 #define acmd_icmd(cmd) ((cmd)->un.icmd)
324 #define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
325 #define acmd_links un.icmd.links
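/*
 * Illustrative sketch of how the overlay is used: a Scsi_Cmnd handed to
 * us by the mid-layer can be treated as a struct ahd_cmd and chained
 * with the BSD queue macros without copying it.  "completeq" here is the
 * completion queue declared in struct ahd_platform_data below.
 *
 *	struct ahd_cmd *acmd;
 *
 *	acmd = (struct ahd_cmd *)cmd;
 *	TAILQ_INSERT_TAIL(&ahd->platform_data->completeq,
 *			  acmd, acmd_links.tqe);
 */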
326
327 /*************************** Device Data Structures ***************************/
/*
 * A per-probed-device structure used to deal with some error recovery
 * scenarios that the Linux mid-layer code just doesn't know how to
 * handle.  The structure allocated for a device only becomes persistent
 * after a successfully completed inquiry command to the target indicates
 * that a LUN is present.
 */
335 TAILQ_HEAD(ahd_busyq, ahd_cmd);
336 typedef enum {
337 AHD_DEV_UNCONFIGURED = 0x01,
338 AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
339 AHD_DEV_TIMER_ACTIVE = 0x04, /* Our timer is active */
340 AHD_DEV_ON_RUN_LIST = 0x08, /* Queued to be run later */
341 AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
342 AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
343 AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
344 AHD_DEV_SLAVE_CONFIGURED = 0x80 /* slave_configure() has been called */
345 } ahd_linux_dev_flags;
346
347 struct ahd_linux_target;
348 struct ahd_linux_device {
349 TAILQ_ENTRY(ahd_linux_device) links;
350 struct ahd_busyq busyq;
351
352 /*
353 * The number of transactions currently
354 * queued to the device.
355 */
356 int active;
357
358 /*
359 * The currently allowed number of
360 * transactions that can be queued to
361 * the device. Must be signed for
362 * conversion from tagged to untagged
363 * mode where the device may have more
364 * than one outstanding active transaction.
365 */
366 int openings;
367
368 /*
369 * A positive count indicates that this
370 * device's queue is halted.
371 */
372 u_int qfrozen;
373
374 /*
375 * Cumulative command counter.
376 */
377 u_long commands_issued;
378
379 /*
380 * The number of tagged transactions when
381 * running at our current opening level
382 * that have been successfully received by
383 * this device since the last QUEUE FULL.
384 */
385 u_int tag_success_count;
386 #define AHD_TAG_SUCCESS_INTERVAL 50
387
388 ahd_linux_dev_flags flags;
389
390 /*
391 * Per device timer.
392 */
393 struct timer_list timer;
394
395 /*
396 * The high limit for the tags variable.
397 */
398 u_int maxtags;
399
400 /*
401 * The computed number of tags outstanding
402 * at the time of the last QUEUE FULL event.
403 */
404 u_int tags_on_last_queuefull;
405
406 /*
407 * How many times we have seen a queue full
408 * with the same number of tags. This is used
409 * to stop our adaptive queue depth algorithm
410 * on devices with a fixed number of tags.
411 */
412 u_int last_queuefull_same_count;
413 #define AHD_LOCK_TAGS_COUNT 50
414
415 /*
416 * How many transactions have been queued
417 * without the device going idle. We use
418 * this statistic to determine when to issue
419 * an ordered tag to prevent transaction
420 * starvation. This statistic is only updated
421 * if the AHD_DEV_PERIODIC_OTAG flag is set
422 * on this device.
423 */
424 u_int commands_since_idle_or_otag;
425 #define AHD_OTAG_THRESH 500
426
427 int lun;
428 Scsi_Device *scsi_device;
429 struct ahd_linux_target *target;
430 };
431
432 typedef enum {
433 AHD_DV_REQUIRED = 0x01,
434 AHD_INQ_VALID = 0x02,
435 AHD_BASIC_DV = 0x04,
436 AHD_ENHANCED_DV = 0x08
437 } ahd_linux_targ_flags;
438
439 /* DV States */
440 typedef enum {
441 AHD_DV_STATE_EXIT = 0,
442 AHD_DV_STATE_INQ_SHORT_ASYNC,
443 AHD_DV_STATE_INQ_ASYNC,
444 AHD_DV_STATE_INQ_ASYNC_VERIFY,
445 AHD_DV_STATE_TUR,
446 AHD_DV_STATE_REBD,
447 AHD_DV_STATE_INQ_VERIFY,
448 AHD_DV_STATE_WEB,
449 AHD_DV_STATE_REB,
450 AHD_DV_STATE_SU,
451 AHD_DV_STATE_BUSY
452 } ahd_dv_state;
453
454 struct ahd_linux_target {
455 struct ahd_linux_device *devices[AHD_NUM_LUNS];
456 int channel;
457 int target;
458 int refcount;
459 struct ahd_transinfo last_tinfo;
460 struct ahd_softc *ahd;
461 ahd_linux_targ_flags flags;
462 struct scsi_inquiry_data *inq_data;
463 /*
464 * The next "fallback" period to use for narrow/wide transfers.
465 */
466 uint8_t dv_next_narrow_period;
467 uint8_t dv_next_wide_period;
468 uint8_t dv_max_width;
469 uint8_t dv_max_ppr_options;
470 uint8_t dv_last_ppr_options;
471 u_int dv_echo_size;
472 ahd_dv_state dv_state;
473 u_int dv_state_retry;
474 uint8_t *dv_buffer;
475 uint8_t *dv_buffer1;
476
477 /*
478 * Cumulative counter of errors.
479 */
480 u_long errors_detected;
481 u_long cmds_since_error;
482 };
483
484 /********************* Definitions Required by the Core ***********************/
485 /*
486 * Number of SG segments we require. So long as the S/G segments for
487 * a particular transaction are allocated in a physically contiguous
488 * manner and are allocated below 4GB, the number of S/G segments is
489 * unrestricted.
490 */
491 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
492 /*
493 * We dynamically adjust the number of segments in pre-2.5 kernels to
494 * avoid fragmentation issues in the SCSI mid-layer's private memory
495 * allocator. See aic79xx_osm.c ahd_linux_size_nseg() for details.
496 */
497 extern u_int ahd_linux_nseg;
498 #define AHD_NSEG ahd_linux_nseg
499 #define AHD_LINUX_MIN_NSEG 64
500 #else
501 #define AHD_NSEG 128
502 #endif
503
504 /*
505 * Per-SCB OSM storage.
506 */
507 typedef enum {
508 AHD_SCB_UP_EH_SEM = 0x1
509 } ahd_linux_scb_flags;
510
511 struct scb_platform_data {
512 struct ahd_linux_device *dev;
513 bus_addr_t buf_busaddr;
514 uint32_t xfer_len;
515 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
516 uint32_t resid; /* Transfer residual */
517 #endif
518 uint32_t sense_resid; /* Auto-Sense residual */
519 ahd_linux_scb_flags flags;
520 };
521
522 /*
523 * Define a structure used for each host adapter. All members are
524 * aligned on a boundary >= the size of the member to honor the
525 * alignment restrictions of the various platforms supported by
526 * this driver.
527 */
528 typedef enum {
529 AHD_DV_WAIT_SIMQ_EMPTY = 0x01,
530 AHD_DV_WAIT_SIMQ_RELEASE = 0x02,
531 AHD_DV_ACTIVE = 0x04,
532 AHD_DV_SHUTDOWN = 0x08,
533 AHD_RUN_CMPLT_Q_TIMER = 0x10
534 } ahd_linux_softc_flags;
535
536 TAILQ_HEAD(ahd_completeq, ahd_cmd);
537
538 struct ahd_platform_data {
539 /*
540 * Fields accessed from interrupt context.
541 */
542 struct ahd_linux_target *targets[AHD_NUM_TARGETS];
543 TAILQ_HEAD(, ahd_linux_device) device_runq;
544 struct ahd_completeq completeq;
545
546 spinlock_t spin_lock;
547 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
548 struct tasklet_struct runq_tasklet;
549 #endif
550 u_int qfrozen;
551 pid_t dv_pid;
552 struct timer_list completeq_timer;
553 struct timer_list reset_timer;
554 struct timer_list stats_timer;
555 struct semaphore eh_sem;
556 struct semaphore dv_sem;
557 struct semaphore dv_cmd_sem; /* XXX This needs to be in
558 * the target struct
559 */
560 struct scsi_device *dv_scsi_dev;
561 struct Scsi_Host *host; /* pointer to scsi host */
562 #define AHD_LINUX_NOIRQ ((uint32_t)~0)
563 uint32_t irq; /* IRQ for this adapter */
564 uint32_t bios_address;
565 uint32_t mem_busaddr; /* Mem Base Addr */
566 bus_addr_t hw_dma_mask;
567 ahd_linux_softc_flags flags;
568 };
569
570 /************************** OS Utility Wrappers *******************************/
571 #define printf printk
572 #define M_NOWAIT GFP_ATOMIC
573 #define M_WAITOK 0
574 #define malloc(size, type, flags) kmalloc(size, flags)
575 #define free(ptr, type) kfree(ptr)
576
577 static __inline void ahd_delay(long);
578 static __inline void
ahd_delay(long usec)
580 {
581 /*
582 * udelay on Linux can have problems for
583 * multi-millisecond waits. Wait at most
584 * 1024us per call.
585 */
586 while (usec > 0) {
		udelay(usec > 1024 ? 1024 : usec);
588 usec -= 1024;
589 }
590 }
591
592
593 /***************************** Low Level I/O **********************************/
594 #if defined(__powerpc__) || defined(__i386__) || defined(__ia64__)
595 #define MMAPIO
596 #endif
597
598 static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port);
599 static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
600 static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
601 static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
602 long port, uint16_t val);
603 static __inline void ahd_outsb(struct ahd_softc * ahd, long port,
604 uint8_t *, int count);
605 static __inline void ahd_insb(struct ahd_softc * ahd, long port,
606 uint8_t *, int count);
607
608 static __inline uint8_t
ahd_inb(struct ahd_softc * ahd, long port)
610 {
611 uint8_t x;
612 #ifdef MMAPIO
613
614 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
615 x = readb(ahd->bshs[0].maddr + port);
616 } else {
617 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
618 }
619 #else
620 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
621 #endif
622 mb();
623 return (x);
624 }
625
626 static __inline uint16_t
ahd_inw_atomic(struct ahd_softc * ahd, long port)
628 {
	uint16_t x;
630 #ifdef MMAPIO
631
632 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
633 x = readw(ahd->bshs[0].maddr + port);
634 } else {
635 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
636 }
637 #else
638 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
639 #endif
640 mb();
641 return (x);
642 }
643
644 static __inline void
ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
646 {
647 #ifdef MMAPIO
648 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
649 writeb(val, ahd->bshs[0].maddr + port);
650 } else {
651 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
652 }
653 #else
654 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
655 #endif
656 mb();
657 }
658
659 static __inline void
ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
661 {
662 #ifdef MMAPIO
663 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
664 writew(val, ahd->bshs[0].maddr + port);
665 } else {
666 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
667 }
668 #else
669 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
670 #endif
671 mb();
672 }
673
674 static __inline void
ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
676 {
677 int i;
678
679 /*
680 * There is probably a more efficient way to do this on Linux
681 * but we don't use this for anything speed critical and this
682 * should work.
683 */
684 for (i = 0; i < count; i++)
685 ahd_outb(ahd, port, *array++);
686 }
687
688 static __inline void
ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
690 {
691 int i;
692
693 /*
694 * There is probably a more efficient way to do this on Linux
695 * but we don't use this for anything speed critical and this
696 * should work.
697 */
698 for (i = 0; i < count; i++)
699 *array++ = ahd_inb(ahd, port);
700 }
701
702 /**************************** Initialization **********************************/
703 int ahd_linux_register_host(struct ahd_softc *,
704 Scsi_Host_Template *);
705
706 uint64_t ahd_linux_get_memsize(void);
707
708 /*************************** Pretty Printing **********************************/
709 struct info_str {
710 char *buffer;
711 int length;
712 off_t offset;
713 int pos;
714 };
715
716 void ahd_format_transinfo(struct info_str *info,
717 struct ahd_transinfo *tinfo);
718
719 /******************************** Locking *************************************/
720 /* Lock protecting internal data structures */
721 static __inline void ahd_lockinit(struct ahd_softc *);
722 static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags);
723 static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags);
724
725 /* Lock acquisition and release of the above lock in midlayer entry points. */
726 static __inline void ahd_midlayer_entrypoint_lock(struct ahd_softc *,
727 unsigned long *flags);
728 static __inline void ahd_midlayer_entrypoint_unlock(struct ahd_softc *,
729 unsigned long *flags);
730
/* Lock held during command completion to the upper layer */
732 static __inline void ahd_done_lockinit(struct ahd_softc *);
733 static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags);
734 static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags);
735
736 /* Lock held during ahd_list manipulation and ahd softc frees */
737 extern spinlock_t ahd_list_spinlock;
738 static __inline void ahd_list_lockinit(void);
739 static __inline void ahd_list_lock(unsigned long *flags);
740 static __inline void ahd_list_unlock(unsigned long *flags);
741
742 static __inline void
ahd_lockinit(struct ahd_softc *ahd)
744 {
745 spin_lock_init(&ahd->platform_data->spin_lock);
746 }
747
748 static __inline void
ahd_lock(struct ahd_softc *ahd, unsigned long *flags)
750 {
751 spin_lock_irqsave(&ahd->platform_data->spin_lock, *flags);
752 }
753
754 static __inline void
ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
756 {
757 spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
758 }
759
760 static __inline void
ahd_midlayer_entrypoint_lock(struct ahd_softc *ahd, unsigned long *flags)
762 {
763 /*
764 * In 2.5.X and some 2.4.X versions, the midlayer takes our
765 * lock just before calling us, so we avoid locking again.
766 * For other kernel versions, the io_request_lock is taken
767 * just before our entry point is called. In this case, we
768 * trade the io_request_lock for our per-softc lock.
769 */
770 #if AHD_SCSI_HAS_HOST_LOCK == 0
771 spin_unlock(&io_request_lock);
772 spin_lock(&ahd->platform_data->spin_lock);
773 #endif
774 }
775
776 static __inline void
ahd_midlayer_entrypoint_unlock(struct ahd_softc *ahd, unsigned long *flags)
778 {
779 #if AHD_SCSI_HAS_HOST_LOCK == 0
780 spin_unlock(&ahd->platform_data->spin_lock);
781 spin_lock(&io_request_lock);
782 #endif
783 }
784
785 static __inline void
ahd_done_lockinit(struct ahd_softc *ahd)
787 {
788 /*
789 * In 2.5.X, our own lock is held during completions.
790 * In previous versions, the io_request_lock is used.
791 * In either case, we can't initialize this lock again.
792 */
793 }
794
795 static __inline void
ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags)
797 {
798 #if AHD_SCSI_HAS_HOST_LOCK == 0
799 spin_lock(&io_request_lock);
800 #endif
801 }
802
803 static __inline void
ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
805 {
806 #if AHD_SCSI_HAS_HOST_LOCK == 0
807 spin_unlock(&io_request_lock);
808 #endif
809 }
810
811 static __inline void
ahd_list_lockinit()
813 {
814 spin_lock_init(&ahd_list_spinlock);
815 }
816
817 static __inline void
ahd_list_lock(unsigned long *flags)
819 {
820 spin_lock_irqsave(&ahd_list_spinlock, *flags);
821 }
822
823 static __inline void
ahd_list_unlock(unsigned long *flags)
825 {
826 spin_unlock_irqrestore(&ahd_list_spinlock, *flags);
827 }
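/*
 * Typical usage in a mid-layer entry point (sketch only; the real work
 * between lock and unlock is omitted):
 *
 *	u_long flags;
 *
 *	ahd_midlayer_entrypoint_lock(ahd, &flags);
 *	... manipulate per-softc state ...
 *	ahd_midlayer_entrypoint_unlock(ahd, &flags);
 *
 * Code that is not entered with a mid-layer lock already held uses
 * ahd_lock()/ahd_unlock() instead, which disable interrupts and save the
 * interrupt state in the caller-supplied flags word.
 */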
828
829 /******************************* PCI Definitions ******************************/
830 /*
831 * PCIM_xxx: mask to locate subfield in register
832 * PCIR_xxx: config register offset
833 * PCIC_xxx: device class
834 * PCIS_xxx: device subclass
835 * PCIP_xxx: device programming interface
836 * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
837 * PCID_xxx: device ID
838 */
839 #define PCIR_DEVVENDOR 0x00
840 #define PCIR_VENDOR 0x00
841 #define PCIR_DEVICE 0x02
842 #define PCIR_COMMAND 0x04
843 #define PCIM_CMD_PORTEN 0x0001
844 #define PCIM_CMD_MEMEN 0x0002
845 #define PCIM_CMD_BUSMASTEREN 0x0004
846 #define PCIM_CMD_MWRICEN 0x0010
847 #define PCIM_CMD_PERRESPEN 0x0040
848 #define PCIM_CMD_SERRESPEN 0x0100
849 #define PCIR_STATUS 0x06
850 #define PCIR_REVID 0x08
851 #define PCIR_PROGIF 0x09
852 #define PCIR_SUBCLASS 0x0a
853 #define PCIR_CLASS 0x0b
854 #define PCIR_CACHELNSZ 0x0c
855 #define PCIR_LATTIMER 0x0d
856 #define PCIR_HEADERTYPE 0x0e
857 #define PCIM_MFDEV 0x80
858 #define PCIR_BIST 0x0f
859 #define PCIR_CAP_PTR 0x34
860
861 /* config registers for header type 0 devices */
862 #define PCIR_MAPS 0x10
863 #define PCIR_SUBVEND_0 0x2c
864 #define PCIR_SUBDEV_0 0x2e
865
866 /****************************** PCI-X definitions *****************************/
867 #define PCIXR_COMMAND 0x96
868 #define PCIXR_DEVADDR 0x98
869 #define PCIXM_DEVADDR_FNUM 0x0003 /* Function Number */
870 #define PCIXM_DEVADDR_DNUM 0x00F8 /* Device Number */
871 #define PCIXM_DEVADDR_BNUM 0xFF00 /* Bus Number */
872 #define PCIXR_STATUS 0x9A
873 #define PCIXM_STATUS_64BIT 0x0001 /* Active 64bit connection to device. */
874 #define PCIXM_STATUS_133CAP 0x0002 /* Device is 133MHz capable */
875 #define PCIXM_STATUS_SCDISC 0x0004 /* Split Completion Discarded */
876 #define PCIXM_STATUS_UNEXPSC 0x0008 /* Unexpected Split Completion */
877 #define PCIXM_STATUS_CMPLEXDEV 0x0010 /* Device Complexity (set == bridge) */
878 #define PCIXM_STATUS_MAXMRDBC 0x0060 /* Maximum Burst Read Count */
879 #define PCIXM_STATUS_MAXSPLITS 0x0380 /* Maximum Split Transactions */
880 #define PCIXM_STATUS_MAXCRDS 0x1C00 /* Maximum Cumulative Read Size */
881 #define PCIXM_STATUS_RCVDSCEM 0x2000 /* Received a Split Comp w/Error msg */
882
883 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
884 extern struct pci_driver aic79xx_pci_driver;
885 #endif
886
887 typedef enum
888 {
889 AHD_POWER_STATE_D0,
890 AHD_POWER_STATE_D1,
891 AHD_POWER_STATE_D2,
892 AHD_POWER_STATE_D3
893 } ahd_power_state;
894
895 void ahd_power_state_change(struct ahd_softc *ahd,
896 ahd_power_state new_state);
897
898 /******************************* PCI Routines *********************************/
899 int ahd_linux_pci_init(void);
900 void ahd_linux_pci_exit(void);
901 int ahd_pci_map_registers(struct ahd_softc *ahd);
902 int ahd_pci_map_int(struct ahd_softc *ahd);
903
904 static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
905 int reg, int width);
906
907 static __inline uint32_t
ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
909 {
910 switch (width) {
911 case 1:
912 {
913 uint8_t retval;
914
915 pci_read_config_byte(pci, reg, &retval);
916 return (retval);
917 }
918 case 2:
919 {
920 uint16_t retval;
921 pci_read_config_word(pci, reg, &retval);
922 return (retval);
923 }
924 case 4:
925 {
926 uint32_t retval;
927 pci_read_config_dword(pci, reg, &retval);
928 return (retval);
929 }
930 default:
931 panic("ahd_pci_read_config: Read size too big");
932 /* NOTREACHED */
933 return (0);
934 }
935 }
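/*
 * Example (sketch only): read the combined vendor/device ID in a single
 * dword access, using the register offsets defined above and the
 * ahd_dev_softc_t handle kept in the core softc.
 *
 *	uint32_t devvendor;
 *
 *	devvendor = ahd_pci_read_config(ahd->dev_softc, PCIR_DEVVENDOR, 4);
 */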
936
937 static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
938 int reg, uint32_t value,
939 int width);
940
941 static __inline void
ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
943 {
944 switch (width) {
945 case 1:
946 pci_write_config_byte(pci, reg, value);
947 break;
948 case 2:
949 pci_write_config_word(pci, reg, value);
950 break;
951 case 4:
952 pci_write_config_dword(pci, reg, value);
953 break;
954 default:
955 panic("ahd_pci_write_config: Write size too big");
956 /* NOTREACHED */
957 }
958 }
959
960 static __inline int ahd_get_pci_function(ahd_dev_softc_t);
961 static __inline int
ahd_get_pci_function(ahd_dev_softc_t pci)
963 {
964 return (PCI_FUNC(pci->devfn));
965 }
966
967 static __inline int ahd_get_pci_slot(ahd_dev_softc_t);
968 static __inline int
ahd_get_pci_slot(ahd_dev_softc_t pci)
970 {
971 return (PCI_SLOT(pci->devfn));
972 }
973
974 static __inline int ahd_get_pci_bus(ahd_dev_softc_t);
975 static __inline int
ahd_get_pci_bus(ahd_dev_softc_t pci)
977 {
978 return (pci->bus->number);
979 }
980
981 static __inline void ahd_flush_device_writes(struct ahd_softc *);
982 static __inline void
ahd_flush_device_writes(struct ahd_softc *ahd)
984 {
985 /* XXX Is this sufficient for all architectures??? */
986 ahd_inb(ahd, INTSTAT);
987 }
988
989 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,3,0)
990 #define pci_map_sg(pdev, sg_list, nseg, direction) (nseg)
991 #define pci_unmap_sg(pdev, sg_list, nseg, direction)
992 #define sg_dma_address(sg) (VIRT_TO_BUS((sg)->address))
993 #define sg_dma_len(sg) ((sg)->length)
994 #define pci_map_single(pdev, buffer, bufflen, direction) \
995 (VIRT_TO_BUS(buffer))
996 #define pci_unmap_single(pdev, buffer, buflen, direction)
997 #endif
998
999 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,3)
1000 #define ahd_pci_set_dma_mask pci_set_dma_mask
1001 #else
1002 /*
1003 * Always "return" 0 for success.
1004 */
1005 #define ahd_pci_set_dma_mask(dev_softc, mask) \
1006 (((dev_softc)->dma_mask = mask) && 0)
1007 #endif
1008 /**************************** Proc FS Support *********************************/
1009 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
1010 int ahd_linux_proc_info(char *, char **, off_t, int, int, int);
1011 #else
1012 int ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
1013 off_t, int, int);
1014 #endif
1015
1016 /*************************** Domain Validation ********************************/
1017 #define AHD_DV_CMD(cmd) ((cmd)->scsi_done == ahd_linux_dv_complete)
1018 #define AHD_DV_SIMQ_FROZEN(ahd) \
1019 ((((ahd)->platform_data->flags & AHD_DV_ACTIVE) != 0) \
1020 && (ahd)->platform_data->qfrozen == 1)
1021
1022 /*********************** Transaction Access Wrappers **************************/
1023 static __inline void ahd_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
1024 static __inline void ahd_set_transaction_status(struct scb *, uint32_t);
1025 static __inline void ahd_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
1026 static __inline void ahd_set_scsi_status(struct scb *, uint32_t);
1027 static __inline uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd);
1028 static __inline uint32_t ahd_get_transaction_status(struct scb *);
1029 static __inline uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd);
1030 static __inline uint32_t ahd_get_scsi_status(struct scb *);
1031 static __inline void ahd_set_transaction_tag(struct scb *, int, u_int);
1032 static __inline u_long ahd_get_transfer_length(struct scb *);
1033 static __inline int ahd_get_transfer_dir(struct scb *);
1034 static __inline void ahd_set_residual(struct scb *, u_long);
1035 static __inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
1036 static __inline u_long ahd_get_residual(struct scb *);
1037 static __inline u_long ahd_get_sense_residual(struct scb *);
1038 static __inline int ahd_perform_autosense(struct scb *);
1039 static __inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
1040 struct scb *);
1041 static __inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
1042 struct ahd_devinfo *);
1043 static __inline void ahd_platform_scb_free(struct ahd_softc *ahd,
1044 struct scb *scb);
1045 static __inline void ahd_freeze_scb(struct scb *scb);
1046
1047 static __inline
void ahd_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
1049 {
1050 cmd->result &= ~(CAM_STATUS_MASK << 16);
1051 cmd->result |= status << 16;
1052 }
1053
1054 static __inline
void ahd_set_transaction_status(struct scb *scb, uint32_t status)
1056 {
1057 ahd_cmd_set_transaction_status(scb->io_ctx,status);
1058 }
1059
1060 static __inline
void ahd_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
1062 {
1063 cmd->result &= ~0xFFFF;
1064 cmd->result |= status;
1065 }
1066
1067 static __inline
void ahd_set_scsi_status(struct scb *scb, uint32_t status)
1069 {
1070 ahd_cmd_set_scsi_status(scb->io_ctx, status);
1071 }
1072
1073 static __inline
uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd)
1075 {
1076 return ((cmd->result >> 16) & CAM_STATUS_MASK);
1077 }
1078
1079 static __inline
uint32_t ahd_get_transaction_status(struct scb *scb)
1081 {
1082 return (ahd_cmd_get_transaction_status(scb->io_ctx));
1083 }
1084
1085 static __inline
uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd)
1087 {
1088 return (cmd->result & 0xFFFF);
1089 }
1090
1091 static __inline
uint32_t ahd_get_scsi_status(struct scb *scb)
1093 {
1094 return (ahd_cmd_get_scsi_status(scb->io_ctx));
1095 }
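/*
 * Sketch of how the core typically reports a successfully completed
 * transaction through these wrappers (CAM_REQ_CMP and SCSI_STATUS_OK
 * come from the shared aic library headers included above):
 *
 *	ahd_set_transaction_status(scb, CAM_REQ_CMP);
 *	ahd_set_scsi_status(scb, SCSI_STATUS_OK);
 */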
1096
1097 static __inline
void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type)
1099 {
	/*
	 * Nothing to do for Linux, as the incoming transaction
	 * has no concept of tagged vs. non-tagged, etc.
	 */
1104 }
1105
1106 static __inline
u_long ahd_get_transfer_length(struct scb *scb)
1108 {
1109 return (scb->platform_data->xfer_len);
1110 }
1111
1112 static __inline
int ahd_get_transfer_dir(struct scb *scb)
1114 {
1115 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,40)
1116 return (scb->io_ctx->sc_data_direction);
1117 #else
1118 if (scb->io_ctx->bufflen == 0)
1119 return (CAM_DIR_NONE);
1120
1121 switch(scb->io_ctx->cmnd[0]) {
1122 case 0x08: /* READ(6) */
1123 case 0x28: /* READ(10) */
1124 case 0xA8: /* READ(12) */
1125 return (CAM_DIR_IN);
1126 case 0x0A: /* WRITE(6) */
1127 case 0x2A: /* WRITE(10) */
1128 case 0xAA: /* WRITE(12) */
1129 return (CAM_DIR_OUT);
1130 default:
1131 return (CAM_DIR_NONE);
1132 }
1133 #endif
1134 }
1135
1136 static __inline
void ahd_set_residual(struct scb *scb, u_long resid)
1138 {
1139 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
1140 scb->io_ctx->resid = resid;
1141 #else
1142 scb->platform_data->resid = resid;
1143 #endif
1144 }
1145
1146 static __inline
void ahd_set_sense_residual(struct scb *scb, u_long resid)
1148 {
1149 scb->platform_data->sense_resid = resid;
1150 }
1151
1152 static __inline
u_long ahd_get_residual(struct scb *scb)
1154 {
1155 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
1156 return (scb->io_ctx->resid);
1157 #else
1158 return (scb->platform_data->resid);
1159 #endif
1160 }
1161
1162 static __inline
u_long ahd_get_sense_residual(struct scb *scb)
1164 {
1165 return (scb->platform_data->sense_resid);
1166 }
1167
1168 static __inline
int ahd_perform_autosense(struct scb *scb)
1170 {
1171 /*
1172 * We always perform autosense in Linux.
1173 * On other platforms this is set on a
1174 * per-transaction basis.
1175 */
1176 return (1);
1177 }
1178
1179 static __inline uint32_t
ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb)
1181 {
1182 return (sizeof(struct scsi_sense_data));
1183 }
1184
1185 static __inline void
ahd_notify_xfer_settings_change(struct ahd_softc *ahd,
				struct ahd_devinfo *devinfo)
1188 {
	/* Nothing to do here for Linux. */
1190 }
1191
1192 static __inline void
ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb)
1194 {
1195 ahd->flags &= ~AHD_RESOURCE_SHORTAGE;
1196 }
1197
1198 int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg);
1199 void ahd_platform_free(struct ahd_softc *ahd);
1200 void ahd_platform_init(struct ahd_softc *ahd);
1201 void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
1202 void ahd_freeze_simq(struct ahd_softc *ahd);
1203 void ahd_release_simq(struct ahd_softc *ahd);
1204
1205 static __inline void
ahd_freeze_scb(struct scb *scb)
1207 {
1208 if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
1209 scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
1210 scb->platform_data->dev->qfrozen++;
1211 }
1212 }
1213
1214 void ahd_platform_set_tags(struct ahd_softc *ahd,
1215 struct ahd_devinfo *devinfo, ahd_queue_alg);
1216 int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target,
1217 char channel, int lun, u_int tag,
1218 role_t role, uint32_t status);
1219 irqreturn_t
1220 ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
1221 void ahd_platform_flushwork(struct ahd_softc *ahd);
1222 int ahd_softc_comp(struct ahd_softc *, struct ahd_softc *);
1223 void ahd_done(struct ahd_softc*, struct scb*);
1224 void ahd_send_async(struct ahd_softc *, char channel,
1225 u_int target, u_int lun, ac_code, void *);
1226 void ahd_print_path(struct ahd_softc *, struct scb *);
1227 void ahd_platform_dump_card_state(struct ahd_softc *ahd);
1228
1229 #ifdef CONFIG_PCI
1230 #define AHD_PCI_CONFIG 1
1231 #else
1232 #define AHD_PCI_CONFIG 0
1233 #endif
1234 #define bootverbose aic79xx_verbose
1235 extern uint32_t aic79xx_verbose;
1236 #endif /* _AIC79XX_LINUX_H_ */
1237