/*
 * Adaptec AIC7xxx device driver for Linux.
 *
 * Copyright (c) 1994 John Aycock
 *   The University of Calgary Department of Computer Science.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Copyright (c) 2000-2003 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.h#150 $
 *
 */
#ifndef _AIC7XXX_LINUX_H_
#define _AIC7XXX_LINUX_H_

#include <linux/types.h>
#include <linux/blk.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/smp_lock.h>
#include <linux/version.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include <asm/io.h>

#ifndef KERNEL_VERSION
#define KERNEL_VERSION(x,y,z) (((x)<<16)+((y)<<8)+(z))
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
#include <linux/interrupt.h>	/* For tasklet support. */
#include <linux/config.h>
#include <linux/slab.h>
#else
#include <linux/malloc.h>
#endif

/* Core SCSI definitions */
#define AIC_LIB_PREFIX ahc
#include "scsi.h"
#include "hosts.h"

/* Name space conflict with BSD queue macros */
#ifdef LIST_HEAD
#undef LIST_HEAD
#endif

#include "cam.h"
#include "queue.h"
#include "scsi_message.h"
#include "aiclib.h"

/*********************************** Debugging ********************************/
#ifdef CONFIG_AIC7XXX_DEBUG_ENABLE
#ifdef CONFIG_AIC7XXX_DEBUG_MASK
#define AHC_DEBUG 1
#define AHC_DEBUG_OPTS CONFIG_AIC7XXX_DEBUG_MASK
#else
/*
 * Compile in debugging code, but do not enable any printfs.
 */
#define AHC_DEBUG 1
#endif
/* No debugging code. */
#endif

/************************* Forward Declarations *******************************/
struct ahc_softc;
typedef struct pci_dev *ahc_dev_softc_t;
typedef Scsi_Cmnd      *ahc_io_ctx_t;

/******************************* Byte Order ***********************************/
#define ahc_htobe16(x) cpu_to_be16(x)
#define ahc_htobe32(x) cpu_to_be32(x)
#define ahc_htobe64(x) cpu_to_be64(x)
#define ahc_htole16(x) cpu_to_le16(x)
#define ahc_htole32(x) cpu_to_le32(x)
#define ahc_htole64(x) cpu_to_le64(x)

#define ahc_be16toh(x) be16_to_cpu(x)
#define ahc_be32toh(x) be32_to_cpu(x)
#define ahc_be64toh(x) be64_to_cpu(x)
#define ahc_le16toh(x) le16_to_cpu(x)
#define ahc_le32toh(x) le32_to_cpu(x)
#define ahc_le64toh(x) le64_to_cpu(x)

#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN 1234
#endif

#ifndef BIG_ENDIAN
#define BIG_ENDIAN 4321
#endif

#ifndef BYTE_ORDER
#if defined(__BIG_ENDIAN)
#define BYTE_ORDER BIG_ENDIAN
#endif
#if defined(__LITTLE_ENDIAN)
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#endif /* BYTE_ORDER */

/************************* Configuration Data *********************************/
extern u_int aic7xxx_no_probe;
extern u_int aic7xxx_allow_memio;
extern int aic7xxx_detect_complete;
extern Scsi_Host_Template aic7xxx_driver_template;

/***************************** Bus Space/DMA **********************************/

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,2,17)
typedef dma_addr_t bus_addr_t;
#else
typedef uint32_t bus_addr_t;
#endif
typedef uint32_t bus_size_t;

typedef enum {
        BUS_SPACE_MEMIO,
        BUS_SPACE_PIO
} bus_space_tag_t;

typedef union {
        u_long            ioport;
        volatile uint8_t *maddr;
} bus_space_handle_t;

typedef struct bus_dma_segment
{
        bus_addr_t ds_addr;
        bus_size_t ds_len;
} bus_dma_segment_t;

struct ahc_linux_dma_tag
{
        bus_size_t alignment;
        bus_size_t boundary;
        bus_size_t maxsize;
};
typedef struct ahc_linux_dma_tag* bus_dma_tag_t;

struct ahc_linux_dmamap
{
        bus_addr_t bus_addr;
};
typedef struct ahc_linux_dmamap* bus_dmamap_t;

typedef int bus_dma_filter_t(void*, bus_addr_t);
typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);

#define BUS_DMA_WAITOK          0x0
#define BUS_DMA_NOWAIT          0x1
#define BUS_DMA_ALLOCNOW        0x2
#define BUS_DMA_LOAD_SEGS       0x4     /*
                                         * Argument is an S/G list not
                                         * a single buffer.
                                         */

#define BUS_SPACE_MAXADDR       0xFFFFFFFF
#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF

int     ahc_dma_tag_create(struct ahc_softc *, bus_dma_tag_t /*parent*/,
                           bus_size_t /*alignment*/, bus_size_t /*boundary*/,
                           bus_addr_t /*lowaddr*/, bus_addr_t /*highaddr*/,
                           bus_dma_filter_t*/*filter*/, void */*filterarg*/,
                           bus_size_t /*maxsize*/, int /*nsegments*/,
                           bus_size_t /*maxsegsz*/, int /*flags*/,
                           bus_dma_tag_t */*dma_tagp*/);

void    ahc_dma_tag_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/);

int     ahc_dmamem_alloc(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
                         void** /*vaddr*/, int /*flags*/,
                         bus_dmamap_t* /*mapp*/);

void    ahc_dmamem_free(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
                        void* /*vaddr*/, bus_dmamap_t /*map*/);

void    ahc_dmamap_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/,
                           bus_dmamap_t /*map*/);

int     ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t /*dmat*/,
                        bus_dmamap_t /*map*/, void * /*buf*/,
                        bus_size_t /*buflen*/, bus_dmamap_callback_t *,
                        void */*callback_arg*/, int /*flags*/);

int     ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);

/*
 * Operations performed by ahc_dmamap_sync().
 */
#define BUS_DMASYNC_PREREAD     0x01    /* pre-read synchronization */
#define BUS_DMASYNC_POSTREAD    0x02    /* post-read synchronization */
#define BUS_DMASYNC_PREWRITE    0x04    /* pre-write synchronization */
#define BUS_DMASYNC_POSTWRITE   0x08    /* post-write synchronization */

/*
 * XXX
 * ahc_dmamap_sync is only used on buffers allocated with
 * the pci_alloc_consistent() API.  Although I'm not sure how
 * this works on architectures with a write buffer, Linux does
 * not have an API to sync "coherent" memory.  Perhaps we need
 * to do an mb()?
 */
#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op)
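
/*
 * Illustrative sketch only: a typical allocation sequence using the
 * wrappers declared above.  The parent tag, buffer size and callback
 * names here are placeholders; see aic7xxx_osm.c for the driver's
 * actual usage.
 *
 *      bus_dma_tag_t   data_dmat;
 *      bus_dmamap_t    data_map;
 *      void            *vaddr;
 *
 *      ahc_dma_tag_create(ahc, parent_dmat, 1, 0,
 *                         BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *                         NULL, NULL, size, 1, size, 0, &data_dmat);
 *      ahc_dmamem_alloc(ahc, data_dmat, &vaddr, BUS_DMA_NOWAIT, &data_map);
 *      ahc_dmamap_load(ahc, data_dmat, data_map, vaddr, size,
 *                      callback, callback_arg, 0);
 *      ...
 *      ahc_dmamap_unload(ahc, data_dmat, data_map);
 *      ahc_dmamem_free(ahc, data_dmat, vaddr, data_map);
 *      ahc_dma_tag_destroy(ahc, data_dmat);
 */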

/************************** Timer DataStructures ******************************/
typedef struct timer_list ahc_timer_t;

/********************************** Includes **********************************/
#ifdef CONFIG_AIC7XXX_REG_PRETTY_PRINT
#define AIC_DEBUG_REGISTERS 1
#else
#define AIC_DEBUG_REGISTERS 0
#endif
#include "aic7xxx.h"

/***************************** Timer Facilities *******************************/
#define ahc_timer_init init_timer
#define ahc_timer_stop del_timer_sync
typedef void ahc_linux_callback_t (u_long);
static __inline void ahc_timer_reset(ahc_timer_t *timer, int usec,
                                     ahc_callback_t *func, void *arg);
static __inline void ahc_scb_timer_reset(struct scb *scb, u_int usec);

static __inline void
ahc_timer_reset(ahc_timer_t *timer, int usec, ahc_callback_t *func, void *arg)
{
        struct ahc_softc *ahc;

        ahc = (struct ahc_softc *)arg;
        del_timer(timer);
        timer->data = (u_long)arg;
        timer->expires = jiffies + (usec * HZ)/1000000;
        timer->function = (ahc_linux_callback_t*)func;
        add_timer(timer);
}
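
/*
 * Conversion example (illustrative): with HZ == 100, a usec value of
 * 15000 (15ms) yields jiffies + (15000 * 100) / 1000000 == jiffies + 1
 * tick; values below one jiffy truncate to zero.  Note that the
 * intermediate usec * HZ product is evaluated in int arithmetic, so
 * very large usec values could overflow on 32-bit platforms.
 */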

static __inline void
ahc_scb_timer_reset(struct scb *scb, u_int usec)
{
        mod_timer(&scb->io_ctx->eh_timeout, jiffies + (usec * HZ)/1000000);
}

/***************************** SMP support ************************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,17)
#include <linux/spinlock.h>
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,93)
#include <linux/smp.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
#define AHC_SCSI_HAS_HOST_LOCK 1
#else
#define AHC_SCSI_HAS_HOST_LOCK 0
#endif

#define AIC7XXX_DRIVER_VERSION "6.2.36"

/**************************** Front End Queues ********************************/
/*
 * Data structure used to cast the Linux struct scsi_cmnd to something
 * that allows us to use the queue macros.  The Linux structure has
 * plenty of space to hold the link fields required by the queue
 * macros, but the queue macros require them to have the correct type.
 */
struct ahc_cmd_internal {
        /* Area owned by the Linux scsi layer. */
        uint8_t private[offsetof(struct scsi_cmnd, SCp.Status)];
        union {
                STAILQ_ENTRY(ahc_cmd)   ste;
                LIST_ENTRY(ahc_cmd)     le;
                TAILQ_ENTRY(ahc_cmd)    tqe;
        } links;
        uint32_t end;
};

struct ahc_cmd {
        union {
                struct ahc_cmd_internal icmd;
                struct scsi_cmnd scsi_cmd;
        } un;
};

#define acmd_icmd(cmd) ((cmd)->un.icmd)
#define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
#define acmd_links un.icmd.links
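
/*
 * Illustrative only: the cast below shows how an incoming Scsi_Cmnd can
 * be treated as an ahc_cmd so that the BSD queue macros link it onto a
 * per-device busy queue.  The "dev" pointer is hypothetical.
 *
 *      struct ahc_cmd *acmd = (struct ahc_cmd *)cmd;
 *
 *      TAILQ_INSERT_TAIL(&dev->busyq, acmd, acmd_links.tqe);
 *      ...
 *      acmd = TAILQ_FIRST(&dev->busyq);
 *      TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
 */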

/*************************** Device Data Structures ***************************/
/*
 * A per probed device structure used to deal with some error recovery
 * scenarios that the Linux mid-layer code just doesn't know how to
 * handle.  The structure allocated for a device only becomes persistent
 * after a successfully completed inquiry command to the target when
 * that inquiry data indicates a lun is present.
 */
TAILQ_HEAD(ahc_busyq, ahc_cmd);
typedef enum {
        AHC_DEV_UNCONFIGURED     = 0x01,
        AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
        AHC_DEV_TIMER_ACTIVE     = 0x04, /* Our timer is active */
        AHC_DEV_ON_RUN_LIST      = 0x08, /* Queued to be run later */
        AHC_DEV_Q_BASIC          = 0x10, /* Allow basic device queuing */
        AHC_DEV_Q_TAGGED         = 0x20, /* Allow full SCSI2 command queueing */
        AHC_DEV_PERIODIC_OTAG    = 0x40, /* Send OTAG to prevent starvation */
        AHC_DEV_SLAVE_CONFIGURED = 0x80  /* slave_configure() has been called */
} ahc_linux_dev_flags;

struct ahc_linux_target;
struct ahc_linux_device {
        TAILQ_ENTRY(ahc_linux_device) links;
        struct ahc_busyq busyq;

        /*
         * The number of transactions currently
         * queued to the device.
         */
        int active;

        /*
         * The currently allowed number of
         * transactions that can be queued to
         * the device.  Must be signed for
         * conversion from tagged to untagged
         * mode where the device may have more
         * than one outstanding active transaction.
         */
        int openings;

        /*
         * A positive count indicates that this
         * device's queue is halted.
         */
        u_int qfrozen;

        /*
         * Cumulative command counter.
         */
        u_long commands_issued;

        /*
         * The number of tagged transactions when
         * running at our current opening level
         * that have been successfully received by
         * this device since the last QUEUE FULL.
         */
        u_int tag_success_count;
#define AHC_TAG_SUCCESS_INTERVAL 50

        ahc_linux_dev_flags flags;

        /*
         * Per device timer.
         */
        struct timer_list timer;

        /*
         * The high limit for the tags variable.
         */
        u_int maxtags;

        /*
         * The computed number of tags outstanding
         * at the time of the last QUEUE FULL event.
         */
        u_int tags_on_last_queuefull;

        /*
         * How many times we have seen a queue full
         * with the same number of tags.  This is used
         * to stop our adaptive queue depth algorithm
         * on devices with a fixed number of tags.
         */
        u_int last_queuefull_same_count;
#define AHC_LOCK_TAGS_COUNT 50

        /*
         * How many transactions have been queued
         * without the device going idle.  We use
         * this statistic to determine when to issue
         * an ordered tag to prevent transaction
         * starvation.  This statistic is only updated
         * if the AHC_DEV_PERIODIC_OTAG flag is set
         * on this device.
         */
        u_int commands_since_idle_or_otag;
#define AHC_OTAG_THRESH 500

        int lun;
        Scsi_Device *scsi_device;
        struct ahc_linux_target *target;
};

typedef enum {
        AHC_DV_REQUIRED  = 0x01,
        AHC_INQ_VALID    = 0x02,
        AHC_BASIC_DV     = 0x04,
        AHC_ENHANCED_DV  = 0x08
} ahc_linux_targ_flags;

/* DV States */
typedef enum {
        AHC_DV_STATE_EXIT = 0,
        AHC_DV_STATE_INQ_SHORT_ASYNC,
        AHC_DV_STATE_INQ_ASYNC,
        AHC_DV_STATE_INQ_ASYNC_VERIFY,
        AHC_DV_STATE_TUR,
        AHC_DV_STATE_REBD,
        AHC_DV_STATE_INQ_VERIFY,
        AHC_DV_STATE_WEB,
        AHC_DV_STATE_REB,
        AHC_DV_STATE_SU,
        AHC_DV_STATE_BUSY
} ahc_dv_state;

struct ahc_linux_target {
        struct ahc_linux_device *devices[AHC_NUM_LUNS];
        int channel;
        int target;
        int refcount;
        struct ahc_transinfo last_tinfo;
        struct ahc_softc *ahc;
        ahc_linux_targ_flags flags;
        struct scsi_inquiry_data *inq_data;
        /*
         * The next "fallback" period to use for narrow/wide transfers.
         */
        uint8_t dv_next_narrow_period;
        uint8_t dv_next_wide_period;
        uint8_t dv_max_width;
        uint8_t dv_max_ppr_options;
        uint8_t dv_last_ppr_options;
        u_int dv_echo_size;
        ahc_dv_state dv_state;
        u_int dv_state_retry;
        char *dv_buffer;
        char *dv_buffer1;
};

/********************* Definitions Required by the Core ***********************/
/*
 * Number of SG segments we require.  So long as the S/G segments for
 * a particular transaction are allocated in a physically contiguous
 * manner and are allocated below 4GB, the number of S/G segments is
 * unrestricted.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * We dynamically adjust the number of segments in pre-2.5 kernels to
 * avoid fragmentation issues in the SCSI mid-layer's private memory
 * allocator.  See aic7xxx_osm.c ahc_linux_size_nseg() for details.
 */
extern u_int ahc_linux_nseg;
#define AHC_NSEG ahc_linux_nseg
#define AHC_LINUX_MIN_NSEG 64
#else
#define AHC_NSEG 128
#endif

/*
 * Per-SCB OSM storage.
 */
typedef enum {
        AHC_UP_EH_SEMAPHORE = 0x1
} ahc_linux_scb_flags;

struct scb_platform_data {
        struct ahc_linux_device *dev;
        bus_addr_t buf_busaddr;
        uint32_t xfer_len;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
        uint32_t resid;         /* Transfer residual */
#endif
        uint32_t sense_resid;   /* Auto-Sense residual */
        ahc_linux_scb_flags flags;
};

/*
 * Define a structure used for each host adapter.  All members are
 * aligned on a boundary >= the size of the member to honor the
 * alignment restrictions of the various platforms supported by
 * this driver.
 */
typedef enum {
        AHC_DV_WAIT_SIMQ_EMPTY   = 0x01,
        AHC_DV_WAIT_SIMQ_RELEASE = 0x02,
        AHC_DV_ACTIVE            = 0x04,
        AHC_DV_SHUTDOWN          = 0x08,
        AHC_RUN_CMPLT_Q_TIMER    = 0x10
} ahc_linux_softc_flags;

TAILQ_HEAD(ahc_completeq, ahc_cmd);

struct ahc_platform_data {
        /*
         * Fields accessed from interrupt context.
         */
        struct ahc_linux_target *targets[AHC_NUM_TARGETS];
        TAILQ_HEAD(, ahc_linux_device) device_runq;
        struct ahc_completeq completeq;

        spinlock_t spin_lock;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
        struct tasklet_struct runq_tasklet;
#endif
        u_int qfrozen;
        pid_t dv_pid;
        struct timer_list completeq_timer;
        struct timer_list reset_timer;
        struct semaphore eh_sem;
        struct semaphore dv_sem;
        struct semaphore dv_cmd_sem;    /* XXX This needs to be in
                                         * the target struct
                                         */
        struct scsi_device *dv_scsi_dev;
        struct Scsi_Host *host;         /* pointer to scsi host */
#define AHC_LINUX_NOIRQ ((uint32_t)~0)
        uint32_t irq;                   /* IRQ for this adapter */
        uint32_t bios_address;
        uint32_t mem_busaddr;           /* Mem Base Addr */
        bus_addr_t hw_dma_mask;
        ahc_linux_softc_flags flags;
};

/************************** OS Utility Wrappers *******************************/
#define printf printk
#define M_NOWAIT GFP_ATOMIC
#define M_WAITOK 0
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)

static __inline void ahc_delay(long);
static __inline void
ahc_delay(long usec)
{
        /*
         * udelay on Linux can have problems for
         * multi-millisecond waits.  Wait at most
         * 1024us per call.
         */
        while (usec > 0) {
                udelay(usec > 1024 ? 1024 : usec);
                usec -= 1024;
        }
}


/***************************** Low Level I/O **********************************/
#if defined(__powerpc__) || defined(__i386__) || defined(__ia64__)
#define MMAPIO
#endif

static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port);
static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
static __inline void ahc_outsb(struct ahc_softc * ahc, long port,
                               uint8_t *, int count);
static __inline void ahc_insb(struct ahc_softc * ahc, long port,
                              uint8_t *, int count);

static __inline uint8_t
ahc_inb(struct ahc_softc * ahc, long port)
{
        uint8_t x;
#ifdef MMAPIO

        if (ahc->tag == BUS_SPACE_MEMIO) {
                x = readb(ahc->bsh.maddr + port);
        } else {
                x = inb(ahc->bsh.ioport + port);
        }
#else
        x = inb(ahc->bsh.ioport + port);
#endif
        mb();
        return (x);
}

static __inline void
ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
{
#ifdef MMAPIO
        if (ahc->tag == BUS_SPACE_MEMIO) {
                writeb(val, ahc->bsh.maddr + port);
        } else {
                outb(val, ahc->bsh.ioport + port);
        }
#else
        outb(val, ahc->bsh.ioport + port);
#endif
        mb();
}

static __inline void
ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
{
        int i;

        /*
         * There is probably a more efficient way to do this on Linux
         * but we don't use this for anything speed critical and this
         * should work.
         */
        for (i = 0; i < count; i++)
                ahc_outb(ahc, port, *array++);
}

static __inline void
ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
{
        int i;

        /*
         * There is probably a more efficient way to do this on Linux
         * but we don't use this for anything speed critical and this
         * should work.
         */
        for (i = 0; i < count; i++)
                *array++ = ahc_inb(ahc, port);
}

/**************************** Initialization **********************************/
int     ahc_linux_register_host(struct ahc_softc *,
                                Scsi_Host_Template *);

uint64_t ahc_linux_get_memsize(void);

/*************************** Pretty Printing **********************************/
struct info_str {
        char *buffer;
        int length;
        off_t offset;
        int pos;
};

void    ahc_format_transinfo(struct info_str *info,
                             struct ahc_transinfo *tinfo);

/******************************** Locking *************************************/
/* Lock protecting internal data structures */
static __inline void ahc_lockinit(struct ahc_softc *);
static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags);
static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags);

/* Lock acquisition and release of the above lock in midlayer entry points. */
static __inline void ahc_midlayer_entrypoint_lock(struct ahc_softc *,
                                                  unsigned long *flags);
static __inline void ahc_midlayer_entrypoint_unlock(struct ahc_softc *,
                                                    unsigned long *flags);

/* Lock held during command completion to the upper layer */
static __inline void ahc_done_lockinit(struct ahc_softc *);
static __inline void ahc_done_lock(struct ahc_softc *, unsigned long *flags);
static __inline void ahc_done_unlock(struct ahc_softc *, unsigned long *flags);

/* Lock held during ahc_list manipulation and ahc softc frees */
extern spinlock_t ahc_list_spinlock;
static __inline void ahc_list_lockinit(void);
static __inline void ahc_list_lock(unsigned long *flags);
static __inline void ahc_list_unlock(unsigned long *flags);

static __inline void
ahc_lockinit(struct ahc_softc *ahc)
{
        spin_lock_init(&ahc->platform_data->spin_lock);
}

static __inline void
ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
{
        spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags);
}

static __inline void
ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
        spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags);
}

static __inline void
ahc_midlayer_entrypoint_lock(struct ahc_softc *ahc, unsigned long *flags)
{
        /*
         * In 2.5.X and some 2.4.X versions, the midlayer takes our
         * lock just before calling us, so we avoid locking again.
         * For other kernel versions, the io_request_lock is taken
         * just before our entry point is called.  In this case, we
         * trade the io_request_lock for our per-softc lock.
         */
#if AHC_SCSI_HAS_HOST_LOCK == 0
        spin_unlock(&io_request_lock);
        spin_lock(&ahc->platform_data->spin_lock);
#endif
}

static __inline void
ahc_midlayer_entrypoint_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
        spin_unlock(&ahc->platform_data->spin_lock);
        spin_lock(&io_request_lock);
#endif
}

static __inline void
ahc_done_lockinit(struct ahc_softc *ahc)
{
        /*
         * In 2.5.X, our own lock is held during completions.
         * In previous versions, the io_request_lock is used.
         * In either case, we can't initialize this lock again.
         */
}

static __inline void
ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
        spin_lock_irqsave(&io_request_lock, *flags);
#endif
}

static __inline void
ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
        spin_unlock_irqrestore(&io_request_lock, *flags);
#endif
}

static __inline void
ahc_list_lockinit(void)
{
        spin_lock_init(&ahc_list_spinlock);
}

static __inline void
ahc_list_lock(unsigned long *flags)
{
        spin_lock_irqsave(&ahc_list_spinlock, *flags);
}

static __inline void
ahc_list_unlock(unsigned long *flags)
{
        spin_unlock_irqrestore(&ahc_list_spinlock, *flags);
}
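
/*
 * Illustrative locking pattern (not a new API): the per-softc lock is
 * taken with caller-provided flag storage for the saved interrupt
 * state, mirroring spin_lock_irqsave()/spin_unlock_irqrestore().
 *
 *      unsigned long flags;
 *
 *      ahc_lock(ahc, &flags);
 *      ... manipulate ahc state ...
 *      ahc_unlock(ahc, &flags);
 */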

/******************************* PCI Definitions ******************************/
/*
 * PCIM_xxx: mask to locate subfield in register
 * PCIR_xxx: config register offset
 * PCIC_xxx: device class
 * PCIS_xxx: device subclass
 * PCIP_xxx: device programming interface
 * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
 * PCID_xxx: device ID
 */
#define PCIR_DEVVENDOR          0x00
#define PCIR_VENDOR             0x00
#define PCIR_DEVICE             0x02
#define PCIR_COMMAND            0x04
#define PCIM_CMD_PORTEN         0x0001
#define PCIM_CMD_MEMEN          0x0002
#define PCIM_CMD_BUSMASTEREN    0x0004
#define PCIM_CMD_MWRICEN        0x0010
#define PCIM_CMD_PERRESPEN      0x0040
#define PCIM_CMD_SERRESPEN      0x0100
#define PCIR_STATUS             0x06
#define PCIR_REVID              0x08
#define PCIR_PROGIF             0x09
#define PCIR_SUBCLASS           0x0a
#define PCIR_CLASS              0x0b
#define PCIR_CACHELNSZ          0x0c
#define PCIR_LATTIMER           0x0d
#define PCIR_HEADERTYPE         0x0e
#define PCIM_MFDEV              0x80
#define PCIR_BIST               0x0f
#define PCIR_CAP_PTR            0x34

/* config registers for header type 0 devices */
#define PCIR_MAPS       0x10
#define PCIR_SUBVEND_0  0x2c
#define PCIR_SUBDEV_0   0x2e

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
extern struct pci_driver aic7xxx_pci_driver;
#endif

typedef enum
{
        AHC_POWER_STATE_D0,
        AHC_POWER_STATE_D1,
        AHC_POWER_STATE_D2,
        AHC_POWER_STATE_D3
} ahc_power_state;

/**************************** VL/EISA Routines ********************************/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) \
  && (defined(__i386__) || defined(__alpha__)) \
  && (!defined(CONFIG_EISA)))
#define CONFIG_EISA
#endif

#ifdef CONFIG_EISA
extern uint32_t aic7xxx_probe_eisa_vl;
void    ahc_linux_eisa_init(void);
void    ahc_linux_eisa_exit(void);
int     aic7770_map_registers(struct ahc_softc *ahc,
                              u_int port);
int     aic7770_map_int(struct ahc_softc *ahc, u_int irq);
#endif

/******************************* PCI Routines *********************************/
#ifdef CONFIG_PCI
void    ahc_power_state_change(struct ahc_softc *ahc,
                               ahc_power_state new_state);
int     ahc_linux_pci_init(void);
void    ahc_linux_pci_exit(void);
int     ahc_pci_map_registers(struct ahc_softc *ahc);
int     ahc_pci_map_int(struct ahc_softc *ahc);

static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
                                             int reg, int width);

static __inline uint32_t
ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
{
        switch (width) {
        case 1:
        {
                uint8_t retval;

                pci_read_config_byte(pci, reg, &retval);
                return (retval);
        }
        case 2:
        {
                uint16_t retval;
                pci_read_config_word(pci, reg, &retval);
                return (retval);
        }
        case 4:
        {
                uint32_t retval;
                pci_read_config_dword(pci, reg, &retval);
                return (retval);
        }
        default:
                panic("ahc_pci_read_config: Read size too big");
                /* NOTREACHED */
                return (0);
        }
}

static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
                                          int reg, uint32_t value,
                                          int width);

static __inline void
ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
{
        switch (width) {
        case 1:
                pci_write_config_byte(pci, reg, value);
                break;
        case 2:
                pci_write_config_word(pci, reg, value);
                break;
        case 4:
                pci_write_config_dword(pci, reg, value);
                break;
        default:
                panic("ahc_pci_write_config: Write size too big");
                /* NOTREACHED */
        }
}
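
/*
 * Illustrative only: reading the combined device/vendor ID register
 * defined above with the width-based accessor.  "pci" stands in for the
 * adapter's struct pci_dev pointer.
 *
 *      ahc_dev_softc_t pci = ...;
 *      uint32_t devvendor;
 *
 *      devvendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, 4);
 */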

static __inline int ahc_get_pci_function(ahc_dev_softc_t);
static __inline int
ahc_get_pci_function(ahc_dev_softc_t pci)
{
        return (PCI_FUNC(pci->devfn));
}

static __inline int ahc_get_pci_slot(ahc_dev_softc_t);
static __inline int
ahc_get_pci_slot(ahc_dev_softc_t pci)
{
        return (PCI_SLOT(pci->devfn));
}

static __inline int ahc_get_pci_bus(ahc_dev_softc_t);
static __inline int
ahc_get_pci_bus(ahc_dev_softc_t pci)
{
        return (pci->bus->number);
}
#endif

static __inline void ahc_flush_device_writes(struct ahc_softc *);
static __inline void
ahc_flush_device_writes(struct ahc_softc *ahc)
{
        /* XXX Is this sufficient for all architectures??? */
        ahc_inb(ahc, INTSTAT);
}

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,3,0)
#define pci_map_sg(pdev, sg_list, nseg, direction) (nseg)
#define pci_unmap_sg(pdev, sg_list, nseg, direction)
#define sg_dma_address(sg) (VIRT_TO_BUS((sg)->address))
#define sg_dma_len(sg) ((sg)->length)
#define pci_map_single(pdev, buffer, bufflen, direction) \
        (VIRT_TO_BUS(buffer))
#define pci_unmap_single(pdev, buffer, buflen, direction)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,3)
#define ahc_pci_set_dma_mask pci_set_dma_mask
#else
/*
 * Always "return" 0 for success.  The assignment expression evaluates
 * to the mask, and the trailing "&& 0" forces the macro's value to 0
 * regardless.
 */
#define ahc_pci_set_dma_mask(dev_softc, mask) \
        (((dev_softc)->dma_mask = mask) && 0)
#endif
/**************************** Proc FS Support *********************************/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
int     ahc_linux_proc_info(char *, char **, off_t, int, int, int);
#else
int     ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
                            off_t, int, int);
#endif

/*************************** Domain Validation ********************************/
#define AHC_DV_CMD(cmd) ((cmd)->scsi_done == ahc_linux_dv_complete)
#define AHC_DV_SIMQ_FROZEN(ahc)                                 \
        ((((ahc)->platform_data->flags & AHC_DV_ACTIVE) != 0)   \
         && (ahc)->platform_data->qfrozen == 1)

/*********************** Transaction Access Wrappers *************************/
static __inline void ahc_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
static __inline void ahc_set_transaction_status(struct scb *, uint32_t);
static __inline void ahc_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
static __inline void ahc_set_scsi_status(struct scb *, uint32_t);
static __inline uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd);
static __inline uint32_t ahc_get_transaction_status(struct scb *);
static __inline uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd);
static __inline uint32_t ahc_get_scsi_status(struct scb *);
static __inline void ahc_set_transaction_tag(struct scb *, int, u_int);
static __inline u_long ahc_get_transfer_length(struct scb *);
static __inline int ahc_get_transfer_dir(struct scb *);
static __inline void ahc_set_residual(struct scb *, u_long);
static __inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
static __inline u_long ahc_get_residual(struct scb *);
static __inline u_long ahc_get_sense_residual(struct scb *);
static __inline int ahc_perform_autosense(struct scb *);
static __inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
                                               struct scb *);
static __inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
                                                     struct ahc_devinfo *);
static __inline void ahc_platform_scb_free(struct ahc_softc *ahc,
                                           struct scb *scb);
static __inline void ahc_freeze_scb(struct scb *scb);

static __inline
void ahc_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
{
        cmd->result &= ~(CAM_STATUS_MASK << 16);
        cmd->result |= status << 16;
}

static __inline
void ahc_set_transaction_status(struct scb *scb, uint32_t status)
{
        ahc_cmd_set_transaction_status(scb->io_ctx, status);
}

static __inline
void ahc_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
{
        cmd->result &= ~0xFFFF;
        cmd->result |= status;
}

static __inline
void ahc_set_scsi_status(struct scb *scb, uint32_t status)
{
        ahc_cmd_set_scsi_status(scb->io_ctx, status);
}

static __inline
uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd)
{
        return ((cmd->result >> 16) & CAM_STATUS_MASK);
}

static __inline
uint32_t ahc_get_transaction_status(struct scb *scb)
{
        return (ahc_cmd_get_transaction_status(scb->io_ctx));
}

static __inline
uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd)
{
        return (cmd->result & 0xFFFF);
}

static __inline
uint32_t ahc_get_scsi_status(struct scb *scb)
{
        return (ahc_cmd_get_scsi_status(scb->io_ctx));
}
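
/*
 * Layout note (derived from the wrappers above): cmd->result holds the
 * raw SCSI status in its low 16 bits and the CAM transaction status,
 * masked by CAM_STATUS_MASK, shifted left by 16.  A hypothetical value
 * of (CAM_REQ_CMP << 16) therefore reads back as a completed
 * transaction with a zero (GOOD) SCSI status.
 */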

static __inline
void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type)
{
        /*
         * Nothing to do for linux as the incoming transaction
         * has no concept of tag/non tagged, etc.
         */
}

static __inline
u_long ahc_get_transfer_length(struct scb *scb)
{
        return (scb->platform_data->xfer_len);
}

static __inline
int ahc_get_transfer_dir(struct scb *scb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,40)
        return (scb->io_ctx->sc_data_direction);
#else
        if (scb->io_ctx->bufflen == 0)
                return (CAM_DIR_NONE);

        switch(scb->io_ctx->cmnd[0]) {
        case 0x08:  /* READ(6)  */
        case 0x28:  /* READ(10) */
        case 0xA8:  /* READ(12) */
                return (CAM_DIR_IN);
        case 0x0A:  /* WRITE(6)  */
        case 0x2A:  /* WRITE(10) */
        case 0xAA:  /* WRITE(12) */
                return (CAM_DIR_OUT);
        default:
                return (CAM_DIR_NONE);
        }
#endif
}

static __inline
void ahc_set_residual(struct scb *scb, u_long resid)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
        scb->io_ctx->resid = resid;
#else
        scb->platform_data->resid = resid;
#endif
}

static __inline
void ahc_set_sense_residual(struct scb *scb, u_long resid)
{
        scb->platform_data->sense_resid = resid;
}

static __inline
u_long ahc_get_residual(struct scb *scb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
        return (scb->io_ctx->resid);
#else
        return (scb->platform_data->resid);
#endif
}

static __inline
u_long ahc_get_sense_residual(struct scb *scb)
{
        return (scb->platform_data->sense_resid);
}

static __inline
int ahc_perform_autosense(struct scb *scb)
{
        /*
         * We always perform autosense in Linux.
         * On other platforms this is set on a
         * per-transaction basis.
         */
        return (1);
}

static __inline uint32_t
ahc_get_sense_bufsize(struct ahc_softc *ahc, struct scb *scb)
{
        return (sizeof(struct scsi_sense_data));
}

static __inline void
ahc_notify_xfer_settings_change(struct ahc_softc *ahc,
                                struct ahc_devinfo *devinfo)
{
        /* Nothing to do here for linux */
}

static __inline void
ahc_platform_scb_free(struct ahc_softc *ahc, struct scb *scb)
{
        ahc->flags &= ~AHC_RESOURCE_SHORTAGE;
}

int     ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg);
void    ahc_platform_free(struct ahc_softc *ahc);
void    ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb);

static __inline void
ahc_freeze_scb(struct scb *scb)
{
        if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
                scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
                scb->platform_data->dev->qfrozen++;
        }
}

void    ahc_platform_set_tags(struct ahc_softc *ahc,
                              struct ahc_devinfo *devinfo, ahc_queue_alg);
int     ahc_platform_abort_scbs(struct ahc_softc *ahc, int target,
                                char channel, int lun, u_int tag,
                                role_t role, uint32_t status);
irqreturn_t
        ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
void    ahc_platform_flushwork(struct ahc_softc *ahc);
int     ahc_softc_comp(struct ahc_softc *, struct ahc_softc *);
void    ahc_done(struct ahc_softc*, struct scb*);
void    ahc_send_async(struct ahc_softc *, char channel,
                       u_int target, u_int lun, ac_code, void *);
void    ahc_print_path(struct ahc_softc *, struct scb *);
void    ahc_platform_dump_card_state(struct ahc_softc *ahc);

#ifdef CONFIG_PCI
#define AHC_PCI_CONFIG 1
#else
#define AHC_PCI_CONFIG 0
#endif
#define bootverbose aic7xxx_verbose
extern u_int aic7xxx_verbose;
#endif /* _AIC7XXX_LINUX_H_ */