1 /*
2  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
3  * of PCI-SCSI IO processors.
4  *
5  * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
6  *
7  * This driver is derived from the Linux sym53c8xx driver.
8  * Copyright (C) 1998-2000  Gerard Roudier
9  *
10  * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
11  * a port of the FreeBSD ncr driver to Linux-1.2.13.
12  *
13  * The original ncr driver has been written for 386bsd and FreeBSD by
14  *         Wolfgang Stanglmeier        <wolf@cologne.de>
15  *         Stefan Esser                <se@mi.Uni-Koeln.de>
16  * Copyright (C) 1994  Wolfgang Stanglmeier
17  *
18  * Other major contributions:
19  *
20  * NVRAM detection and reading.
21  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
22  *
23  *-----------------------------------------------------------------------------
24  *
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions
27  * are met:
28  * 1. Redistributions of source code must retain the above copyright
29  *    notice, this list of conditions and the following disclaimer.
30  * 2. The name of the author may not be used to endorse or promote products
31  *    derived from this software without specific prior written permission.
32  *
33  * Where this Software is combined with software released under the terms of
34  * the GNU Public License ("GPL") and the terms of the GPL would require the
35  * combined work to also be released under the terms of the GPL, the terms
36  * and conditions of this License will apply in addition to those of the
37  * GPL with the exception of any terms or conditions of this License that
38  * conflict with, or are expressly prohibited by, the GPL.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
44  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52 
53 #ifndef SYM_MISC_H
54 #define SYM_MISC_H
55 
56 /*
57  *  A 'read barrier' flushes any data that have been prefetched
58  *  by the processor due to out of order execution. Such a barrier
59  *  must notably be inserted prior to looking at data that have
60  *  been DMAed, assuming that program does memory READs in proper
61  *  order and that the device ensured proper ordering of WRITEs.
62  *
63  *  A 'write barrier' prevents any previous WRITEs to pass further
64  *  WRITEs. Such barriers must be inserted each time another agent
65  *  relies on ordering of WRITEs.
66  *
67  *  Note that, due to posting of PCI memory writes, we also must
68  *  insert dummy PCI read transactions when some ordering involving
69  *  both directions over the PCI does matter. PCI transactions are
70  *  fully ordered in each direction.
71  *
72  *  IA32 processors insert implicit barriers when the processor
73  *  accesses uncacheable memory, either for reading or writing, and
74  *  do not reorder WRITEs. As a result, some 'read barriers' can
75  *  be avoided (following an access to uncacheable memory), and 'write
76  *  barriers' should be useless (preventing compiler optimizations
77  *  should be enough).
78  */
79 
/*
 *  Per-architecture memory barrier primitives.  Every variant carries
 *  a "memory" clobber so the compiler cannot cache memory values
 *  across the barrier (it also acts as a compiler barrier).
 */
#if	defined	__i386__
/* IA32: a locked read-modify-write on the stack serializes loads;
 * stores are not reordered by the CPU, so a compiler barrier suffices. */
#define __READ_BARRIER()	\
		__asm__ volatile("lock; addl $0,0(%%esp)": : :"memory")
#define __WRITE_BARRIER()	__asm__ volatile ("": : :"memory")
#elif	defined	__powerpc__
/* PowerPC: eieio + sync used as a full barrier for both directions */
#define __READ_BARRIER()	__asm__ volatile("eieio; sync" : : : "memory")
#define __WRITE_BARRIER()	__asm__ volatile("eieio; sync" : : : "memory")
#elif	defined	__ia64__
/* IA64: mf.a orders memory accesses to uncacheable space, mf is the fence */
#define __READ_BARRIER()	__asm__ volatile("mf.a; mf" : : : "memory")
#define __WRITE_BARRIER()	__asm__ volatile("mf.a; mf" : : : "memory")
#elif	defined	__alpha__
/* Alpha: mb is a full memory barrier */
#define __READ_BARRIER()	__asm__ volatile("mb": : :"memory")
#define __WRITE_BARRIER()	__asm__ volatile("mb": : :"memory")
#else
/* Unknown CPU: fall back to a platform-provided mb() --
 * assumed to be defined by the including environment. */
#define __READ_BARRIER()	mb()
#define __WRITE_BARRIER()	mb()
#endif

/* The platform glue may predefine these to override the defaults above */
#ifndef MEMORY_READ_BARRIER
#define MEMORY_READ_BARRIER()	__READ_BARRIER()
#endif
#ifndef MEMORY_WRITE_BARRIER
#define MEMORY_WRITE_BARRIER()	__WRITE_BARRIER()
#endif
104 
105 
/*
 *  A la VMS/CAM-3 queue management.
 *
 *  Circular doubly-linked list.  An empty queue is a head whose
 *  forward and backward links both point back at the head itself.
 */
typedef struct sym_quehead {
	struct sym_quehead *flink;	/* Forward  pointer */
	struct sym_quehead *blink;	/* Backward pointer */
} SYM_QUEHEAD;

/* Initialize 'ptr' as an empty queue head (both links self-referencing) */
#define sym_que_init(ptr) do { \
	(ptr)->flink = (ptr); (ptr)->blink = (ptr); \
} while (0)
117 
sym_que_first(struct sym_quehead * head)118 static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
119 {
120 	return (head->flink == head) ? 0 : head->flink;
121 }
122 
sym_que_last(struct sym_quehead * head)123 static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
124 {
125 	return (head->blink == head) ? 0 : head->blink;
126 }
127 
__sym_que_add(struct sym_quehead * new,struct sym_quehead * blink,struct sym_quehead * flink)128 static __inline void __sym_que_add(struct sym_quehead * new,
129 	struct sym_quehead * blink,
130 	struct sym_quehead * flink)
131 {
132 	flink->blink	= new;
133 	new->flink	= flink;
134 	new->blink	= blink;
135 	blink->flink	= new;
136 }
137 
__sym_que_del(struct sym_quehead * blink,struct sym_quehead * flink)138 static __inline void __sym_que_del(struct sym_quehead * blink,
139 	struct sym_quehead * flink)
140 {
141 	flink->blink = blink;
142 	blink->flink = flink;
143 }
144 
sym_que_empty(struct sym_quehead * head)145 static __inline int sym_que_empty(struct sym_quehead *head)
146 {
147 	return head->flink == head;
148 }
149 
sym_que_splice(struct sym_quehead * list,struct sym_quehead * head)150 static __inline void sym_que_splice(struct sym_quehead *list,
151 	struct sym_quehead *head)
152 {
153 	struct sym_quehead *first = list->flink;
154 
155 	if (first != list) {
156 		struct sym_quehead *last = list->blink;
157 		struct sym_quehead *at   = head->flink;
158 
159 		first->blink = head;
160 		head->flink  = first;
161 
162 		last->flink = at;
163 		at->blink   = last;
164 	}
165 }
166 
sym_que_move(struct sym_quehead * orig,struct sym_quehead * dest)167 static __inline void sym_que_move(struct sym_quehead *orig,
168 	struct sym_quehead *dest)
169 {
170 	struct sym_quehead *first, *last;
171 
172 	first = orig->flink;
173 	if (first != orig) {
174 		first->blink = dest;
175 		dest->flink  = first;
176 		last = orig->blink;
177 		last->flink  = dest;
178 		dest->blink  = last;
179 		orig->flink  = orig;
180 		orig->blink  = orig;
181 	} else {
182 		dest->flink  = dest;
183 		dest->blink  = dest;
184 	}
185 }
186 
/*
 *  Return the address of the structure of type 'type' that embeds the
 *  queue link 'member' located at address 'ptr' (container_of idiom).
 *  NOTE(review): the offset is computed via a null-pointer dereference
 *  trick and truncated through 'unsigned int'; harmless for small
 *  offsets but offsetof() would be the cleaner spelling -- confirm
 *  before changing, as this header predates wide offsetof use.
 */
#define sym_que_entry(ptr, type, member) \
	((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))


/* Insert 'new' immediately after element 'pos' */
#define sym_insque(new, pos)		__sym_que_add(new, pos, (pos)->flink)

/* Unlink 'el' from whatever queue it is currently on */
#define sym_remque(el)			__sym_que_del((el)->blink, (el)->flink)

/* Insert 'new' at the front of the queue headed by 'head' */
#define sym_insque_head(new, head)	__sym_que_add(new, head, (head)->flink)
196 
sym_remque_head(struct sym_quehead * head)197 static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
198 {
199 	struct sym_quehead *elem = head->flink;
200 
201 	if (elem != head)
202 		__sym_que_del(head, elem->flink);
203 	else
204 		elem = 0;
205 	return elem;
206 }
207 
/* Insert 'new' at the tail of the queue headed by 'head' */
#define sym_insque_tail(new, head)	__sym_que_add(new, (head)->blink, head)
209 
sym_remque_tail(struct sym_quehead * head)210 static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
211 {
212 	struct sym_quehead *elem = head->blink;
213 
214 	if (elem != head)
215 		__sym_que_del(elem->blink, head);
216 	else
217 		elem = 0;
218 	return elem;
219 }
220 
/*
 *  Forward iteration over every element queued on 'head';
 *  'qp' is the cursor variable supplied by the caller.
 */
#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
	for (qp = (head)->flink; qp != (head); qp = qp->flink)
/*
 *  FreeBSD does not offer our kind of queue in the CAM CCB.
 *  So, we have to cast.
 */
#define sym_qptr(p)	((struct sym_quehead *) (p))
231 
232 /*
233  *  Simple bitmap operations.
234  */
/*
 *  Simple bitmap operations on an array of u32 words.
 *  Bit 'n' lives in word n/32, at bit position n%32.
 *  The mask constant is unsigned (1U): left-shifting a signed 1
 *  into bit 31 (when n%32 == 31) is undefined behavior in C.
 */
#define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1U<<((n)&0x1f)))
#define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1U<<((n)&0x1f)))
#define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1U<<((n)&0x1f)))
238 
/*
 *  Portable but silly implemented byte order primitives.
 *  NOTE(review): relies on BYTE_ORDER/BIG_ENDIAN being defined by a
 *  previously included system header.  If both are undefined, the
 *  preprocessor evaluates them to 0 and the big-endian branch is
 *  wrongly selected -- confirm the including environment always
 *  provides them (e.g. via <sys/endian.h> or equivalent).
 */
#if	BYTE_ORDER == BIG_ENDIAN

/* Swap the two bytes of a 16 bit value */
#define __revb16(x) (	(((u16)(x) & (u16)0x00ffU) << 8) | \
			(((u16)(x) & (u16)0xff00U) >> 8) 	)
/* Swap the four bytes of a 32 bit value */
#define __revb32(x) (	(((u32)(x) & 0x000000ffU) << 24) | \
			(((u32)(x) & 0x0000ff00U) <<  8) | \
			(((u32)(x) & 0x00ff0000U) >>  8) | \
			(((u32)(x) & 0xff000000U) >> 24)	)

/* On a big-endian host, host <-> little-endian is a byte swap */
#define __htole16(v)	__revb16(v)
#define __htole32(v)	__revb32(v)
#define __le16toh(v)	__htole16(v)
#define __le32toh(v)	__htole32(v)

/* Function forms: evaluate 'v' exactly once, unlike the macros above */
static __inline u16	_htole16(u16 v) { return __htole16(v); }
static __inline u32	_htole32(u32 v) { return __htole32(v); }
#define _le16toh	_htole16
#define _le32toh	_htole32

#else	/* LITTLE ENDIAN */

/* On a little-endian host every conversion is the identity */
#define __htole16(v)	(v)
#define __htole32(v)	(v)
#define __le16toh(v)	(v)
#define __le32toh(v)	(v)

#define _htole16(v)	(v)
#define _htole32(v)	(v)
#define _le16toh(v)	(v)
#define _le32toh(v)	(v)

#endif	/* BYTE_ORDER */
274 
/*
 * The below round up/down macros are to be used with a constant
 * as argument (sizeof(...) for example), for the compiler to
 * optimize the whole thing.
 */
/* One step of a chained conditional: _U_ yields m if 'a' <= 2^m,
 * _D_ yields m if 'a' < 2^(m+1); otherwise each falls through to
 * the next step of the chain. */
#define _U_(a,m)	(a)<=(1<<m)?m:
#define _D_(a,m)	(a)<(1<<(m+1))?m:

/*
 * Round up logarithm to base 2 of a 16 bit constant.
 * (Chains 16 _U_ steps; yields 16 if 'a' exceeds 2^15.)
 */
#define _LGRU16_(a) \
( \
 _U_(a, 0)_U_(a, 1)_U_(a, 2)_U_(a, 3)_U_(a, 4)_U_(a, 5)_U_(a, 6)_U_(a, 7) \
 _U_(a, 8)_U_(a, 9)_U_(a,10)_U_(a,11)_U_(a,12)_U_(a,13)_U_(a,14)_U_(a,15) \
 16)

/*
 * Round down logarithm to base 2 of a 16 bit constant.
 * (Chains 16 _D_ steps; yields 16 if 'a' is at least 2^16.)
 */
#define _LGRD16_(a) \
( \
 _D_(a, 0)_D_(a, 1)_D_(a, 2)_D_(a, 3)_D_(a, 4)_D_(a, 5)_D_(a, 6)_D_(a, 7) \
 _D_(a, 8)_D_(a, 9)_D_(a,10)_D_(a,11)_D_(a,12)_D_(a,13)_D_(a,14)_D_(a,15) \
 16)

/*
 * Round up a 16 bit constant to the nearest power of 2.
 */
#define _SZRU16_(a) ((a)==0?0:(1<<_LGRU16_(a)))

/*
 * Round down a 16 bit constant to the nearest power of 2.
 */
#define _SZRD16_(a) ((a)==0?0:(1<<_LGRD16_(a)))
310 
311 #endif /* SYM_MISC_H */
312