1 /*
2  * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it would be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11  *
12  * Further, this software is distributed without any warranty that it is
13  * free of the rightful claim of any third person regarding infringement
14  * or the like.  Any license provided herein, whether implied or
15  * otherwise, applies only to this software file.  Patent licenses, if
16  * any, provided herein do not apply to combinations of this program with
17  * other software, or any other product whatsoever.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write the Free Software Foundation, Inc., 59
21  * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22  *
23  * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24  * Mountain View, CA  94043, or:
25  *
26  * http://www.sgi.com
27  *
28  * For further information regarding this notice, see:
29  *
30  * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31  */
32 #ifndef __XFS_ARCH_H__
33 #define __XFS_ARCH_H__
34 
35 #ifndef XFS_BIG_INUMS
36 # error XFS_BIG_INUMS must be defined true or false
37 #endif
38 
39 #ifdef __KERNEL__
40 
41 #include <asm/byteorder.h>
42 
43 #ifdef __LITTLE_ENDIAN
44 # define __BYTE_ORDER	__LITTLE_ENDIAN
45 #endif
46 #ifdef __BIG_ENDIAN
47 # define __BYTE_ORDER	__BIG_ENDIAN
48 #endif
49 
50 #endif	/* __KERNEL__ */
51 
/* do we need conversion? */

/* ARCH_NOCONVERT: data is already in host byte order, no swap needed. */
#define ARCH_NOCONVERT 1
#if __BYTE_ORDER == __LITTLE_ENDIAN
/*
 * The on-disk format is big-endian (the _BE accessors are the ones
 * selected below whenever arch != ARCH_NOCONVERT), so a little-endian
 * host must convert: ARCH_CONVERT != ARCH_NOCONVERT.
 */
# define ARCH_CONVERT	0
#else
/* Big-endian host: native order already matches, no conversion. */
# define ARCH_CONVERT	ARCH_NOCONVERT
#endif
60 
/* generic swapping macros */

#ifndef HAVE_SWABMACROS
/*
 * Byte-swap "var" at the given fixed width and cast the result back to
 * the type of "type".  typeof() is a GNU C extension; __swab16/32/64
 * come from <asm/byteorder.h> in the kernel build.
 */
#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))
#endif

/*
 * Size-dispatching swap: picks the fixed-width swap matching
 * sizeof(type).  sizeof() is a compile-time constant, so the compiler
 * discards the untaken arms; sizes other than 2/4/8 (e.g. single
 * bytes) pass through unswapped.
 */
#define INT_SWAP(type, var) \
    ((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
    ((sizeof(type) == 4) ? INT_SWAP32(type,var) : \
    ((sizeof(type) == 2) ? INT_SWAP16(type,var) : \
    (var))))
74 
/*
 * Byte-reverse a 32-bit quantity between two potentially unaligned
 * locations, one byte at a time.  "from" and "to" must not overlap.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe inside un-braced if/else (CERT PRE10-C); the
 * previous bare-brace form made "if (x) MACRO(a,b); else ..." a
 * syntax error.
 */
#define INT_SWAP_UNALIGNED_32(from,to) \
    do { \
	((__u8*)(to))[0] = ((__u8*)(from))[3]; \
	((__u8*)(to))[1] = ((__u8*)(from))[2]; \
	((__u8*)(to))[2] = ((__u8*)(from))[1]; \
	((__u8*)(to))[3] = ((__u8*)(from))[0]; \
    } while (0)

/*
 * Byte-reverse a 64-bit quantity: byte-swap each 32-bit half and
 * exchange the halves.  "from" and "to" must not overlap (the low
 * half of "to" is written before the low half of "from" is read).
 */
#define INT_SWAP_UNALIGNED_64(from,to) \
    do { \
	INT_SWAP_UNALIGNED_32( ((__u8*)(from)) + 4, ((__u8*)(to))); \
	INT_SWAP_UNALIGNED_32( ((__u8*)(from)), ((__u8*)(to)) + 4); \
    } while (0)
88 
/*
 * get and set integers from potentially unaligned locations
 */

/*
 * 16-bit loads from potentially unaligned memory, assembled byte by
 * byte with an explicit little-/big-endian layout.
 */
#define INT_GET_UNALIGNED_16_LE(pointer) \
   ((__u16)((((__u8*)(pointer))[0]	) | (((__u8*)(pointer))[1] << 8 )))
#define INT_GET_UNALIGNED_16_BE(pointer) \
   ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1])))
/*
 * The stores are statement macros; do { } while (0) makes each expand
 * to exactly one statement so they are safe in un-braced if/else
 * (CERT PRE10-C), unlike the old bare-brace form.
 * NB: "value" is expanded (and evaluated) twice.
 */
#define INT_SET_UNALIGNED_16_LE(pointer,value) \
    do { \
	((__u8*)(pointer))[0] = (((value)     ) & 0xff); \
	((__u8*)(pointer))[1] = (((value) >> 8) & 0xff); \
    } while (0)
#define INT_SET_UNALIGNED_16_BE(pointer,value) \
    do { \
	((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \
	((__u8*)(pointer))[1] = (((value)     ) & 0xff); \
    } while (0)
107 
/*
 * 32-bit loads from potentially unaligned memory.  Each byte is
 * widened to __u32 *before* shifting: integer promotion would
 * otherwise make the shift operate on signed int, and shifting a
 * byte >= 0x80 left by 24 overflows signed int, which is undefined
 * behavior (C99 6.5.7, CERT INT34-C).
 */
#define INT_GET_UNALIGNED_32_LE(pointer) \
   ((__u32)((((__u32)((__u8*)(pointer))[0])      ) | (((__u32)((__u8*)(pointer))[1]) << 8 ) \
	   |(((__u32)((__u8*)(pointer))[2]) << 16) | (((__u32)((__u8*)(pointer))[3]) << 24)))
#define INT_GET_UNALIGNED_32_BE(pointer) \
   ((__u32)((((__u32)((__u8*)(pointer))[0]) << 24) | (((__u32)((__u8*)(pointer))[1]) << 16) \
	   |(((__u32)((__u8*)(pointer))[2]) << 8 ) | (((__u32)((__u8*)(pointer))[3])      )))

/*
 * 64-bit loads composed from two 32-bit loads: the high half lives at
 * offset 4 for little-endian data and offset 0 for big-endian data.
 */
#define INT_GET_UNALIGNED_64_LE(pointer) \
   (((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer))+4)) << 32 ) \
   |((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer))	 ))	  ))
#define INT_GET_UNALIGNED_64_BE(pointer) \
   (((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer))	 )) << 32  ) \
   |((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer))+4))	   ))
121 
/*
 * now pick the right ones for our MACHINE ARCHITECTURE
 */

/*
 * Unsuffixed INT_{GET,SET}_UNALIGNED_* aliases operate in the host
 * CPU's native byte order.  (Only the 16-bit SET has an alias; no
 * 32/64-bit native SET is defined here.)
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define INT_GET_UNALIGNED_16(pointer)	    INT_GET_UNALIGNED_16_LE(pointer)
#define INT_SET_UNALIGNED_16(pointer,value) INT_SET_UNALIGNED_16_LE(pointer,value)
#define INT_GET_UNALIGNED_32(pointer)	    INT_GET_UNALIGNED_32_LE(pointer)
#define INT_GET_UNALIGNED_64(pointer)	    INT_GET_UNALIGNED_64_LE(pointer)
#else
#define INT_GET_UNALIGNED_16(pointer)	    INT_GET_UNALIGNED_16_BE(pointer)
#define INT_SET_UNALIGNED_16(pointer,value) INT_SET_UNALIGNED_16_BE(pointer,value)
#define INT_GET_UNALIGNED_32(pointer)	    INT_GET_UNALIGNED_32_BE(pointer)
#define INT_GET_UNALIGNED_64(pointer)	    INT_GET_UNALIGNED_64_BE(pointer)
#endif
137 
/* define generic INT_ macros */

/*
 * INT_GET: read the field "reference" (stored in the byte order
 * described by "arch") as a host-order value.  When
 * arch == ARCH_NOCONVERT the field is used directly; otherwise it is
 * byte-swapped.  The first INT_SWAP argument is consumed only by
 * typeof()/sizeof() (unevaluated contexts), so passing the field
 * expression itself is safe and evaluates it only once.
 */
#define INT_GET(reference,arch) \
    (((arch) == ARCH_NOCONVERT) \
	? \
	    (reference) \
	: \
	    INT_SWAP((reference),(reference)) \
    )
147 
/* does not return a value */
/*
 * INT_SET: store the host-order value "valueref" into the field
 * "reference", converting to the byte order described by "arch".
 *
 * Two paths: a compile-time-constant value is swapped directly in
 * the assignment (presumably so the swap folds to a constant --
 * NOTE(review): confirm), while a non-constant value is stored first
 * and then byte-swapped in place.
 */
#define INT_SET(reference,arch,valueref) \
    (__builtin_constant_p(valueref) ? \
	(void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \
	(void)( \
	    ((reference) = (valueref)), \
	    ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \
	) \
    )
157 
/* does not return a value */
/*
 * INT_MOD_EXPR: apply a C operator fragment "code" (e.g. "+= 3",
 * "|= mask") to the field "reference" in host order.  When
 * conversion is needed the field is converted to host order in
 * place, modified, then converted back -- "reference" is expanded
 * many times, so it must be free of side effects.
 */
#define INT_MOD_EXPR(reference,arch,code) \
    (((arch) == ARCH_NOCONVERT) \
	? \
	    (void)((reference) code) \
	: \
	    (void)( \
		(reference) = INT_GET((reference),arch) , \
		((reference) code), \
		INT_SET(reference, arch, reference) \
	    ) \
    )

/* does not return a value */
/*
 * INT_MOD: add "delta" to the field "reference", honoring the byte
 * order described by "arch".
 */
#define INT_MOD(reference,arch,delta) \
    (void)( \
	INT_MOD_EXPR(reference,arch,+=(delta)) \
    )
176 
/*
 * INT_COPY - copy a value between two locations with the
 *	      _same architecture_ but _potentially different sizes_
 *
 *	    if the types of the two parameters are equal or they are
 *		in native architecture, a simple copy is done
 *
 *	    otherwise, architecture conversions are done
 *
 */

/* does not return a value */
/*
 * Same-size fields can be assigned directly whatever the byte order
 * (the bit pattern is preserved); differently-sized non-native
 * fields must round-trip through host order so the *value*, not the
 * raw bytes, is what gets widened or narrowed.
 */
#define INT_COPY(dst,src,arch) \
    ( \
	((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
	    ? \
		(void)((dst) = (src)) \
	    : \
		INT_SET(dst, arch, INT_GET(src, arch)) \
    )
197 
/*
 * INT_XLATE - copy a value in either direction between two locations
 *	       with different architectures
 *
 *		    dir < 0	- copy from memory to buffer (native to arch)
 *		    dir > 0	- copy from buffer to memory (arch to native)
 */

/* does not return a value */
/*
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement: the previous bare-brace form left a stray ";" after the
 * block and broke un-braced if/else callers (CERT PRE10-C).
 */
#define INT_XLATE(buf,mem,dir,arch) do { \
    ASSERT(dir); \
    if (dir>0) { \
	(mem)=INT_GET(buf, arch); \
    } else { \
	INT_SET(buf, arch, mem); \
    } \
} while (0)
215 
/*
 * INT_ISZERO: test a field for zero.  Zero has the same
 * representation in every byte order, so "arch" is ignored.
 */
#define INT_ISZERO(reference,arch) \
    ((reference) == 0)

/* INT_ZERO: store zero; no conversion needed, "arch" is ignored. */
#define INT_ZERO(reference,arch) \
    ((reference) = 0)
221 
/*
 * 16-bit unaligned accessors taking an explicit "arch": host order
 * when arch == ARCH_NOCONVERT, big-endian (on-disk) order otherwise.
 */
#define INT_GET_UNALIGNED_16_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_16(pointer)) \
	: \
	    (INT_GET_UNALIGNED_16_BE(pointer)) \
    )
/*
 * Wrapped in do { } while (0): the previous bare if/else expansion
 * would bind a caller's "else" to the macro's own "if" (dangling
 * else, CERT PRE10-C).  Callers must follow the macro with ";".
 */
#define INT_SET_UNALIGNED_16_ARCH(pointer,value,arch) \
    do { \
	if ((arch) == ARCH_NOCONVERT) { \
	    INT_SET_UNALIGNED_16(pointer,value); \
	} else { \
	    INT_SET_UNALIGNED_16_BE(pointer,value); \
	} \
    } while (0)
235 
/*
 * DIRINO4_GET_ARCH: fetch a 32-bit directory inode number from a
 * potentially unaligned location, honoring "arch" (host order when
 * ARCH_NOCONVERT, big-endian otherwise).
 */
#define DIRINO4_GET_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_32(pointer)) \
	: \
	    (INT_GET_UNALIGNED_32_BE(pointer)) \
    )

#if XFS_BIG_INUMS
/* Large inums: read the full 64-bit inode number. */
#define DIRINO_GET_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_64(pointer)) \
	: \
	    (INT_GET_UNALIGNED_64_BE(pointer)) \
    )
#else
/* MACHINE ARCHITECTURE dependent */
/*
 * Small inums: only 32 bits of the stored 64-bit inode number are
 * significant.  NOTE(review): the +4 offset on little-endian hosts
 * presumably selects the half holding the low-order bits -- confirm
 * against the on-disk layout before changing.
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define DIRINO_GET_ARCH(pointer,arch) \
    DIRINO4_GET_ARCH((((__u8*)pointer)+4),arch)
#else
#define DIRINO_GET_ARCH(pointer,arch) \
    DIRINO4_GET_ARCH(pointer,arch)
#endif
#endif
262 
/*
 * Copy a (potentially unaligned) 64-bit inode number between two
 * non-overlapping buffers, byte-swapping when arch != ARCH_NOCONVERT.
 *
 * Both macros are wrapped in do { } while (0) so each expands to a
 * single statement and is safe in un-braced if/else (CERT PRE10-C);
 * the old bare if/else form risked dangling-else capture.
 */
#define DIRINO_COPY_ARCH(from,to,arch) \
    do { \
	if ((arch) == ARCH_NOCONVERT) { \
	    memcpy(to,from,sizeof(xfs_ino_t)); \
	} else { \
	    INT_SWAP_UNALIGNED_64(from,to); \
	} \
    } while (0)
/*
 * 4-byte (small inum) variant.  NOTE(review): the no-convert branch
 * copies from offset +4 while the swap branch reads offset 0 --
 * asymmetric; behavior preserved as-is, verify against callers.
 */
#define DIRINO4_COPY_ARCH(from,to,arch) \
    do { \
	if ((arch) == ARCH_NOCONVERT) { \
	    memcpy(to,(((__u8*)from+4)),sizeof(xfs_dir2_ino4_t)); \
	} else { \
	    INT_SWAP_UNALIGNED_32(from,to); \
	} \
    } while (0)
275 
276 #endif	/* __XFS_ARCH_H__ */
277