/*
 * Copyright (C) 1996 Paul Mackerras.
 * Adapted for ppc64 - Todd Inglett, Anton Blanchard
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <asm/bitops.h>
#include <linux/fs.h>
#include <linux/ext2_fs.h>
#include <linux/locks.h>
#include <linux/quotaops.h>
#include <asm/ppcdebug.h>

#undef DEBUG_BITOPS

/*
 * Bitops are weird when viewed on big-endian systems.  They were designed
 * for little-endian machines, where the size of the bitset doesn't matter
 * (low-order bytes come first) as long as the bit in question is valid.
 *
 * Bits are "tested" often using the C expression (val & (1<<nr)), so we do
 * our best to stay compatible with that.  The assumption is that val will
 * be unsigned long for such tests.  As such, we assume the bits are stored
 * as an array of unsigned long (the usual case is a single unsigned long,
 * of course).  Here's an example bitset with bit numbering:
 *
 *   |63..........0|127........64|191.......128|255.......192|
 *
 * This leads to two problems.  First, if an int, short or char is passed as
 * a bitset it will be a bad memory reference since we want to store in chunks
 * of unsigned long (64 bits here) size.  Second, since these could be char
 * arrays we might have an alignment problem.  We ease the alignment problem
 * by actually doing the operation with 32-bit values while preserving the
 * 64-bit long layout shown above.  Got that?  Good.
 */
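
/*
 * A minimal sketch, not part of the original file: example_test_bit is a
 * hypothetical helper showing how a bit number maps onto the layout above.
 * Bit 0 is the least-significant bit of addr[0], so for nr < 64 this matches
 * the usual (val & (1UL << nr)) test.  Kept compiled out on purpose.
 */
#if 0
static int example_test_bit(unsigned long nr, const unsigned long *addr)
{
	/* word index = nr / 64, bit within that word = nr % 64 */
	return (addr[nr >> 6] >> (nr & 63)) & 1;
}
#endif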

unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size,
				 unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;

	if (offset) {
		tmp = *p++;
		tmp |= ~0UL >> (64 - offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

unsigned long find_next_bit(unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	unsigned long *p = addr + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < 64)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= (~0UL >> (64 - size));
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

static __inline__ unsigned int ext2_ilog2(unsigned int x)
{
	int lz;

	/* cntlzw counts leading zeros, so 31 - lz is the index of the MSB. */
	asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 31 - lz;
}

static __inline__ unsigned int ext2_ffz(unsigned int x)
{
	u32 tempRC;

	/* Find the first zero bit by isolating the lowest set bit of ~x. */
	if ((x = ~x) == 0)
		return 32;
	tempRC = ext2_ilog2(x & -x);
	return tempRC;
}

unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size,
				    unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31;
	if (offset) {
		tmp = cpu_to_le32p(p++);
		tmp |= ~0U >> (32 - offset);	/* bug or feature ? */
		if (size < 32)
			goto found_first;
		if (tmp != ~0)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = cpu_to_le32p(p++)) != ~0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = cpu_to_le32p(p);
found_first:
	tmp |= ~0 << size;
	if (tmp == ~0)			/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ext2_ffz(tmp);
}

void BUG_OUTLINE(char *file, unsigned line)
{
	/* Report the caller's location, not this file's. */
	udbg_printf("BUG - kernel BUG at %s:%u!\n", file, line);
	PPCDBG_ENTER_DEBUGGER();
	printk("kernel BUG at %s:%u!\n", file, line);
	__asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);
}