#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All test-and-modify operations return 0
 * if the bit was clear before the operation and != 0 if it was set.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
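
#if 0	/* Usage sketch only -- not part of this header; the bitmap and
	 * function names below are hypothetical.  Bits are numbered from
	 * the LSB of the first long, so one array serves as a single
	 * bitmap even past bit 31: */
static unsigned long my_bitmap[2];	/* bits 0..63 */

static void example_set(void)
{
	set_bit(0, my_bitmap);		/* LSB of my_bitmap[0] */
	set_bit(32, my_bitmap);		/* LSB of my_bitmap[1] */
}
#endif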

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
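
#if 0	/* Usage sketch only -- a hypothetical bit lock built on these
	 * primitives; my_lock_word is not part of this header. */
static unsigned long my_lock_word;

static void example_unlock(void)
{
	/* Order prior stores before the releasing clear_bit(). */
	smp_mb__before_clear_bit();
	clear_bit(0, &my_lock_word);
}
#endif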

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
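
#if 0	/* Usage sketch only -- hypothetical names; illustrates the
	 * classic test_and_set_bit() try-lock idiom. */
static unsigned long my_lock_word;

static int example_trylock(void)
{
	/* Old value 0 means we took the lock; nonzero means it was busy. */
	return test_and_set_bit(0, &my_lock_word) == 0;
}
#endif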

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
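
#if 0	/* Usage sketch only -- hypothetical names.  The macro picks the
	 * C version when the bit number is a compile-time constant and
	 * the btl version otherwise. */
static unsigned long my_bitmap[2];

static int example_test(int nr)
{
	int a = test_bit(5, my_bitmap);		/* constant_test_bit() */
	int b = test_bit(nr, my_bitmap);	/* variable_test_bit() */
	return a || b;
}
#endif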

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size (in bits) to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
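
#if 0	/* Usage sketch only -- hypothetical names.  A result >= the size
	 * passed in means no zero bit was found. */
static unsigned long my_bitmap[2];	/* 64 bits */

static int example_first_free(void)
{
	int bit = find_first_zero_bit(my_bitmap, 64);
	return bit < 64 ? bit : -1;	/* -1: bitmap is full */
}
#endif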

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size (in bits) to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for zero in the first word
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (~(*p >> bit)));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + set + res);
}
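
#if 0	/* Usage sketch only -- hypothetical names.  Walks every clear
	 * bit by restarting the search just past the last hit. */
static unsigned long my_bitmap[2];	/* 64 bits */

static void example_walk_free(void)
{
	int bit;

	for (bit = find_first_zero_bit(my_bitmap, 64);
	     bit < 64;
	     bit = find_next_zero_bit(my_bitmap, 64, bit + 1))
		/* bit is a free slot */ ;
}
#endif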

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
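
#if 0	/* Usage sketch only.  ffz(0x0000000f) == 4; callers must reject
	 * ~0UL themselves, since bsfl on zero leaves the result undefined. */
static unsigned long example_ffz(unsigned long w)
{
	return (w == ~0UL) ? 32 : ffz(w);	/* 32: hypothetical "none" value */
}
#endif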

#ifdef __KERNEL__

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
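
#if 0	/* Usage sketch only.  Unlike ffz(), ffs() counts from 1 and
	 * returns 0 for no set bit, matching the libc convention. */
static void example_ffs(void)
{
	int a = ffs(0);		/* == 0, no bit set */
	int b = ffs(0x8);	/* == 4, bit 3 counted from 1 */
	(void)a; (void)b;
}
#endif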

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
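
#if 0	/* Usage sketch only -- assumes the generic_hweightN() helpers
	 * from <linux/bitops.h>; e.g. hweight8(0xf0) == 4. */
static unsigned int example_weight(unsigned int mask)
{
	return hweight32(mask);		/* number of set bits in mask */
}
#endif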

#endif /* __KERNEL__ */

#ifdef __KERNEL__

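/*
 * ext2 bitmaps are little-endian on disk, and so is the i386, so the
 * native non-atomic bit operations can be used for them directly.
 */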
#define ext2_set_bit                 __test_and_set_bit
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */