add an inlined version of iter_div_u64_rem

iter_div_u64_rem is used in the x86-64 vdso, which cannot call other
kernel code.  For this case, provide an always-inlined version,
__iter_div_u64_rem.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Date:   2008-06-12 10:47:58 +02:00 (committed by Ingo Molnar)
Commit: d5e181f78a (parent f595ec964d)

2 changed files with 20 additions and 14 deletions

include/linux/math64.h

@@ -83,4 +83,23 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
 
 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
 
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	u32 ret = 0;
+
+	while (dividend >= divisor) {
+		/* The following asm() prevents the compiler from
+		   optimising this loop into a modulo operation. */
+		asm("" : "+rm"(dividend));
+
+		dividend -= divisor;
+		ret++;
+	}
+
+	*remainder = dividend;
+
+	return ret;
+}
+
 #endif /* _LINUX_MATH64_H */
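Because __iter_div_u64_rem is always inlined from the header, vdso code can use it without any call into the kernel image. As a rough illustration (a hypothetical sketch, not part of this patch; the wrapper name and the use of NSEC_PER_SEC are assumptions), a vdso-side caller splitting a nanosecond count into whole seconds and leftover nanoseconds might look like:

/* Hypothetical vdso-side caller (illustration only, not in this patch):
 * split a nanosecond count into seconds plus a remainder without
 * calling out of the vdso. */
static __always_inline void ns_to_sec_nsec(u64 ns, u64 *sec, u64 *nsec)
{
	u64 rem;

	/* quotient is the number of whole seconds, rem the leftover ns */
	*sec = __iter_div_u64_rem(ns, NSEC_PER_SEC, &rem);
	*nsec = rem;
}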

lib/div64.c

@@ -105,19 +105,6 @@ EXPORT_SYMBOL(div64_u64);
  */
 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 {
-	u32 ret = 0;
-
-	while (dividend >= divisor) {
-		/* The following asm() prevents the compiler from
-		   optimising this loop into a modulo operation. */
-		asm("" : "+rm"(dividend));
-
-		dividend -= divisor;
-		ret++;
-	}
-
-	*remainder = dividend;
-
-	return ret;
+	return __iter_div_u64_rem(dividend, divisor, remainder);
 }
 EXPORT_SYMBOL(iter_div_u64_rem);
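Outside the kernel, the same iterative scheme is easy to sanity-check. The sketch below is an assumption-laden userspace copy (hosted C toolchain with GCC-style extended asm; the kernel's u64/u32 become uint64_t/uint32_t), comparing one case against the expected quotient and remainder:

/* Userspace sketch of the iterative division, for testing only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t iter_div(uint64_t dividend, uint32_t divisor, uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		asm("" : "+rm"(dividend));	/* same optimisation barrier as the patch */
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

int main(void)
{
	uint64_t rem;
	uint32_t q = iter_div(3999999999ULL, 1000000000U, &rem);

	/* 3999999999 ns is 3 whole seconds with 999999999 ns left over */
	assert(q == 3 && rem == 999999999ULL);
	printf("q=%u rem=%llu\n", q, (unsigned long long)rem);
	return 0;
}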