/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
 * Optimization for constant divisors on 32-bit machines:
 * Copyright (C) 2006-2015 Nicolas Pitre
 *
 * The semantics of do_div() are, in C++ notation (observing that the
 * name is a function-like macro and that the n parameter behaves like
 * a C++ reference):
 *
 *	uint32_t do_div(uint64_t &n, uint32_t base)
 *	{
 *		uint32_t remainder = n % base;
 *		n = n / base;
 *		return remainder;
 *	}
 *
 * NOTE: macro parameter n is evaluated multiple times,
 * beware of side effects!
 */

#include "../types.h"
#include "compiler.h"
#include "bitsperlong.h"

#if BITS_PER_LONG == 64

/**
 * do_div - divide a 64-bit dividend in place and return the remainder
 * @n: uint64_t dividend (will be updated)
 * @base: uint32_t divisor
 *
 * Summary:
 * ``uint32_t remainder = n % base;``
 * ``n = n / base;``
 *
 * Return: (uint32_t)remainder
 *
 * NOTE: macro parameter @n is evaluated multiple times,
 * beware of side effects!
 */
#define do_div(n, base)						\
({								\
	uint32_t __base = (base);				\
	uint32_t __rem;						\
	__rem = ((uint64_t)(n)) % __base;			\
	(n) = ((uint64_t)(n)) / __base;				\
	__rem;							\
})
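
/*
 * A minimal usage sketch: split a 64-bit nanosecond count into whole
 * seconds and leftover nanoseconds. do_div() updates its first
 * argument in place and returns the remainder:
 *
 *	uint64_t ns = 1234567890123ULL;
 *	uint32_t rem = do_div(ns, 1000000000);
 *
 * Afterwards ns == 1234 (seconds) and rem == 567890123 (nanoseconds).
 */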

#elif BITS_PER_LONG == 32

// #include <linux/log2.h>

// /*
// * If the divisor happens to be constant, we determine the appropriate
// * inverse at compile time to turn the division into a few inline
// * multiplications which ought to be much faster.
// *
// * (It is unfortunate that gcc doesn't perform all this internally.)
// */

// #define __div64_const32(n, ___b)				\
// ({								\
// 	/*							\
// 	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p \
// 	 *							\
// 	 * We rely on the fact that most of this code gets optimized \
// 	 * away at compile time due to constant propagation and only \
// 	 * a few multiplication instructions should remain.	\
// 	 * Hence this monstrous macro (static inline doesn't always \
// 	 * do the trick here).					\
// 	 */							\
// 	uint64_t ___res, ___x, ___t, ___m, ___n = (n);		\
// 	uint32_t ___p, ___bias;					\
// 								\
// 	/* determine MSB of b */				\
// 	___p = 1 << ilog2(___b);				\
// 								\
// 	/* compute m = ((p << 64) + b - 1) / b */		\
// 	___m = (~0ULL / ___b) * ___p;				\
// 	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b; \
// 								\
// 	/* one less than the dividend with highest result */	\
// 	___x = ~0ULL / ___b * ___b - 1;				\
// 								\
// 	/* test our ___m with res = m * x / (p << 64) */	\
// 	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \
// 	___t = ___res += (___m & 0xffffffff) * (___x >> 32);	\
// 	___res += (___x & 0xffffffff) * (___m >> 32);		\
// 	___t = (___res < ___t) ? (1ULL << 32) : 0;		\
// 	___res = (___res >> 32) + ___t;				\
// 	___res += (___m >> 32) * (___x >> 32);			\
// 	___res /= ___p;						\
// 								\
// 	/* Now sanitize and optimize what we've got. */		\
// 	if (~0ULL % (___b / (___b & -___b)) == 0) {		\
// 		/* special case, can be simplified to ... */	\
// 		___n /= (___b & -___b);				\
// 		___m = ~0ULL / (___b / (___b & -___b));		\
// 		___p = 1;					\
// 		___bias = 1;					\
// 	} else if (___res != ___x / ___b) {			\
// 		/*						\
// 		 * We can't get away without a bias to compensate \
// 		 * for bit truncation errors. To avoid it we'd need an \
// 		 * additional bit to represent m which would overflow \
// 		 * a 64-bit variable.				\
// 		 *						\
// 		 * Instead we do m = p / b and n / b = (n * m + m) / p. \
// 		 */						\
// 		___bias = 1;					\
// 		/* Compute m = (p << 64) / b */			\
// 		___m = (~0ULL / ___b) * ___p;			\
// 		___m += ((~0ULL % ___b + 1) * ___p) / ___b;	\
// 	} else {						\
// 		/*						\
// 		 * Reduce m / p, and try to clear bit 31 of m when \
// 		 * possible, otherwise that'll need extra overflow \
// 		 * handling later.				\
// 		 */						\
// 		uint32_t ___bits = -(___m & -___m);		\
// 		___bits |= ___m >> 32;				\
// 		___bits = (~___bits) << 1;			\
// 		/*						\
// 		 * If ___bits == 0 then setting bit 31 is unavoidable. \
// 		 * Simply apply the maximum possible reduction in that \
// 		 * case. Otherwise the MSB of ___bits indicates the \
// 		 * best reduction we should apply.		\
// 		 */						\
// 		if (!___bits) {					\
// 			___p /= (___m & -___m);			\
// 			___m /= (___m & -___m);			\
// 		} else {					\
// 			___p >>= ilog2(___bits);		\
// 			___m >>= ilog2(___bits);		\
// 		}						\
// 		/* No bias needed. */				\
// 		___bias = 0;					\
// 	}							\
// 								\
// 	/*							\
// 	 * Now we have a combination of 2 conditions:		\
// 	 *							\
// 	 * 1) whether or not we need to apply a bias, and	\
// 	 *							\
// 	 * 2) whether or not there might be an overflow in the cross \
// 	 *    product determined by (___m & ((1 << 63) | (1 << 31))). \
// 	 *							\
// 	 * Select the best way to do (m_bias + m * n) / (1 << 64). \
// 	 * From now on there will be actual runtime code generated. \
// 	 */							\
// 	___res = __arch_xprod_64(___m, ___n, ___bias);		\
// 								\
// 	___res /= ___p;						\
// })

// #ifndef __arch_xprod_64
// /*
// * Default C implementation for __arch_xprod_64()
// *
// * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
// * Semantic: retval = ((bias ? m : 0) + m * n) >> 64
// *
// * The product is a 128-bit value, scaled down to 64 bits.
// * Assuming constant propagation to optimize away unused conditional code.
// * Architectures may provide their own optimized assembly implementation.
// */
// static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
// {
// 	uint32_t m_lo = m;
// 	uint32_t m_hi = m >> 32;
// 	uint32_t n_lo = n;
// 	uint32_t n_hi = n >> 32;
// 	uint64_t res;
// 	uint32_t res_lo, res_hi, tmp;

// 	if (!bias) {
// 		res = ((uint64_t)m_lo * n_lo) >> 32;
// 	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
// 		/* there can't be any overflow here */
// 		res = (m + (uint64_t)m_lo * n_lo) >> 32;
// 	} else {
// 		res = m + (uint64_t)m_lo * n_lo;
// 		res_lo = res >> 32;
// 		res_hi = (res_lo < m_hi);
// 		res = res_lo | ((uint64_t)res_hi << 32);
// 	}

// 	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
// 		/* there can't be any overflow here */
// 		res += (uint64_t)m_lo * n_hi;
// 		res += (uint64_t)m_hi * n_lo;
// 		res >>= 32;
// 	} else {
// 		res += (uint64_t)m_lo * n_hi;
// 		tmp = res >> 32;
// 		res += (uint64_t)m_hi * n_lo;
// 		res_lo = res >> 32;
// 		res_hi = (res_lo < tmp);
// 		res = res_lo | ((uint64_t)res_hi << 32);
// 	}

// 	res += (uint64_t)m_hi * n_hi;

// 	return res;
// }
// #endif
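
/*
 * A worked instance of the __arch_xprod_64() semantic documented above
 * (a sketch; it applies only if the commented-out 32-bit path is
 * re-enabled): with m == 1ULL << 63, n == 6 and bias == false, the
 * 128-bit product m * n equals exactly 3 << 64, so the value scaled
 * down by 64 bits is 3. With bias == true, the extra m added before
 * scaling is less than 1 << 64, so the result still floors to 3.
 */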

// #ifndef __div64_32
// extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
// #endif

// /* The unnecessary pointer compare is there
// * to check for type safety (n must be 64bit)
// */
// #define do_div(n, base)					\
// ({								\
// 	uint32_t __base = (base);				\
// 	uint32_t __rem;						\
// 	(void)(((typeof((n)) *)0) == ((uint64_t *)0));		\
// 	if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
// 		__rem = (n) & (__base - 1);			\
// 		(n) >>= ilog2(__base);				\
// 	} else if (__builtin_constant_p(__base) && __base != 0) { \
// 		uint32_t __res_lo, __n_lo = (n);		\
// 		(n) = __div64_const32(n, __base);		\
// 		/* the remainder can be computed with 32-bit regs */ \
// 		__res_lo = (n);					\
// 		__rem = __n_lo - __res_lo * __base;		\
// 	} else if (likely(((n) >> 32) == 0)) {			\
// 		__rem = (uint32_t)(n) % __base;			\
// 		(n) = (uint32_t)(n) / __base;			\
// 	} else {						\
// 		__rem = __div64_32(&(n), __base);		\
// 	}							\
// 	__rem;							\
// })

#else /* BITS_PER_LONG == ?? */

#error do_div() does not yet support the C64

#endif /* BITS_PER_LONG */

#endif /* _ASM_GENERIC_DIV64_H */