[PATCH v3 next 07/10] lib: mul_u64_u64_div_u64() optimise multiply on 32bit x86

From: David Laight
Date: Sat Jun 14 2025 - 05:55:11 EST


gcc generates horrid code for both ((u64)u32_a * u32_b) and (u64_a + u32_b).
As well as executing extra instructions, it can generate a lot of spills to
the stack (including spills of constant zeros and even multiplies by
constant zero).
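
For reference, the two portable forms in question, as a minimal standalone
sketch (the function names here are illustrative only; the typedefs stand in
for the kernel's u32/u64):

typedef unsigned int u32;
typedef unsigned long long u64;

/* 32x32 => 64 multiply: on 32bit x86, gcc zero extends both operands
 * and can emit the extra instructions and stack spills noted above. */
static u64 mul32x32_portable(u32 u32_a, u32 u32_b)
{
	return (u64)u32_a * u32_b;
}

/* 64 + 32 add: gcc zero extends u32_b before doing a full 64-bit add. */
static u64 add64_32_portable(u64 u64_a, u32 u32_b)
{
	return u64_a + u32_b;
}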

mul_u32_u32() already exists to optimise the multiply.
Add a similar add_u64_u32() for the addition.
Disable both for clang - it generates better code without them.

Use mul_u32_u32() and add_u64_u32() in the 64x64 => 128 multiply
in mul_u64_add_u64_div_u64().
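
As a sanity check of the carry handling this enables, here is a userspace
sketch of the same 64x64 => 128 schoolbook multiply (without the 'c' addend),
verified against __int128 on a 64-bit host; values and names are arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t a = 0xdeadbeefcafef00dull, b = 0xfeedfacec0ffee42ull;
	uint32_t a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;

	/* Partial products; each one absorbs at most two u32 carries,
	 * which cannot overflow a u64 (see the comment in the diff). */
	uint64_t x = (uint64_t)a_lo * b_lo;
	uint64_t y = (uint64_t)a_lo * b_hi + (uint32_t)(x >> 32);
	uint64_t z = (uint64_t)a_hi * b_hi + (uint32_t)(y >> 32);

	y = (uint64_t)a_hi * b_lo + (uint32_t)y;
	z += (uint32_t)(y >> 32);

	uint64_t n_lo = (y << 32) + (uint32_t)x, n_hi = z;
	unsigned __int128 ref = (unsigned __int128)a * b;

	printf("%s\n", n_hi == (uint64_t)(ref >> 64) && n_lo == (uint64_t)ref
		       ? "match" : "MISMATCH");
	return 0;
}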

Tested by forcing the amd64 build of test_mul_u64_u64_div_u64.ko
to use the 32bit asm code.

Signed-off-by: David Laight <david.laight.linux@xxxxxxxxx>
---

New patch for v3.
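
A note for reviewers on the overflow bound that the new mul_add() helper
(below) relies on: with x = 2^32, (x-1)*(x-1) + 2*(x-1) == x*x - 1, so a
32x32 product plus two u32 carries still fits in a u64. Quick standalone
check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t m = UINT32_MAX;	/* 2^32 - 1 */

	/* (2^32 - 1)^2 + 2 * (2^32 - 1) == 2^64 - 1 */
	assert(m * m + 2 * m == UINT64_MAX);
	return 0;
}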

 arch/x86/include/asm/div64.h | 14 ++++++++++++++
 include/linux/math64.h       | 11 +++++++++++
 lib/math/div64.c             | 18 ++++++++++++------
 3 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index 7a0a916a2d7d..4a4c29e8602d 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -60,6 +60,7 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 }
 #define div_u64_rem div_u64_rem
 
+#ifndef __clang__
 static inline u64 mul_u32_u32(u32 a, u32 b)
 {
 	u32 high, low;
@@ -71,6 +72,19 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 }
 #define mul_u32_u32 mul_u32_u32
 
+static inline u64 add_u64_u32(u64 a, u32 b)
+{
+	u32 high = a >> 32, low = a;
+
+	asm ("addl %[b], %[low]; adcl $0, %[high]"
+	     : [low] "+r" (low), [high] "+r" (high)
+	     : [b] "rm" (b) );
+
+	return low | (u64)high << 32;
+}
+#define add_u64_u32 add_u64_u32
+#endif
+
 /*
  * __div64_32() is never called on x86, so prevent the
  * generic definition from getting built.
diff --git a/include/linux/math64.h b/include/linux/math64.h
index e1c2e3642cec..5e497836e975 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -158,6 +158,17 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 }
 #endif
 
+#ifndef add_u64_u32
+/*
+ * Many a GCC version also messes this up,
+ * zero extending b and then spilling everything to the stack.
+ */
+static inline u64 add_u64_u32(u64 a, u32 b)
+{
+	return a + b;
+}
+#endif
+
 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
 #ifndef mul_u64_u32_shr
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 22433e5565c4..2ac7e25039a1 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -187,6 +187,12 @@ EXPORT_SYMBOL(iter_div_u64_rem);
 #endif
 
 #if !defined(mul_u64_add_u64_div_u64) || defined(test_mul_u64_add_u64_div_u64)
+
+static u64 mul_add(u32 a, u32 b, u32 c)
+{
+	return add_u64_u32(mul_u32_u32(a, b), c);
+}
+
 u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
 {
 	if (WARN_ONCE(!d, "%s: division of (%#llx * %#llx + %#llx) by zero, returning 0",
@@ -211,12 +217,12 @@ u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
 	u64 x, y, z;
 
 	/* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
-	x = (u64)a_lo * b_lo + (u32)c;
-	y = (u64)a_lo * b_hi + (u32)(c >> 32);
-	y += (u32)(x >> 32);
-	z = (u64)a_hi * b_hi + (u32)(y >> 32);
-	y = (u64)a_hi * b_lo + (u32)y;
-	z += (u32)(y >> 32);
+	x = mul_add(a_lo, b_lo, c);
+	y = mul_add(a_lo, b_hi, c >> 32);
+	y = add_u64_u32(y, x >> 32);
+	z = mul_add(a_hi, b_hi, y >> 32);
+	y = mul_add(a_hi, b_lo, y);
+	z = add_u64_u32(z, y >> 32);
 	x = (y << 32) + (u32)x;
 
 	u64 n_lo = x, n_hi = z;
--
2.39.5