How to reproduce:
Configure examples/simple-integers-bgvrns with
parameters.SetKeySwitchTechnique(BV);
parameters.SetDigitSize(32);
and the result is:
#1 + #2 + #3: ( 5 6 9 10 15 18 21 24 27 30 33 36 ...
#1 * #2 * #3: ( -24019 19556 19948 26485 23230 27308
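For reference, a minimal sketch of the reproduction setup (assuming the stock simple-integers-bgvrns.cpp structure; the plaintext modulus and multiplicative depth below are the example's usual defaults and may differ in your checkout). Only the two marked lines differ from the unmodified example:

```cpp
// Sketch based on examples/pke/simple-integers-bgvrns.cpp; parameter values
// other than the two marked lines are assumptions, not part of this report.
#include "openfhe.h"

using namespace lbcrypto;

int main() {
    CCParams<CryptoContextBGVRNS> parameters;
    parameters.SetMultiplicativeDepth(2);
    parameters.SetPlaintextModulus(65537);
    parameters.SetKeySwitchTechnique(BV);  // changed: BV key switching
    parameters.SetDigitSize(32);           // changed: 32-bit digits trigger the bug

    CryptoContext<DCRTPoly> cc = GenCryptoContext(parameters);
    cc->Enable(PKE);
    cc->Enable(KEYSWITCH);
    cc->Enable(LEVELEDSHE);
    // ... remainder identical to the stock example
    //     (key generation, encryption, EvalAdd/EvalMult, decryption)
    return 0;
}
```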
This is not the result of noise overflow, but of an integer overflow in the call chain below (poly-impl.h calling GetDigitAtIndexForBase in ubintnat.h):
// src/core/include/lattice/hal/default/poly-impl.h
xDigit.SetValues(x.GetValues().GetDigitAtIndexForBase(i + 1, 1 << baseBits), x.GetFormat());
// src/core/include/math/hal/intnat/ubintnat.h
usint GetDigitAtIndexForBase(usint index, usint base) const;
Note that the literal 1 is an int (32-bit) by default and usint is also a 32-bit unsigned type, so 1 << 32 overflows (the shift count equals the type width, which is undefined behavior). The underlying problem is that GetDigitAtIndexForBase
should return uint64_t instead of usint. Alternatively, we could forbid digitSize >= 32 when validating the crypto parameters; however, for digitSize = 0 the base is qi itself, and log2(qi) is often larger than 32, so restricting users to digitSize in (0, 31] or to small log2(qi) would be quite weird.
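To illustrate the arithmetic in isolation (a standalone sketch, not OpenFHE code): shifting by 32 on a 32-bit type is undefined behavior, while a widened 64-bit shift yields the intended base of 2^32.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    unsigned baseBits = 32;

    // What poly-impl.h does today: '1' is an int, so for baseBits == 32 the
    // shift count equals the type width -- undefined behavior. On x86 this
    // typically yields 1 (shift count taken mod 32), so the "base" passed to
    // GetDigitAtIndexForBase is nonsense.
    // unsigned bad = 1 << baseBits;  // UB, do not do this

    // Widening the left operand to 64 bits gives the intended base of 2^32.
    uint64_t good = static_cast<uint64_t>(1) << baseBits;
    std::printf("%llu\n", static_cast<unsigned long long>(good));  // 4294967296
    return 0;
}
```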
The following patch (omitting the corresponding signature changes in other headers) fixes the digitSize = 32 case; supporting digitSize > 32 would require a global API change so that GetDigitAtIndexForBase returns uint64_t.
--- a/src/core/include/lattice/hal/default/poly-impl.h
+++ b/src/core/include/lattice/hal/default/poly-impl.h
@@ -528,7 +528,7 @@ std::vector<PolyImpl<VecType>> PolyImpl<VecType>::BaseDecompose(usint baseBits,
// TP: x is same for BACKEND 2 and 6
for (usint i = 0; i < nWindows; ++i) {
- xDigit.SetValues(x.GetValues().GetDigitAtIndexForBase(i + 1, 1 << baseBits), x.GetFormat());
+ xDigit.SetValues(x.GetValues().GetDigitAtIndexForBase(i + 1, ((uint64_t)1) << baseBits), x.GetFormat());
--- a/src/core/include/math/hal/intnat/ubintnat.h
+++ b/src/core/include/math/hal/intnat/ubintnat.h
@@ -1723,12 +1723,12 @@ public:
*/
// TODO: * i to << i
- usint GetDigitAtIndexForBase(usint index, usint base) const {
+ usint GetDigitAtIndexForBase(usint index, uint64_t base) const {
usint DigitLen = ceil(log2(base));
- usint digit = 0;
+ uint64_t digit = 0;
usint newIndex = 1 + (index - 1) * DigitLen;
- for (usint i = 1; i < base; i <<= 1) {
- digit += GetBitAtIndex(newIndex++) * i;
+ for (uint64_t i = 1; i < base; i <<= 1) {
+ digit += ((uint64_t)GetBitAtIndex(newIndex++)) * i;
}
return digit;
}
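As a sanity check on what the patched loop computes, here is a sketch of its semantics over a plain uint64_t (the real function operates on the library's big-integer type, so the names and types below are only a model): for base = 2^DigitLen, the loop extracts the index-th DigitLen-bit digit, i.e. a shift-and-mask.

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

// Model of the patched GetDigitAtIndexForBase on a plain 64-bit value
// (0-based bit positions here; the library uses 1-based GetBitAtIndex).
uint64_t digitAtIndexForBase(uint64_t value, unsigned index, uint64_t base) {
    unsigned digitLen = static_cast<unsigned>(std::ceil(std::log2(base)));
    unsigned bitPos   = (index - 1) * digitLen;  // first bit of the requested digit
    uint64_t digit    = 0;
    for (uint64_t i = 1; i < base; i <<= 1)      // accumulate DigitLen bits, LSB first
        digit += ((value >> bitPos++) & 1) * i;
    return digit;
}

int main() {
    uint64_t base  = static_cast<uint64_t>(1) << 32;  // digitSize = 32
    uint64_t value = 0x0123456789ABCDEFULL;
    // Digit 1 is the low 32 bits, digit 2 the high 32 bits.
    assert(digitAtIndexForBase(value, 1, base) == 0x89ABCDEFULL);
    assert(digitAtIndexForBase(value, 2, base) == 0x01234567ULL);
    return 0;
}
```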