diff --git a/ec/src/models/bw6/mod.rs b/ec/src/models/bw6/mod.rs
index 8bb67eb15..2a651c59c 100644
--- a/ec/src/models/bw6/mod.rs
+++ b/ec/src/models/bw6/mod.rs
@@ -2,11 +2,11 @@ use crate::{
     models::{ModelParameters, SWModelParameters},
     PairingEngine,
 };
-use ark_ff::fields::{
+use ark_ff::{fields::{
     fp3::Fp3Parameters,
     fp6_2over3::{Fp6, Fp6Parameters},
     BitIteratorBE, Field, PrimeField, SquareRootField,
-};
+}, BigInteger};
 use num_traits::One;
 
 use core::marker::PhantomData;
@@ -68,7 +68,7 @@ impl<P: BW6Parameters> BW6<P> {
     }
 
     fn exp_by_x(mut f: Fp6<P::Fp6Params>) -> Fp6<P::Fp6Params> {
-        f = f.cyclotomic_exp(&P::X);
+        f = f.cyclotomic_exp(&P::X.to_64x4());
         if P::X_IS_NEGATIVE {
             f.conjugate();
         }
diff --git a/ec/src/models/mnt4/mod.rs b/ec/src/models/mnt4/mod.rs
index 05eafa7e8..dd33d989b 100644
--- a/ec/src/models/mnt4/mod.rs
+++ b/ec/src/models/mnt4/mod.rs
@@ -1,3 +1,5 @@
+use ark_ff::BigInteger;
+
 use {
     crate::{
         models::{ModelParameters, SWModelParameters},
@@ -179,11 +181,11 @@ impl<P: MNT4Parameters> MNT4<P> {
         let mut elt_q = *elt;
         elt_q.frobenius_map(1);
 
-        let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1);
+        let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1.to_64x4());
         let w0_part = if P::FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG {
-            elt_inv_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)
+            elt_inv_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0.to_64x4())
         } else {
-            elt_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)
+            elt_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0.to_64x4())
         };
 
         w1_part * &w0_part
diff --git a/ec/src/models/mnt6/mod.rs b/ec/src/models/mnt6/mod.rs
index 89984dd19..dac14b34a 100644
--- a/ec/src/models/mnt6/mod.rs
+++ b/ec/src/models/mnt6/mod.rs
@@ -1,3 +1,5 @@
+use ark_ff::BigInteger;
+
 use {
     crate::{
         models::{ModelParameters, SWModelParameters},
@@ -185,11 +187,11 @@ impl<P: MNT6Parameters> MNT6<P> {
         let mut elt_q = *elt;
         elt_q.frobenius_map(1);
 
-        let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1);
+        let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1.to_64x4());
         let w0_part = if P::FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG {
-            elt_inv_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)
+            elt_inv_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0.to_64x4())
         } else {
-            elt_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)
+            elt_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0.to_64x4())
        };
 
         w1_part * &w0_part
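[Reviewer note] All three pairing models now hand `cyclotomic_exp` a plain `[u64; 4]` obtained via `BigInteger::to_64x4()`, so the exponentiation loop never sees the backend's internal limb layout. A minimal standalone sketch of that bit-driven square-and-multiply pattern (toy `u64` arithmetic; `bits_be` is an illustrative stand-in for arkworks' `BitIteratorBE`, not its API):

    // Big-endian bit view of little-endian [u64; 4] limbs.
    fn bits_be(limbs: [u64; 4]) -> impl Iterator<Item = bool> {
        limbs
            .into_iter()
            .rev()
            .flat_map(|limb| (0..64).rev().map(move |i| (limb >> i) & 1 == 1))
    }

    // Square-and-multiply over those bits, with u128 intermediates.
    fn pow_mod(base: u64, exp: [u64; 4], modulus: u64) -> u64 {
        let mut acc: u64 = 1;
        for bit in bits_be(exp) {
            acc = ((acc as u128 * acc as u128) % modulus as u128) as u64;
            if bit {
                acc = ((acc as u128 * base as u128) % modulus as u128) as u64;
            }
        }
        acc
    }

    fn main() {
        // 3^5 mod 1_000_003 = 243; the exponent sits in the lowest limb.
        assert_eq!(pow_mod(3, [5, 0, 0, 0], 1_000_003), 243);
    }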
diff --git a/ec/src/models/short_weierstrass_jacobian.rs b/ec/src/models/short_weierstrass_jacobian.rs
index ee4046dff..3c19b2b5a 100644
--- a/ec/src/models/short_weierstrass_jacobian.rs
+++ b/ec/src/models/short_weierstrass_jacobian.rs
@@ -11,9 +11,7 @@ use ark_std::{
 };
 
 use ark_ff::{
-    bytes::{FromBytes, ToBytes},
-    fields::{BitIteratorBE, Field, PrimeField, SquareRootField},
-    ToConstraintField, UniformRand,
+    bytes::{FromBytes, ToBytes}, fields::{BitIteratorBE, Field, PrimeField, SquareRootField}, BigInteger, ToConstraintField, UniformRand
 };
 
 use crate::{models::SWModelParameters as Parameters, AffineCurve, ProjectiveCurve};
@@ -223,7 +221,8 @@ impl<P: Parameters> AffineCurve for GroupAffine<P> {
     #[inline]
     fn mul<S: Into<<Self::ScalarField as PrimeField>::BigInt>>(&self, by: S) -> GroupProjective<P> {
-        let bits = BitIteratorBE::new(by.into());
+        let inner: <Self::ScalarField as PrimeField>::BigInt = by.into();
+        let bits = ark_ff::BitIteratorBE::new(inner.to_64x4());
         self.mul_bits(bits)
     }
 
@@ -714,7 +713,7 @@ impl<'a, P: Parameters> SubAssign<&'a Self> for GroupProjective<P> {
 
 impl<P: Parameters> MulAssign<P::ScalarField> for GroupProjective<P> {
     fn mul_assign(&mut self, other: P::ScalarField) {
-        *self = self.mul(other.into_repr())
+        *self = self.mul(other.into_repr().to_64x4())
     }
 }
diff --git a/ec/src/models/twisted_edwards_extended.rs b/ec/src/models/twisted_edwards_extended.rs
index d4a5524ec..80517c394 100644
--- a/ec/src/models/twisted_edwards_extended.rs
+++ b/ec/src/models/twisted_edwards_extended.rs
@@ -21,9 +21,7 @@ use num_traits::{One, Zero};
 use zeroize::Zeroize;
 
 use ark_ff::{
-    bytes::{FromBytes, ToBytes},
-    fields::{BitIteratorBE, Field, PrimeField, SquareRootField},
-    ToConstraintField, UniformRand,
+    bytes::{FromBytes, ToBytes}, fields::{BitIteratorBE, Field, PrimeField, SquareRootField}, BigInteger, ToConstraintField, UniformRand
 };
 
 #[cfg(feature = "parallel")]
@@ -138,7 +136,8 @@ impl<P: Parameters> AffineCurve for GroupAffine<P> {
     }
 
     fn mul<S: Into<<Self::ScalarField as PrimeField>::BigInt>>(&self, by: S) -> GroupProjective<P> {
-        self.mul_bits(BitIteratorBE::new(by.into()))
+        let inner: <Self::ScalarField as PrimeField>::BigInt = by.into();
+        self.mul_bits(ark_ff::BitIteratorBE::new(inner.to_64x4()))
     }
 
     fn from_random_bytes(bytes: &[u8]) -> Option<Self> {
@@ -610,7 +609,7 @@ impl<'a, P: Parameters> SubAssign<&'a Self> for GroupProjective<P> {
 
 impl<P: Parameters> MulAssign<P::ScalarField> for GroupProjective<P> {
     fn mul_assign(&mut self, other: P::ScalarField) {
-        *self = self.mul(other.into_repr())
+        *self = self.mul(other.into_repr().to_64x4())
     }
 }
diff --git a/ec/src/msm/variable_base.rs b/ec/src/msm/variable_base.rs
index 57f4fa4d7..f8dfe198b 100644
--- a/ec/src/msm/variable_base.rs
+++ b/ec/src/msm/variable_base.rs
@@ -54,7 +54,7 @@ impl VariableBaseMSM {
             scalar.divn(w_start as u32);
 
             // We mod the remaining bits by 2^{window size}, thus taking `c` bits.
-            let scalar = scalar.as_ref()[0] % (1 << c);
+            let scalar = scalar.to_64x4()[0] % (1 << c);
 
             // If the scalar is non-zero, we update the corresponding
             // bucket.
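[Reviewer note] The MSM loop used to read the current window through `AsRef<[u64]>`, which the 32x9 backend no longer implements; `to_64x4()[0]` recovers the same low 64 bits after `divn`. A standalone sketch of the quantity it computes (the `window` helper is hypothetical, not the arkworks code path):

    // c-bit window starting at bit w_start of little-endian u64 limbs,
    // i.e. what `divn(w_start)` followed by `% (1 << c)` produces.
    fn window(limbs: [u64; 4], w_start: u32, c: u32) -> u64 {
        assert!(c < 64);
        let limb = (w_start / 64) as usize;
        let bit = w_start % 64;
        let lo = limbs[limb] >> bit;
        // Bits spilling over from the next limb, if any.
        let hi = if bit > 0 && limb + 1 < 4 {
            limbs[limb + 1] << (64 - bit)
        } else {
            0
        };
        (lo | hi) & ((1 << c) - 1)
    }

    fn main() {
        // Bits 62..66 of 2^63 + 2^64 are 0b0110, i.e. window value 6.
        assert_eq!(window([1 << 63, 1, 0, 0], 62, 4), 6);
    }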
diff --git a/ff/Cargo.toml b/ff/Cargo.toml
index fc293a88d..979f37b8d 100644
--- a/ff/Cargo.toml
+++ b/ff/Cargo.toml
@@ -33,3 +33,7 @@ default = []
 std = [ "ark-std/std", "ark-serialize/std" ]
 parallel = [ "std", "rayon", "ark-std/parallel" ]
 asm = []
+32x9 = []
+
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(use_asm)'] }
\ No newline at end of file
diff --git a/ff/src/biginteger/arithmetic.rs b/ff/src/biginteger/arithmetic.rs
index 1a7e18ed7..c9025138f 100644
--- a/ff/src/biginteger/arithmetic.rs
+++ b/ff/src/biginteger/arithmetic.rs
@@ -1,3 +1,4 @@
+#![allow(unused)]
 use ark_std::vec::Vec;
 
 /// Make 4 u64 multiplications, instead of 1 u128
diff --git a/ff/src/biginteger/macros.rs b/ff/src/biginteger/macros.rs
index 23e936e9f..8e91fdae2 100644
--- a/ff/src/biginteger/macros.rs
+++ b/ff/src/biginteger/macros.rs
@@ -1,17 +1,43 @@
 macro_rules! bigint_impl {
     ($name:ident, $num_limbs:expr) => {
         #[derive(Copy, Clone, PartialEq, Eq, Debug, Default, Hash, Zeroize)]
-        pub struct $name(pub [u64; $num_limbs]);
+        pub struct $name(pub(crate) [u64; $num_limbs]);
 
         impl $name {
             pub const fn new(value: [u64; $num_limbs]) -> Self {
                 $name(value)
             }
+
+            pub const fn to_64x4(&self) -> [u64; $num_limbs] {
+                self.0
+            }
+
+            pub const fn from_64x4(value: [u64; $num_limbs]) -> Self {
+                $name(value)
+            }
+
+            #[ark_ff_asm::unroll_for_loops]
+            pub fn assign_bits_and(&mut self, other: &Self) {
+                for i in 0..$num_limbs {
+                    self.0[i] |= other.0[i]
+                }
+            }
+
+            pub fn to_native(&self) -> [u64; $num_limbs] {
+                self.0
+            }
         }
 
         impl BigInteger for $name {
             const NUM_LIMBS: usize = $num_limbs;
 
+            fn to_64x4(&self) -> [u64; 4] {
+                self.0
+            }
+            fn from_64x4(value: [u64; 4]) -> Self {
+                $name(value)
+            }
+
             #[inline]
             #[ark_ff_asm::unroll_for_loops]
             fn add_nocarry(&mut self, other: &Self) -> bool {
diff --git a/ff/src/biginteger/mod.rs b/ff/src/biginteger/mod.rs
index ca1a28a24..55d41f9fc 100644
--- a/ff/src/biginteger/mod.rs
+++ b/ff/src/biginteger/mod.rs
@@ -1,7 +1,5 @@
 use crate::{
-    bytes::{FromBytes, ToBytes},
-    fields::{BitIteratorBE, BitIteratorLE},
-    UniformRand,
+    bytes::{FromBytes, ToBytes}, fields::{BitIteratorBE, BitIteratorLE}, UniformRand
 };
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
 use ark_std::rand::{
@@ -31,14 +29,17 @@ pub fn signed_mod_reduction(n: u64, modulus: u64) -> i64 {
     }
 }
 
-bigint_impl!(BigInteger64, 1);
-bigint_impl!(BigInteger128, 2);
-bigint_impl!(BigInteger256, 4);
-bigint_impl!(BigInteger320, 5);
-bigint_impl!(BigInteger384, 6);
-bigint_impl!(BigInteger448, 7);
-bigint_impl!(BigInteger768, 12);
-bigint_impl!(BigInteger832, 13);
+pub mod native_bigint {
+    use super::*;
+    bigint_impl!(BigInteger256, 4);
+}
+pub mod webnode;
+
+#[cfg(not(any(target_family = "wasm", feature = "32x9")))]
+pub use native_bigint::*;
+
+#[cfg(any(target_family = "wasm", feature = "32x9"))]
+pub use webnode::*;
 
 #[cfg(test)]
 mod tests;
@@ -63,8 +64,6 @@ pub trait BigInteger:
     + 'static
     + UniformRand
     + Zeroize
-    + AsMut<[u64]>
-    + AsRef<[u64]>
     + From<u64>
     + TryFrom<num_bigint::BigUint>
     + Into<num_bigint::BigUint>
 {
@@ -72,6 +71,9 @@ pub trait BigInteger:
     /// Number of limbs.
     const NUM_LIMBS: usize;
 
+    fn to_64x4(&self) -> [u64; 4];
+    fn from_64x4(value: [u64; 4]) -> Self;
+
     /// Add another representation to this one, returning the carry bit.
     fn add_nocarry(&mut self, other: &Self) -> bool;
 
@@ -119,13 +121,13 @@ pub trait BigInteger:
     /// Returns the bit representation in a big endian boolean array,
     /// with leading zeroes.
     fn to_bits_be(&self) -> Vec<bool> {
-        BitIteratorBE::new(self).collect::<Vec<_>>()
+        BitIteratorBE::new(self.to_64x4()).collect::<Vec<_>>()
     }
 
     /// Returns the bit representation in a little endian boolean array,
     /// with trailing zeroes.
     fn to_bits_le(&self) -> Vec<bool> {
-        BitIteratorLE::new(self).collect::<Vec<_>>()
+        BitIteratorLE::new(self.to_64x4()).collect::<Vec<_>>()
     }
 
     /// Returns the byte representation in a big endian byte array,
@@ -143,11 +145,12 @@ pub trait BigInteger:
         if w >= 2 && w < 64 {
             let mut res = vec![];
             let mut e = *self;
 
             while !e.is_zero() {
+                let e64 = e.to_64x4();
                 let z: i64;
                 if e.is_odd() {
-                    z = signed_mod_reduction(e.as_ref()[0], 1 << w);
+                    z = signed_mod_reduction(e64.as_ref()[0], 1 << w);
                     if z >= 0 {
                         e.sub_noborrow(&Self::from(z as u64));
                     } else {
diff --git a/ff/src/biginteger/webnode.rs b/ff/src/biginteger/webnode.rs
new file mode 100644
index 000000000..7e9015461
--- /dev/null
+++ b/ff/src/biginteger/webnode.rs
@@ -0,0 +1,412 @@
+use core::{convert::TryFrom, fmt::Display};
+
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
+use ark_std::{io::{Read, Result as IoResult, Write}, rand::{distributions::Standard, prelude::Distribution, Rng}, vec::Vec};
+use num_bigint::BigUint;
+use zeroize::Zeroize;
+
+use crate::{FromBytes, ToBytes};
+
+use super::BigInteger;
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Default, Hash, Zeroize)]
+pub struct BigInteger256(pub(crate) [u32; 9]);
+
+impl BigInteger256 {
+    pub const fn new(value: [u32; 9]) -> Self {
+        BigInteger256(value)
+    }
+
+    pub const fn from_64x4(value: [u64; 4]) -> Self {
+        BigInteger256(crate::fields::webnode::from_64x4(value))
+    }
+
+    pub const fn to_64x4(&self) -> [u64; 4] {
+        crate::fields::webnode::to_64x4(self.0)
+    }
+
+    #[ark_ff_asm::unroll_for_loops]
+    pub fn assign_bits_and(&mut self, other: &Self) {
+        for i in 0..9 {
+            self.0[i] |= other.0[i]
+        }
+    }
+
+    pub fn to_native(&self) -> [u32; 9] {
+        self.0
+    }
+}
+
+impl BigInteger for BigInteger256 {
+    const NUM_LIMBS: usize = 9;
+
+    fn to_64x4(&self) -> [u64; 4] {
+        crate::fields::webnode::to_64x4(self.0)
+    }
+    fn from_64x4(value: [u64; 4]) -> Self {
+        BigInteger256(crate::fields::webnode::from_64x4(value))
+    }
+
+    #[inline]
+    #[ark_ff_asm::unroll_for_loops]
+    fn add_nocarry(&mut self, other: &Self) -> bool {
+        let mut this = self.to_64x4();
+        let other = other.to_64x4();
+
+        let mut carry = 0;
+        for i in 0..4 {
+            this[i] = adc!(this[i], other[i], &mut carry);
+        }
+        *self = Self::from_64x4(this);
+        carry != 0
+    }
+
+    #[inline]
+    #[ark_ff_asm::unroll_for_loops]
+    fn sub_noborrow(&mut self, other: &Self) -> bool {
+        let mut this = self.to_64x4();
+        let other = other.to_64x4();
+
+        let mut borrow = 0;
+        for i in 0..4 {
+            this[i] = sbb!(this[i], other[i], &mut borrow);
+        }
+        *self = Self::from_64x4(this);
+        borrow != 0
+    }
+
+    #[inline]
+    #[ark_ff_asm::unroll_for_loops]
+    #[allow(unused)]
+    fn mul2(&mut self) {
+        let mut value = self.to_64x4();
+        let mut last = 0;
+        for i in 0..4 {
+            let a = &mut value[i];
+            let tmp = *a >> 63;
+            *a <<= 1;
+            *a |= last;
+            last = tmp;
+        }
+        *self = Self::from_64x4(value)
+    }
+
+    #[inline]
+    #[ark_ff_asm::unroll_for_loops]
+    fn muln(&mut self, mut n: u32) {
+        let mut value = self.to_64x4();
+        if n >= 64 * 4 {
+            *self = Self::from(0);
+            return;
+        }
+
+        while n >= 64 {
+            let mut t = 0;
+            for i in 0..4 {
+                core::mem::swap(&mut t, &mut value[i]);
+            }
+            n -= 64;
+        }
+
+        if n > 0 {
+            let mut t = 0;
+            #[allow(unused)]
+            for i in 0..4 {
+                let a = &mut value[i];
+                let t2 = *a >> (64 - n);
+                *a <<= n;
+                *a |= t;
+                t = t2;
+            }
+        }
+        *self = Self::from_64x4(value)
+    }
+
+    #[inline]
+    #[ark_ff_asm::unroll_for_loops]
+    #[allow(unused)]
+    fn div2(&mut self) {
+        let mut value = self.to_64x4();
+        let mut t = 0;
+        for i in 0..4 {
+            let a = &mut value[4 - i - 1];
+            let t2 = *a << 63;
+            *a >>= 1;
+            *a |= t;
+            t = t2;
+        }
+        *self = Self::from_64x4(value)
+    }
+
+    #[inline]
+    #[ark_ff_asm::unroll_for_loops]
+    fn divn(&mut self, mut n: u32) {
+        let mut value = self.to_64x4();
+
+        if n >= 64 * 4 {
+            *self = Self::from(0);
+            return;
+        }
+
+        while n >= 64 {
+            let mut t = 0;
+            for i in 0..4 {
+                core::mem::swap(&mut t, &mut value[4 - i - 1]);
+            }
+            n -= 64;
+        }
+
+        if n > 0 {
+            let mut t = 0;
+            #[allow(unused)]
+            for i in 0..4 {
+                let a = &mut value[4 - i - 1];
+                let t2 = *a << (64 - n);
+                *a >>= n;
+                *a |= t;
+                t = t2;
+            }
+        }
+
+        *self = Self::from_64x4(value)
+    }
+
+    #[inline]
+    fn is_odd(&self) -> bool {
+        self.0[0] & 1 == 1
+    }
+
+    #[inline]
+    fn is_even(&self) -> bool {
+        !self.is_odd()
+    }
+
+    #[inline]
+    fn is_zero(&self) -> bool {
+        for i in 0..9 {
+            if self.0[i] != 0 {
+                return false;
+            }
+        }
+        true
+    }
+
+    #[inline]
+    fn num_bits(&self) -> u32 {
+        let value = self.to_64x4();
+
+        let mut ret = 4 * 64;
+        for i in value.iter().rev() {
+            let leading = i.leading_zeros();
+            ret -= leading;
+            if leading != 64 {
+                break;
+            }
+        }
+
+        ret
+    }
+
+    #[inline]
+    fn get_bit(&self, i: usize) -> bool {
+        let value = self.to_64x4();
+        if i >= 64 * 4 {
+            false
+        } else {
+            let limb = i / 64;
+            let bit = i - (64 * limb);
+            (value[limb] & (1 << bit)) != 0
+        }
+    }
+
+    #[inline]
+    fn from_bits_be(bits: &[bool]) -> Self {
+        let mut res: [u64; 4] = <[u64; 4]>::default();
+        let mut acc: u64 = 0;
+
+        let mut bits = bits.to_vec();
+        bits.reverse();
+        for (i, bits64) in bits.chunks(64).enumerate() {
+            for bit in bits64.iter().rev() {
+                acc <<= 1;
+                acc += *bit as u64;
+            }
+            res[i] = acc;
+            acc = 0;
+        }
+        Self::from_64x4(res)
+    }
+
+    fn from_bits_le(bits: &[bool]) -> Self {
+        let mut res: [u64; 4] = <[u64; 4]>::default();
+        let mut acc: u64 = 0;
+
+        let bits = bits.to_vec();
+        for (i, bits64) in bits.chunks(64).enumerate() {
+            for bit in bits64.iter().rev() {
+                acc <<= 1;
+                acc += *bit as u64;
+            }
+            res[i] = acc;
+            acc = 0;
+        }
+        Self::from_64x4(res)
+    }
+
+    #[inline]
+    fn to_bytes_be(&self) -> Vec<u8> {
+        let mut le_bytes = self.to_bytes_le();
+        le_bytes.reverse();
+        le_bytes
+    }
+
+    #[inline]
+    fn to_bytes_le(&self) -> Vec<u8> {
+        let bigint = self.to_64x4();
+        let array_map = bigint.iter().map(|limb| limb.to_le_bytes());
+        let mut res = Vec::<u8>::with_capacity(4 * 8);
+        for limb in array_map {
+            res.extend_from_slice(&limb);
+        }
+        res
+    }
+}
+
+impl CanonicalSerialize for BigInteger256 {
+    #[inline]
+    fn serialize<W: Write>(&self, writer: W) -> Result<(), SerializationError> {
+        self.write(writer)?;
+        Ok(())
+    }
+
+    #[inline]
+    fn serialized_size(&self) -> usize {
+        // Serialized as the [u64; 4] view, not as the nine u32 limbs.
+        4 * 8
+    }
+}
+
+impl CanonicalDeserialize for BigInteger256 {
+    #[inline]
+    fn deserialize<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        let value = Self::read(reader)?;
+        Ok(value)
+    }
+}
+
+impl ToBytes for BigInteger256 {
+    #[inline]
+    fn write<W: Write>(&self, writer: W) -> IoResult<()> {
+        let bigint: [u64; 4] = self.to_64x4();
+        bigint.write(writer)
+    }
+}
+
+impl FromBytes for BigInteger256 {
+    #[inline]
+    fn read<R: Read>(reader: R) -> IoResult<Self> {
+        <[u64; 4]>::read(reader).map(Self::from_64x4)
+    }
+}
+
+impl Display for BigInteger256 {
+    fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
+        let this = self.to_64x4();
+        for i in this.iter().rev() {
+            write!(f, "{:016X}", *i)?;
+        }
+        Ok(())
+    }
+}
+
+impl Ord for BigInteger256 {
+    #[inline]
+    #[ark_ff_asm::unroll_for_loops]
+    fn cmp(&self, other: &Self) -> ::core::cmp::Ordering {
+        use core::cmp::Ordering;
+        for i in 0..9 {
+            let a = &self.0[9 - i - 1];
+            let b = &other.0[9 - i - 1];
+            if a < b {
+                return Ordering::Less;
+            } else if a > b {
+                return Ordering::Greater;
+            }
+        }
+        Ordering::Equal
+    }
+}
+
+impl PartialOrd for BigInteger256 {
+    #[inline]
+    fn partial_cmp(&self, other: &Self) -> Option<::core::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Distribution<BigInteger256> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInteger256 {
+        let rand: [u64; 4] = rng.gen();
+        BigInteger256::from_64x4(rand)
+    }
+}
+
+impl AsMut<[u32]> for BigInteger256 {
+    #[inline]
+    fn as_mut(&mut self) -> &mut [u32] {
+        &mut self.0
+    }
+}
+
+impl AsRef<[u32]> for BigInteger256 {
+    #[inline]
+    fn as_ref(&self) -> &[u32] {
+        &self.0
+    }
+}
+
+impl From<u64> for BigInteger256 {
+    #[inline]
+    fn from(val: u64) -> BigInteger256 {
+        Self::from_64x4([val, 0, 0, 0])
+    }
+}
+
+impl TryFrom<BigUint> for BigInteger256 {
+    type Error = ark_std::string::String;
+
+    #[inline]
+    fn try_from(val: num_bigint::BigUint) -> Result<BigInteger256, Self::Error> {
+        let bytes = val.to_bytes_le();
+
+        if bytes.len() > 4 * 8 {
+            Err(format!(
+                "A BigUint of {} bytes cannot fit into a {}.",
+                bytes.len(),
+                ark_std::stringify!(BigInteger256)
+            ))
+        } else {
+            let mut limbs = [0u64; 4];
+
+            bytes
+                .chunks(8)
+                .enumerate()
+                .for_each(|(i, chunk)| {
+                    let mut chunk_padded = [0u8; 8];
+                    chunk_padded[..chunk.len()].copy_from_slice(chunk);
+                    limbs[i] = u64::from_le_bytes(chunk_padded)
+                });
+
+            Ok(Self::from_64x4(limbs))
+        }
+    }
+}
+
+impl Into<BigUint> for BigInteger256 {
+    #[inline]
+    fn into(self) -> BigUint {
+        BigUint::from_bytes_le(&self.to_bytes_le())
+    }
+}
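[Reviewer note] The conversions above delegate to `crate::fields::webnode::{from_64x4, to_64x4}`, which pack 256 bits into nine 29-bit digits (9 × 29 = 261 bits, so every `u32` limb keeps three spare bits for carries and the top digit is only 24 bits wide). A standalone round-trip sketch using the same shift logic as the patch:

    const MASK64: u64 = (1 << 29) - 1;

    // Four 64-bit limbs -> nine 29-bit digits; digit i holds bits [29*i, 29*(i+1)).
    const fn from_64x4(pa: [u64; 4]) -> [u32; 9] {
        let mut p = [0u32; 9];
        p[0] = (pa[0] & MASK64) as u32;
        p[1] = ((pa[0] >> 29) & MASK64) as u32;
        p[2] = (((pa[0] >> 58) | (pa[1] << 6)) & MASK64) as u32;
        p[3] = ((pa[1] >> 23) & MASK64) as u32;
        p[4] = (((pa[1] >> 52) | (pa[2] << 12)) & MASK64) as u32;
        p[5] = ((pa[2] >> 17) & MASK64) as u32;
        p[6] = (((pa[2] >> 46) | (pa[3] << 18)) & MASK64) as u32;
        p[7] = ((pa[3] >> 11) & MASK64) as u32;
        p[8] = (pa[3] >> 40) as u32;
        p
    }

    // Inverse packing: shift each digit back to its bit offset 29 * i.
    const fn to_64x4(pa: [u32; 9]) -> [u64; 4] {
        let mut p = [0u64; 4];
        p[0] = pa[0] as u64 | (pa[1] as u64) << 29 | (pa[2] as u64) << 58;
        p[1] = (pa[2] as u64) >> 6 | (pa[3] as u64) << 23 | (pa[4] as u64) << 52;
        p[2] = (pa[4] as u64) >> 12 | (pa[5] as u64) << 17 | (pa[6] as u64) << 46;
        p[3] = (pa[6] as u64) >> 18 | (pa[7] as u64) << 11 | (pa[8] as u64) << 40;
        p
    }

    fn main() {
        let x = [0x0123_4567_89ab_cdef, u64::MAX, 0, 0xdead_beef];
        assert_eq!(to_64x4(from_64x4(x)), x);
        // Bit 29 of the 256-bit value lands at the bottom of digit 1.
        assert_eq!(from_64x4([1 << 29, 0, 0, 0])[1], 1);
    }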
diff --git a/ff/src/fields/arithmetic.rs b/ff/src/fields/arithmetic.rs
index 2021676e3..50f67180c 100644
--- a/ff/src/fields/arithmetic.rs
+++ b/ff/src/fields/arithmetic.rs
@@ -1,3 +1,5 @@
+#![allow(unused)]
+
 /// This modular multiplication algorithm uses Montgomery
 /// reduction for efficient implementation. It also additionally
 /// uses the "no-carry optimization" outlined
diff --git a/ff/src/fields/macros.rs b/ff/src/fields/macros.rs
index 524f3570e..a9112df22 100644
--- a/ff/src/fields/macros.rs
+++ b/ff/src/fields/macros.rs
@@ -1,3 +1,5 @@
+#![allow(unused)]
+
 macro_rules! impl_prime_field_serializer {
     ($field: ident, $params: ident, $byte_size: expr) => {
         impl CanonicalSerializeWithFlags for $field<$params> {
@@ -324,8 +326,8 @@ macro_rules! impl_Fp {
             }
 
             #[inline]
-            fn characteristic() -> &'static [u64] {
-                P::MODULUS.as_ref()
+            fn characteristic() -> [u64; 4] {
+                P::MODULUS.0
             }
 
             #[inline]
diff --git a/ff/src/fields/mod.rs b/ff/src/fields/mod.rs
index d5e0263d6..ba8944a3b 100644
--- a/ff/src/fields/mod.rs
+++ b/ff/src/fields/mod.rs
@@ -123,7 +123,7 @@ pub trait Field:
     /// Returns the characteristic of the field,
     /// in little-endian representation.
-    fn characteristic() -> &'static [u64] {
+    fn characteristic() -> [u64; 4] {
         Self::BasePrimeField::characteristic()
     }
 
@@ -547,19 +547,22 @@ impl<Slice: AsRef<[u64]>> Iterator for BitIteratorLE<Slice> {
     }
 }
 
-use crate::biginteger::{
-    BigInteger256, BigInteger320, BigInteger384, BigInteger448, BigInteger64, BigInteger768,
-    BigInteger832,
-};
 use num_bigint::BigUint;
 
-impl_field_bigint_conv!(Fp64, BigInteger64, Fp64Parameters);
-impl_field_bigint_conv!(Fp256, BigInteger256, Fp256Parameters);
-impl_field_bigint_conv!(Fp320, BigInteger320, Fp320Parameters);
-impl_field_bigint_conv!(Fp384, BigInteger384, Fp384Parameters);
-impl_field_bigint_conv!(Fp448, BigInteger448, Fp448Parameters);
-impl_field_bigint_conv!(Fp768, BigInteger768, Fp768Parameters);
-impl_field_bigint_conv!(Fp832, BigInteger832, Fp832Parameters);
+pub mod impl_conv {
+    use super::*;
+    use crate::biginteger::native_bigint::BigInteger256;
+    use crate::native_fp256::{Fp256, Fp256Parameters};
+
+    impl_field_bigint_conv!(Fp256, BigInteger256, Fp256Parameters);
+}
+
+// impl_field_bigint_conv!(Fp64, BigInteger64, Fp64Parameters);
+// impl_field_bigint_conv!(Fp320, BigInteger320, Fp320Parameters);
+// impl_field_bigint_conv!(Fp384, BigInteger384, Fp384Parameters);
+// impl_field_bigint_conv!(Fp448, BigInteger448, Fp448Parameters);
+// impl_field_bigint_conv!(Fp768, BigInteger768, Fp768Parameters);
+// impl_field_bigint_conv!(Fp832, BigInteger832, Fp832Parameters);
 
 // Given a vector of field elements {v_i}, compute the vector {v_i^(-1)}
 pub fn batch_inversion<F: Field>(v: &mut [F]) {
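[Reviewer note] `characteristic()` now returns `[u64; 4]` by value rather than `&'static [u64]`, which is why the Fp12 call site in the next hunk takes it by reference. A standalone sketch (hypothetical `mod_6` helper, not the arkworks function) of the kind of computation fed by it -- reducing the characteristic mod 6 from its little-endian limbs:

    // n mod 6 from little-endian u64 limbs: 2^(64*i) = 4 (mod 6) for i >= 1.
    fn mod_6(limbs: [u64; 4]) -> u64 {
        let mut r = limbs[0] % 6;
        for limb in &limbs[1..] {
            r = (r + 4 * (limb % 6)) % 6;
        }
        r
    }

    fn main() {
        // The Pasta Fp modulus from this patch; any prime > 3 is 1 or 5 mod 6,
        // which is exactly when its square is 1 mod 6.
        let p = [0x992d30ed00000001, 0x224698fc094cf91b, 0x0, 0x4000000000000000];
        assert!(matches!(mod_6(p), 1 | 5));
    }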
diff --git a/ff/src/fields/models/fp12_2over3over2.rs b/ff/src/fields/models/fp12_2over3over2.rs
index e7115fcab..257e46616 100644
--- a/ff/src/fields/models/fp12_2over3over2.rs
+++ b/ff/src/fields/models/fp12_2over3over2.rs
@@ -3,7 +3,7 @@ use crate::{
     fields::{fp6_3over2::*, Field, Fp2, Fp2Parameters},
     One,
 };
-use core::marker::PhantomData;
+use core::{convert::TryInto, marker::PhantomData};
 use core::ops::{AddAssign, SubAssign};
 
 type Fp2Params<P> = <<P as Fp12Parameters>::Fp6Params as Fp6Parameters>::Fp2Params;
@@ -135,7 +135,7 @@ impl<P: Fp12Parameters> Fp12<P> {
         // Faster Squaring in the Cyclotomic Subgroup of Sixth Degree Extensions
         // - Robert Granger and Michael Scott
         //
-        if characteristic_square_mod_6_is_one(Self::characteristic()) {
+        if characteristic_square_mod_6_is_one(&Self::characteristic()) {
             let fp2_nr = <P::Fp6Params as Fp6Parameters>::mul_fp2_by_nonresidue;
 
             let r0 = &self.c0.c0;
diff --git a/ff/src/fields/models/mod.rs b/ff/src/fields/models/mod.rs
index e6b48ea96..34edcbaad 100644
--- a/ff/src/fields/models/mod.rs
+++ b/ff/src/fields/models/mod.rs
@@ -1,3 +1,5 @@
+#![allow(unused)]
+
 use ark_std::{
     cmp::{Ord, Ordering, PartialOrd},
     fmt::{Display, Formatter, Result as FmtResult},
@@ -10,63 +12,73 @@ use num_traits::{One, Zero};
 
 use crate::{
     biginteger::{
-        arithmetic as fa, BigInteger as _BigInteger, BigInteger256, BigInteger320, BigInteger384,
-        BigInteger448, BigInteger64, BigInteger768, BigInteger832,
+        arithmetic as fa, BigInteger as _BigInteger, native_bigint::BigInteger256,
     },
     bytes::{FromBytes, ToBytes},
     fields::{FftField, Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField},
 };
 use ark_serialize::*;
 
-impl_Fp!(Fp64, Fp64Parameters, BigInteger64, BigInteger64, 1, "64");
-impl_Fp!(
-    Fp256,
-    Fp256Parameters,
-    BigInteger256,
-    BigInteger256,
-    4,
-    "256"
-);
-impl_Fp!(
-    Fp320,
-    Fp320Parameters,
-    BigInteger320,
-    BigInteger320,
-    5,
-    "320"
-);
-impl_Fp!(
-    Fp384,
-    Fp384Parameters,
-    BigInteger384,
-    BigInteger384,
-    6,
-    "384"
-);
-impl_Fp!(
-    Fp448,
-    Fp448Parameters,
-    BigInteger448,
-    BigInteger448,
-    7,
-    "448"
-);
-impl_Fp!(
-    Fp768,
-    Fp768Parameters,
-    BigInteger768,
-    BigInteger768,
-    12,
-    "768"
-);
-impl_Fp!(
-    Fp832,
-    Fp832Parameters,
-    BigInteger832,
-    BigInteger832,
-    13,
-    "832"
-);
+pub mod native_fp256 {
+    use super::*;
+    impl_Fp!(
+        Fp256,
+        Fp256Parameters,
+        BigInteger256,
+        BigInteger256,
+        4,
+        "256"
+    );
+}
+pub mod webnode;
+
+#[cfg(not(any(target_family = "wasm", feature = "32x9")))]
+pub use native_fp256::*;
+
+#[cfg(any(target_family = "wasm", feature = "32x9"))]
+pub use webnode::*;
+
+// impl_Fp!(Fp64, Fp64Parameters, BigInteger64, BigInteger64, 1, "64");
+// impl_Fp!(
+//     Fp320,
+//     Fp320Parameters,
+//     BigInteger320,
+//     BigInteger320,
+//     5,
+//     "320"
+// );
+// impl_Fp!(
+//     Fp384,
+//     Fp384Parameters,
+//     BigInteger384,
+//     BigInteger384,
+//     6,
+//     "384"
+// );
+// impl_Fp!(
+//     Fp448,
+//     Fp448Parameters,
+//     BigInteger448,
+//     BigInteger448,
+//     7,
+//     "448"
+// );
+// impl_Fp!(
+//     Fp768,
+//     Fp768Parameters,
+//     BigInteger768,
+//     BigInteger768,
+//     12,
+//     "768"
+// );
+// impl_Fp!(
+//     Fp832,
+//     Fp832Parameters,
+//     BigInteger832,
+//     BigInteger832,
+//     13,
+//     "832"
+// );
 
 pub mod fp2;
 pub use self::fp2::*;
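[Reviewer note] The whole `Fp256`/`BigInteger256` implementation is now swapped at compile time: wasm targets or the `32x9` feature select the `webnode` backend, everything else keeps the native 64x4 code, and both export the same names. A minimal standalone sketch of the same gating pattern (module and type names here are illustrative, not the crate's):

    // Two interchangeable backends behind one name, chosen at compile time.
    mod native {
        // 4 x u64 limbs: fast where 64x64->128 multiplication is native.
        pub type Limbs = [u64; 4];
    }

    mod webnode {
        // 9 x u32 limbs of 29 bits: avoids u128 emulation on wasm32.
        pub type Limbs = [u32; 9];
    }

    #[cfg(not(any(target_family = "wasm", feature = "32x9")))]
    pub use native::Limbs;

    #[cfg(any(target_family = "wasm", feature = "32x9"))]
    pub use webnode::Limbs;

    fn main() {
        // Downstream code compiles unchanged against either backend.
        println!("limb bytes: {}", core::mem::size_of::<Limbs>());
    }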
diff --git a/ff/src/fields/models/webnode.rs b/ff/src/fields/models/webnode.rs
new file mode 100644
index 000000000..648f7ff1a
--- /dev/null
+++ b/ff/src/fields/models/webnode.rs
@@ -0,0 +1,1090 @@
+use crate::{
+    biginteger::{
+        BigInteger as _BigInteger, webnode::BigInteger256,
+    },
+    bytes::{FromBytes, ToBytes},
+    fields::{FftField, Field, LegendreSymbol, PrimeField, SquareRootField},
+};
+use ark_serialize::*;
+use ark_std::{
+    cmp::{Ord, Ordering, PartialOrd},
+    fmt::{Display, Formatter, Result as FmtResult},
+    io::{Read, Result as IoResult, Write},
+    marker::PhantomData,
+    ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign},
+    str::FromStr, One, Zero,
+};
+
+impl<C: Fp256Parameters> Into<BigInteger256> for Fp256<C> {
+    fn into(self) -> BigInteger256 {
+        self.into_repr()
+    }
+}
+impl<C: Fp256Parameters> core::convert::TryFrom<BigInteger256> for Fp256<C> {
+    type Error = crate::fields::arithmetic::InvalidBigInt;
+
+    /// Converts `Self::BigInteger` into `Self`
+    ///
+    /// This method returns an error if `int` is larger than `P::MODULUS`.
+    fn try_from(int: BigInteger256) -> Result<Self, Self::Error> {
+        Self::from_repr(int).ok_or(crate::fields::arithmetic::InvalidBigInt)
+    }
+}
+
+const SHIFT: u32 = 29;
+const MASK: u32 = (1 << SHIFT) - 1;
+
+const SHIFT64: u64 = SHIFT as u64;
+const MASK64: u64 = MASK as u64;
+
+pub const fn from_64x4(pa: [u64; 4]) -> [u32; 9] {
+    let mut p = [0u32; 9];
+    p[0] = (pa[0] & MASK64) as u32;
+    p[1] = ((pa[0] >> 29) & MASK64) as u32;
+    p[2] = (((pa[0] >> 58) | (pa[1] << 6)) & MASK64) as u32;
+    p[3] = ((pa[1] >> 23) & MASK64) as u32;
+    p[4] = (((pa[1] >> 52) | (pa[2] << 12)) & MASK64) as u32;
+    p[5] = ((pa[2] >> 17) & MASK64) as u32;
+    p[6] = (((pa[2] >> 46) | (pa[3] << 18)) & MASK64) as u32;
+    p[7] = ((pa[3] >> 11) & MASK64) as u32;
+    p[8] = (pa[3] >> 40) as u32;
+    p
+}
+pub const fn to_64x4(pa: [u32; 9]) -> [u64; 4] {
+    let mut p = [0u64; 4];
+    p[0] = pa[0] as u64;
+    p[0] |= (pa[1] as u64) << 29;
+    p[0] |= (pa[2] as u64) << 58;
+    p[1] = (pa[2] as u64) >> 6;
+    p[1] |= (pa[3] as u64) << 23;
+    p[1] |= (pa[4] as u64) << 52;
+    p[2] = (pa[4] as u64) >> 12;
+    p[2] |= (pa[5] as u64) << 17;
+    p[2] |= (pa[6] as u64) << 46;
+    p[3] = (pa[6] as u64) >> 18;
+    p[3] |= (pa[7] as u64) << 11;
+    p[3] |= (pa[8] as u64) << 40;
+    p
+}
+
+const fn gte_modulus<C: Fp256Parameters>(x: &BigInteger256) -> bool {
+    let mut i = Fp256::<C>::NLIMBS - 1;
+    loop {
+        // don't fix warning -- that makes it 15% slower!
+        #[allow(clippy::comparison_chain)]
+        if x.0[i] > C::MODULUS.0[i] {
+            return true;
+        } else if x.0[i] < C::MODULUS.0[i] {
+            return false;
+        }
+        if i == 0 {
+            break;
+        }
+        i -= 1;
+    }
+    true
+}
+
+#[ark_ff_asm::unroll_for_loops]
+#[inline(always)]
+const fn conditional_reduce<C: Fp256Parameters>(x: &mut BigInteger256) {
+    if gte_modulus::<C>(&x) {
+        for i in 0..9 {
+            x.0[i] = x.0[i].wrapping_sub(C::MODULUS.0[i]);
+        }
+        for i in 1..9 {
+            x.0[i] += ((x.0[i - 1] as i32) >> SHIFT) as u32;
+        }
+        for i in 0..8 {
+            x.0[i] &= MASK;
+        }
+    }
+}
+
+#[ark_ff_asm::unroll_for_loops]
+#[inline(always)]
+fn add_assign<C: Fp256Parameters>(x: &mut BigInteger256, y: &BigInteger256) {
+    let y = &y.0;
+    let mut tmp: u32;
+    let mut carry: i32 = 0;
+
+    for i in 0..9 {
+        tmp = x.0[i] + y[i] + (carry as u32);
+        carry = (tmp as i32) >> SHIFT;
+        x.0[i] = tmp & MASK;
+    }
+
+    if gte_modulus::<C>(x) {
+        carry = 0;
+        for i in 0..9 {
+            tmp = x.0[i].wrapping_sub(C::MODULUS.0[i]) + (carry as u32);
+            carry = (tmp as i32) >> SHIFT;
+            x.0[i] = tmp & MASK;
+        }
+    }
+}
+
+#[derive(Clone, Copy, Default, Eq, PartialEq, Hash)]
+pub struct Fp256<C: Fp256Parameters>(pub BigInteger256, PhantomData<C>);
+
+/// Note that this implementation of `Ord` compares field elements viewing
+/// them as integers in the range 0, 1, ..., P::MODULUS - 1. However, other
+/// implementations of `PrimeField` might choose a different ordering, and
+/// as such, users should use this `Ord` for applications where
+/// any ordering suffices (like in a BTreeMap), and not in applications
+/// where a particular ordering is required.
+impl<C: Fp256Parameters> Ord for Fp256<C> {
+    #[inline(always)]
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.into_repr().cmp(&other.into_repr())
+    }
+}
+/// Note that this implementation of `PartialOrd` compares field elements viewing
+/// them as integers in the range 0, 1, ..., `P::MODULUS` - 1. However, other
+/// implementations of `PrimeField` might choose a different ordering, and
+/// as such, users should use this `PartialOrd` for applications where
+/// any ordering suffices (like in a BTreeMap), and not in applications
+/// where a particular ordering is required.
+impl<C: Fp256Parameters> PartialOrd for Fp256<C> {
+    #[inline(always)]
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl<C: Fp256Parameters> Display for Fp256<C> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+        f.write_fmt(format_args!("{:?}", self.0))
+    }
+}
+
+impl<C: Fp256Parameters> ark_std::fmt::Debug for Fp256<C> {
+    fn fmt(&self, f: &mut ark_std::fmt::Formatter<'_>) -> ark_std::fmt::Result {
+        use crate::ark_std::string::ToString;
+        let r: BigInteger256 = self.into_repr();
+        let bigint: num_bigint::BigUint = r.into();
+        let s = bigint.to_string();
+
+        let name = match C::T.0[0] {
+            0x192d30ed => "Fp",
+            0xc46eb21 => "Fq",
+            _ => panic!(),
+        };
+
+        f.write_fmt(format_args!("{}({})", name, s))
+    }
+}
+
+impl<C: Fp256Parameters> Fp256<C> {
+    const NLIMBS: usize = 9;
+
+    #[inline]
+    pub const fn new(element: BigInteger256) -> Self {
+        Self(element, PhantomData)
+    }
+    const fn const_is_zero(&self) -> bool {
+        let mut index = 0;
+        let mut is_zero = true;
+        while index < Self::NLIMBS {
+            is_zero &= self.0.0[index] == 0;
+            index += 1;
+        }
+        is_zero
+    }
+    const fn const_neg(self, modulus: BigInteger256) -> Self {
+        if !self.const_is_zero() {
+            Self::new(Self::sub_noborrow(&modulus, &self.0))
+        } else {
+            self
+        }
+    }
+
+    #[ark_ff_asm::unroll_for_loops]
+    #[allow(unused_assignments)]
+    const fn sub_noborrow(a: &BigInteger256, b: &BigInteger256) -> BigInteger256 {
+        /// Calculate a - b - borrow, returning the result and modifying
+        /// the borrow value.
+        macro_rules! sbb {
+            ($a:expr, $b:expr, &mut $borrow:expr$(,)?) => {{
+                let tmp = (1u64 << 32) + ($a as u64) - ($b as u64) - ($borrow as u64);
+                $borrow = if tmp >> 32 == 0 { 1 } else { 0 };
+                tmp as u32
+            }};
+        }
+        let mut a = *a;
+        let mut borrow = 0;
+        for i in 0..9 {
+            a.0[i] = sbb!(a.0[i], b.0[i], &mut borrow);
+        }
+        a
+    }
+
+    /// Interpret a string of decimal numbers as a prime field element.
+    /// Does not accept unnecessary leading zeroes or a blank string.
+    /// For *internal* use only; please use the `field_new` macro instead
+    /// of this method
+    #[doc(hidden)]
+    pub const fn const_from_str(
+        limbs: &[u64],
+        is_positive: bool,
+        r2: BigInteger256,
+        modulus: BigInteger256,
+        inv: u64,
+    ) -> Self {
+        let repr = match limbs {
+            [a, b, c, d] => BigInteger256::from_64x4([*a, *b, *c, *d]),
+            [a, b, c] => BigInteger256::from_64x4([*a, *b, *c, 0]),
+            [a, b] => BigInteger256::from_64x4([*a, *b, 0, 0]),
+            [a] => BigInteger256::from_64x4([*a, 0, 0, 0]),
+            _ => panic!(),
+        };
+        let res = Self::const_from_repr(repr, r2, modulus, inv as u32);
+        if is_positive {
+            res
+        } else {
+            res.const_neg(modulus)
+        }
+    }
+
+    #[inline]
+    pub(crate) const fn const_from_repr(
+        repr: BigInteger256,
+        r2: BigInteger256,
+        modulus: BigInteger256,
+        inv: u32,
+    ) -> Self {
+        let mut r = Self::new(repr);
+        if r.const_is_zero() {
+            r
+        } else {
+            r.const_mul(&Fp256(r2, PhantomData), &modulus, inv);
+            r
+        }
+    }
+
+    const U64_MODULUS: [u64; 9] = {
+        let mut modulus64 = [0u64; 9];
+        let modulus = C::MODULUS;
+        let mut i = 0;
+        while i < 9 {
+            modulus64[i] = modulus.0[i] as u64;
+            i += 1;
+        }
+        modulus64
+    };
+    /// Implementation based on https://github.com/o1-labs/proof-systems/pull/2638
+    #[ark_ff_asm::unroll_for_loops]
+    #[inline(always)]
+    const fn const_mul_without_reduce(&mut self, other: &Self, _modulus: &BigInteger256, _inv: u32) {
+        let x = &mut self.0.0;
+        let y = &other.0.0;
+
+        let mut y_local = [0u64; 9];
+        for index in 0..9 {
+            y_local[index] = y[index] as u64;
+        }
+
+        let mut xy = [0u64; 9];
+
+        for i in 0..9 {
+            let xi = x[i] as u64;
+
+            let tmp = (xi * y_local[0]) + xy[0];
+            let qi = (MASK64 + 1) - (tmp & MASK64);
+            let carry = (tmp + (qi * Self::U64_MODULUS[0])) >> SHIFT64;
+
+            for j in 1..8 {
+                let did_carry = j == 1;
+                let mut xy_j = xy[j];
+                if did_carry {
+                    xy_j += carry;
+                }
+                xy[j - 1] = (xy_j + (xi * y_local[j])) + (qi * Self::U64_MODULUS[j]);
+            }
+
+            let j = Self::NLIMBS - 1;
+            xy[j - 1] = (xi * y_local[j]) + (qi * Self::U64_MODULUS[j]);
+        }
+
+        for j in 1..9 {
+            x[j - 1] = (xy[j - 1] as u32) & MASK;
+            xy[j] += xy[j - 1] >> SHIFT64;
+        }
+        x[Self::NLIMBS - 1] = xy[Self::NLIMBS - 1] as u32;
+    }
+
+    #[inline(always)]
+    const fn const_mul(&mut self, other: &Self, modulus: &BigInteger256, inv: u32) {
+        self.const_mul_without_reduce(other, modulus, inv);
+        self.const_reduce(modulus);
+    }
+
+    #[inline(always)]
+    const fn const_reduce(&mut self, _modulus: &BigInteger256) {
+        conditional_reduce::<C>(&mut self.0);
+    }
+
+    // don't fix warning -- that makes it 15% slower!
+    #[allow(clippy::comparison_chain)]
+    const fn const_is_valid(&self, _modulus: &BigInteger256) -> bool {
+        let mut i = Fp256::<C>::NLIMBS - 1;
+        loop {
+            if self.0.0[i] > C::MODULUS.0[i] {
+                return false;
+            } else if self.0.0[i] < C::MODULUS.0[i] {
+                return true;
+            }
+            if i == 0 {
+                break;
+            }
+            i -= 1;
+        }
+        false
+    }
+
+    /// Implementation based on https://github.com/o1-labs/proof-systems/pull/2638
+    #[ark_ff_asm::unroll_for_loops]
+    #[inline(always)]
+    const fn const_square(&mut self) {
+        let mut x = [0u64; 9];
+        for i in 0..9 {
+            x[i] = self.0.0[i] as u64;
+        }
+        let mut xy = [0u64; 9];
+        for i in 0..9 {
+            let j = 0;
+            let tmp = if i == 0 {
+                x[i] * x[j]
+            } else {
+                ((x[i] * x[j]) << 1) + xy[j]
+            };
+            let qi = (MASK64 + 1) - (tmp & MASK64);
+            let carry = (tmp + (qi * Self::U64_MODULUS[0])) >> SHIFT64;
+            for j in 1..8 {
+                let did_carry = j == 1;
+                let mut xy_j = xy[j];
+                if did_carry {
+                    xy_j += carry;
+                }
+                if j <= i {
+                    let mut tmp = x[i] * x[j];
+                    if j < i {
+                        tmp <<= 1;
+                    }
+                    xy_j += tmp;
+                }
+                xy[j - 1] = xy_j + (qi * Self::U64_MODULUS[j]);
+            }
+            let j = 8;
+            xy[j - 1] = if i == j {
+                (x[i] * x[j]) + (qi * Self::U64_MODULUS[j])
+            } else {
+                qi * Self::U64_MODULUS[j]
+            };
+        }
+        for j in 1..9 {
+            self.0.0[j - 1] = (xy[j - 1] as u32) & MASK;
+            xy[j] += xy[j - 1] >> SHIFT64;
+        }
+        self.0.0[9 - 1] = xy[9 - 1] as u32;
+
+        self.const_reduce(&C::MODULUS);
+    }
+}
+
+impl<C: Fp256Parameters> Fp256<C> {
+    pub(crate) fn is_valid(&self) -> bool {
+        self.const_is_valid(&C::MODULUS)
+    }
+    fn reduce(&mut self) {
+        self.const_reduce(&C::MODULUS);
+    }
+}
+
+impl<C: Fp256Parameters> Zero for Fp256<C> {
+    fn zero() -> Self {
+        Self(BigInteger256([0; 9]), PhantomData)
+    }
+    fn is_zero(&self) -> bool {
+        self.0.0 == [0u32; 9]
+    }
+}
+
+impl<C: Fp256Parameters> One for Fp256<C> {
+    fn one() -> Self {
+        Self(C::R, PhantomData)
+    }
+    fn is_one(&self) -> bool {
+        self.0 == C::R
+    }
+}
+
+impl<C: Fp256Parameters> Neg for Fp256<C> {
+    type Output = Self;
+    #[must_use]
+    fn neg(self) -> Self {
+        if !self.is_zero() {
+            let mut tmp = C::MODULUS;
+            tmp.sub_noborrow(&self.0);
+            Fp256(tmp, PhantomData)
+        } else {
+            self
+        }
+    }
+}
+impl<C: Fp256Parameters> core::ops::DivAssign for Fp256<C> {
+    fn div_assign(&mut self, other: Self) {
+        self.div_assign(&other)
+    }
+}
+impl<C: Fp256Parameters> Add for Fp256<C> {
+    type Output = Self;
+    #[inline(always)]
+    fn add(mut self, other: Self) -> Self {
+        self.add_assign(other);
+        self
+    }
+}
+impl<C: Fp256Parameters> Sub for Fp256<C> {
+    type Output = Self;
+    fn sub(mut self, other: Self) -> Self {
+        self.sub_assign(other);
+        self
+    }
+}
+impl<C: Fp256Parameters> Div for Fp256<C> {
+    type Output = Self;
+    fn div(mut self, other: Self) -> Self {
+        self.div_assign(other);
+        self
+    }
+}
+impl<C: Fp256Parameters> core::ops::AddAssign for Fp256<C> {
+    #[inline(always)]
+    fn add_assign(&mut self, other: Self) {
+        add_assign::<C>(&mut self.0, &other.0)
+    }
+}
+impl<C: Fp256Parameters> Mul for Fp256<C> {
+    type Output = Self;
+    #[inline(always)]
+    fn mul(mut self, other: Self) -> Self {
+        self.mul_assign(other);
+        self
+    }
+}
+impl<C: Fp256Parameters> core::ops::MulAssign for Fp256<C> {
+    #[inline(always)]
+    fn mul_assign(&mut self, other: Self) {
+        self.const_mul(&other, &C::MODULUS, C::INV as u32);
+    }
+}
+impl<C: Fp256Parameters> SubAssign for Fp256<C> {
+    fn sub_assign(&mut self, other: Self) {
+        if other.0 > self.0 {
+            self.0.add_nocarry(&C::MODULUS);
+        }
+        self.0.sub_noborrow(&other.0);
+    }
+}
+impl<C: Fp256Parameters> core::iter::Sum<Self> for Fp256<C> {
+    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
+        iter.fold(Self::zero(), core::ops::Add::add)
+    }
+}
+impl<C: Fp256Parameters> core::iter::Product<Self> for Fp256<C> {
+    fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
+        iter.fold(Self::one(), Mul::mul)
+    }
+}
+
+impl<'a, C: Fp256Parameters> Div<&'a Self> for Fp256<C> {
+    type Output = Self;
+    fn div(mut self, other: &'a Self) -> Self {
+        self.div_assign(other);
+        self
+    }
+}
+impl<'a, C: Fp256Parameters> DivAssign<&'a Self> for Fp256<C> {
+    fn div_assign(&mut self, other: &'a Self) {
+        self.mul_assign(&other.inverse().unwrap());
+    }
+}
+impl<'a, C: Fp256Parameters> SubAssign<&'a Self> for Fp256<C> {
+    fn sub_assign(&mut self, other: &'a Self) {
+        if other.0 > self.0 {
+            self.0.add_nocarry(&C::MODULUS);
+        }
+        self.0.sub_noborrow(&other.0);
+    }
+}
+impl<'a, C: Fp256Parameters> Sub<&'a Self> for Fp256<C> {
+    type Output = Self;
+    fn sub(mut self, other: &'a Self) -> Self {
+        self.sub_assign(other);
+        self
+    }
+}
+impl<'a, C: Fp256Parameters> core::iter::Product<&'a Self> for Fp256<C> {
+    fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
+        iter.fold(Self::one(), Mul::mul)
+    }
+}
+impl<'a, C: Fp256Parameters> core::iter::Sum<&'a Self> for Fp256<C> {
+    fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
+        iter.fold(Self::zero(), core::ops::Add::add)
+    }
+}
+impl<'a, C: Fp256Parameters> Add<&'a Self> for Fp256<C> {
+    type Output = Self;
+    #[inline(always)]
+    fn add(mut self, other: &'a Self) -> Self {
+        self.add_assign(other);
+        self
+    }
+}
+impl<'a, C: Fp256Parameters> core::ops::AddAssign<&'a Self> for Fp256<C> {
+    #[inline(always)]
+    fn add_assign(&mut self, other: &'a Self) {
+        add_assign::<C>(&mut self.0, &other.0)
+    }
+}
+impl<'a, C: Fp256Parameters> Mul<&'a Self> for Fp256<C> {
+    type Output = Self;
+    #[inline(always)]
+    fn mul(mut self, other: &'a Self) -> Self {
+        self.mul_assign(other);
+        self
+    }
+}
+impl<'a, C: Fp256Parameters> core::ops::MulAssign<&'a Self> for Fp256<C> {
+    #[inline(always)]
+    fn mul_assign(&mut self, other: &'a Self) {
+        self.const_mul(&other, &C::MODULUS, C::INV as u32)
+    }
+}
+
+impl<C: Fp256Parameters> From<u128> for Fp256<C> {
+    fn from(value: u128) -> Self {
+        let hi = (value >> 64) as u64;
+        let lo = value as u64;
+        Self::from_repr(BigInteger256(from_64x4([lo, hi, 0, 0]))).unwrap()
+    }
+}
+impl<C: Fp256Parameters> From<u64> for Fp256<C> {
+    fn from(value: u64) -> Self {
+        Self::from_repr(BigInteger256::from_64x4([value, 0, 0, 0])).unwrap()
+    }
+}
+impl<C: Fp256Parameters> From<u32> for Fp256<C> {
+    fn from(value: u32) -> Self {
+        Self::from_repr(BigInteger256::from_64x4([value as u64, 0, 0, 0])).unwrap()
+    }
+}
+impl<C: Fp256Parameters> From<i64> for Fp256<C> {
+    fn from(value: i64) -> Self {
+        let abs = Self::from(value.unsigned_abs());
+        if value.is_positive() {
+            abs
+        } else {
+            -abs
+        }
+    }
+}
+impl<C: Fp256Parameters> From<i32> for Fp256<C> {
+    fn from(value: i32) -> Self {
+        let abs = Self::from(value.unsigned_abs());
+        if value.is_positive() {
+            abs
+        } else {
+            -abs
+        }
+    }
+}
+impl<C: Fp256Parameters> From<u16> for Fp256<C> {
+    fn from(value: u16) -> Self {
+        Self::from_repr(BigInteger256::from_64x4([value as u64, 0, 0, 0])).unwrap()
+    }
+}
+impl<C: Fp256Parameters> From<u8> for Fp256<C> {
+    fn from(value: u8) -> Self {
+        Self::from_repr(BigInteger256::from_64x4([value as u64, 0, 0, 0])).unwrap()
+    }
+}
+impl<C: Fp256Parameters> From<bool> for Fp256<C> {
+    fn from(value: bool) -> Self {
+        Self::from_repr(BigInteger256::from_64x4([value as u64, 0, 0, 0])).unwrap()
+    }
+}
+
+impl<C: Fp256Parameters> CanonicalSerializeWithFlags for Fp256<C> {
+    fn serialize_with_flags<W: Write, F: Flags>(
+        &self,
+        mut writer: W,
+        flags: F,
+    ) -> Result<(), SerializationError> {
+        if F::BIT_SIZE > 8 {
+            return Err(SerializationError::NotEnoughSpace);
+        }
+        let output_byte_size = buffer_byte_size(C::MODULUS_BITS as usize + F::BIT_SIZE);
+        let mut bytes = [0u8; 4 * 8 + 1];
+        self.write(&mut bytes[..4 * 8])?;
+        bytes[output_byte_size - 1] |= flags.u8_bitmask();
+        writer.write_all(&bytes[..output_byte_size])?;
+        Ok(())
+    }
+    fn serialized_size_with_flags<F: Flags>(&self) -> usize {
+        buffer_byte_size(C::MODULUS_BITS as usize + F::BIT_SIZE)
+    }
+}
+impl<C: Fp256Parameters> CanonicalSerialize for Fp256<C> {
+    fn serialize<W: Write>(&self, writer: W) -> Result<(), SerializationError> {
+        self.serialize_with_flags(writer, EmptyFlags)
+    }
+    fn serialized_size(&self) -> usize {
+        self.serialized_size_with_flags::<EmptyFlags>()
+    }
+}
+impl<C: Fp256Parameters> CanonicalDeserializeWithFlags for Fp256<C> {
+    fn deserialize_with_flags<R: Read, F: Flags>(
+        mut reader: R,
+    ) -> Result<(Self, F), SerializationError> {
+        if F::BIT_SIZE > 8 {
+            return Err(SerializationError::NotEnoughSpace);
+        }
+        let output_byte_size = buffer_byte_size(C::MODULUS_BITS as usize + F::BIT_SIZE);
+        let mut masked_bytes = [0; 4 * 8 + 1];
+        reader.read_exact(&mut masked_bytes[..output_byte_size])?;
+        let flags = F::from_u8_remove_flags(&mut masked_bytes[output_byte_size - 1])
+            .ok_or(SerializationError::UnexpectedFlags)?;
+        Ok((Self::read(&masked_bytes[..])?, flags))
+    }
+}
+impl<C: Fp256Parameters> CanonicalDeserialize for Fp256<C> {
+    fn deserialize<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        Self::deserialize_with_flags::<R, EmptyFlags>(reader).map(|(r, _)| r)
+    }
+}
+
+impl<C: Fp256Parameters> PrimeField for Fp256<C> {
+    type Params = C;
+    type BigInt = BigInteger256;
+    #[inline]
+    fn from_repr(r: BigInteger256) -> Option<Self> {
+        let mut r = Self(r, PhantomData);
+        if r.is_zero() {
+            Some(r)
+        } else if r.is_valid() {
+            r *= &Self(C::R2, PhantomData);
+            Some(r)
+        } else {
+            None
+        }
+    }
+    #[inline]
+    #[allow(clippy::modulo_one)]
+    fn into_repr(&self) -> BigInteger256 {
+        let one = BigInteger256([1, 0, 0, 0, 0, 0, 0, 0, 0]);
+        self.mul(Self(one, PhantomData)).0
+    }
+}
+
+impl<C: Fp256Parameters> From<num_bigint::BigUint> for Fp256<C> {
+    fn from(val: num_bigint::BigUint) -> Self {
+        Self::from_le_bytes_mod_order(&val.to_bytes_le())
+    }
+}
+impl<C: Fp256Parameters> Into<num_bigint::BigUint> for Fp256<C> {
+    fn into(self) -> num_bigint::BigUint {
+        self.into_repr().into()
+    }
+}
+
+impl<C: Fp256Parameters> FromStr for Fp256<C> {
+    type Err = ();
+    /// Interpret a string of numbers as a (congruent) prime field element.
+    /// Does not accept unnecessary leading zeroes or a blank string.
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if s.is_empty() {
+            return Err(());
+        }
+        if s == "0" {
+            return Ok(Self::zero());
+        }
+        let mut res = Self::zero();
+        use core::convert::TryFrom;
+        let ten = Self::try_from(<Self as PrimeField>::BigInt::from(10)).unwrap();
+        let mut first_digit = true;
+        for c in s.chars() {
+            match c.to_digit(10) {
+                Some(c) => {
+                    if first_digit {
+                        if c == 0 {
+                            return Err(());
+                        }
+                        first_digit = false;
+                    }
+                    res.mul_assign(&ten);
+                    let digit = Self::from(u64::from(c));
+                    res.add_assign(&digit);
+                },
+                None => {
+                    return Err(());
+                },
+            }
+        }
+        if !res.is_valid() {
+            Err(())
+        } else {
+            Ok(res)
+        }
+    }
+}
+
+impl<C: Fp256Parameters> ToBytes for Fp256<C> {
+    fn write<W: Write>(&self, writer: W) -> IoResult<()> {
+        self.into_repr().write(writer)
+    }
+}
+impl<C: Fp256Parameters> FromBytes for Fp256<C> {
+    fn read<R: Read>(reader: R) -> IoResult<Self> {
+        BigInteger256::read(reader).and_then(|b| match Fp256::from_repr(b) {
+            Some(f) => Ok(f),
+            None => Err(crate::error("FromBytes::read failed")),
+        })
+    }
+}
+
+impl<C: Fp256Parameters> Field for Fp256<C> {
+    type BasePrimeField = Self;
+    fn extension_degree() -> u64 {
+        1
+    }
+    fn from_base_prime_field_elems(elems: &[Self::BasePrimeField]) -> Option<Self> {
+        if elems.len() != (Self::extension_degree() as usize) {
+            return None;
+        }
+        Some(elems[0])
+    }
+    #[inline]
+    fn double(&self) -> Self {
+        let mut temp = *self;
+        temp.double_in_place();
+        temp
+    }
+    #[inline]
+    fn double_in_place(&mut self) -> &mut Self {
+        self.0.mul2();
+        self.reduce();
+        self
+    }
+    #[inline]
+    fn characteristic() -> [u64; 4] {
+        C::MODULUS.to_64x4()
+    }
+    #[inline]
+    fn from_random_bytes_with_flags<F: Flags>(bytes: &[u8]) -> Option<(Self, F)> {
+        if F::BIT_SIZE > 8 {
+            return None;
+        } else {
+            let mut result_bytes = [0u8; 4 * 8 + 1];
+            result_bytes
+                .iter_mut()
+                .zip(bytes)
+                .for_each(|(result, input)| {
+                    *result = *input;
+                });
+            let last_limb_mask = (u64::MAX >> C::REPR_SHAVE_BITS).to_le_bytes();
+            let mut last_bytes_mask = [0u8; 9];
+            last_bytes_mask[..8].copy_from_slice(&last_limb_mask);
+            let output_byte_size = buffer_byte_size(C::MODULUS_BITS as usize + F::BIT_SIZE);
+            let flag_location = output_byte_size - 1;
+            let flag_location_in_last_limb = flag_location - (8 * (4 - 1));
+            let last_bytes = &mut result_bytes[8 * (4 - 1)..];
+            let flags_mask = u8::MAX.checked_shl(8 - (F::BIT_SIZE as u32)).unwrap_or(0);
+            let mut flags: u8 = 0;
+            for (i, (b, m)) in last_bytes.iter_mut().zip(&last_bytes_mask).enumerate() {
+                if i == flag_location_in_last_limb {
+                    flags = *b & flags_mask;
+                }
+                *b &= m;
+            }
+            Self::deserialize(&result_bytes[..(4 * 8)])
+                .ok()
+                .and_then(|f| F::from_u8(flags).map(|flag| (f, flag)))
+        }
+    }
+    #[inline(always)]
+    fn square(&self) -> Self {
+        let mut temp = self.clone();
+        temp.square_in_place();
+        temp
+    }
+    #[inline(always)]
+    fn square_in_place(&mut self) -> &mut Self {
+        self.const_square();
+        self
+    }
+    #[inline]
+    fn inverse(&self) -> Option<Self> {
+        if self.is_zero() {
+            None
+        } else {
+            let one = BigInteger256::from(1);
+            let mut u = self.0;
+            let mut v = C::MODULUS;
+            let mut b = Self(C::R2, PhantomData);
+            let mut c = Self::zero();
+            while u != one && v != one {
+                while u.is_even() {
+                    u.div2();
+                    if b.0.is_even() {
+                        b.0.div2();
+                    } else {
+                        b.0.add_nocarry(&C::MODULUS);
+                        b.0.div2();
+                    }
+                }
+                while v.is_even() {
+                    v.div2();
+                    if c.0.is_even() {
+                        c.0.div2();
+                    } else {
+                        c.0.add_nocarry(&C::MODULUS);
+                        c.0.div2();
+                    }
+                }
+                if v < u {
+                    u.sub_noborrow(&v);
+                    b.sub_assign(&c);
+                } else {
+                    v.sub_noborrow(&u);
+                    c.sub_assign(&b);
+                }
+            }
+            if u == one {
+                Some(b)
+            } else {
+                Some(c)
+            }
+        }
+    }
+    fn inverse_in_place(&mut self) -> Option<&mut Self> {
+        if let Some(inverse) = self.inverse() {
+            *self = inverse;
+            Some(self)
+        } else {
+            None
+        }
+    }
+    /// The Frobenius map has no effect in a prime field.
+    #[inline]
+    fn frobenius_map(&mut self, _: usize) {}
+}
+
+#[cfg(not(feature = "32x9"))]
+impl<C: Fp256Parameters> ark_std::rand::distributions::Distribution<Fp256<C>>
+    for ark_std::rand::distributions::Standard
+{
+    #[inline]
+    fn sample<R: ark_std::rand::Rng + ?Sized>(&self, rng: &mut R) -> Fp256<C> {
+        loop {
+            if !(C::REPR_SHAVE_BITS <= 64) {
+                panic!("assertion failed: P::REPR_SHAVE_BITS <= 64")
+            }
+            let mask = if C::REPR_SHAVE_BITS == 64 {
+                0
+            } else {
+                core::u64::MAX >> C::REPR_SHAVE_BITS
+            };
+
+            let mut tmp: [u64; 4] = rng.sample(ark_std::rand::distributions::Standard);
+            tmp.as_mut().last_mut().map(|val| *val &= mask);
+
+            let tmp = Fp256(BigInteger256::from_64x4(tmp), PhantomData);
+            if tmp.is_valid() {
+                return tmp;
+            }
+        }
+    }
+}
+
+// During tests, we want to generate the same field elements as on native
+// (to test witness generation etc.)
+#[cfg(feature = "32x9")]
+impl<C: Fp256Parameters> ark_std::rand::distributions::Distribution<Fp256<C>>
+    for ark_std::rand::distributions::Standard
+{
+    #[inline]
+    fn sample<R: ark_std::rand::Rng + ?Sized>(&self, rng: &mut R) -> Fp256<C> {
+        loop {
+            if !(C::REPR_SHAVE_BITS <= 64) {
+                panic!("assertion failed: P::REPR_SHAVE_BITS <= 64")
+            }
+            let mask = if C::REPR_SHAVE_BITS == 64 {
+                0
+            } else {
+                core::u64::MAX >> C::REPR_SHAVE_BITS
+            };
+            let mut tmp: [u64; 4] = rng.sample(ark_std::rand::distributions::Standard);
+            tmp.as_mut().last_mut().map(|val| *val &= mask);
+            let is_fp = match C::T.0[0] {
+                0x192d30ed => true,
+                0xc46eb21 => false,
+                _ => panic!(),
+            };
+            const FP_MODULUS: [u64; 4] = [
+                0x992d30ed00000001,
+                0x224698fc094cf91b,
+                0x0,
+                0x4000000000000000,
+            ];
+            const FQ_MODULUS: [u64; 4] = [
+                0x8c46eb2100000001,
+                0x224698fc0994a8dd,
+                0x0,
+                0x4000000000000000,
+            ];
+            let (modulus, inv) = if is_fp {
+                (FP_MODULUS, 11037532056220336127)
+            } else {
+                (FQ_MODULUS, 10108024940646105087)
+            };
+            let is_valid = || {
+                for (random, modulus) in tmp.iter().copied().zip(modulus).rev() {
+                    if random > modulus {
+                        return false;
+                    } else if random < modulus {
+                        return true;
+                    }
+                }
+                false
+            };
+            if !is_valid() {
+                continue;
+            }
+            let mut r = tmp;
+            // Montgomery Reduction
+            for i in 0..4 {
+                let k = r[i].wrapping_mul(inv);
+                let mut carry = 0;
+                mac_with_carry!(r[i], k, modulus[0] as _, &mut carry);
+                for j in 1..4 {
+                    r[(j + i) % 4] = mac_with_carry!(r[(j + i) % 4], k, modulus[j], &mut carry);
+                }
+                r[i % 4] = carry;
+            }
+            tmp = r;
+            return Fp256::<C>::from_repr(BigInteger256::from_64x4(tmp)).unwrap();
+        }
+    }
+}
+
+pub struct NewFpParameters;
+
+impl<C: Fp256Parameters> zeroize::DefaultIsZeroes for Fp256<C> {}
+
+impl<C: Fp256Parameters> FftField for Fp256<C> {
+    type FftParams = C;
+    fn two_adic_root_of_unity() -> Self {
+        Fp256::<C>(C::TWO_ADIC_ROOT_OF_UNITY, PhantomData)
+    }
+    fn large_subgroup_root_of_unity() -> Option<Self> {
+        Some(Fp256::<C>(C::LARGE_SUBGROUP_ROOT_OF_UNITY?, PhantomData))
+    }
+    fn multiplicative_generator() -> Self {
+        Fp256::<C>(C::GENERATOR, PhantomData)
+    }
+}
+
+impl<C: Fp256Parameters> SquareRootField for Fp256<C> {
+    #[inline]
+    fn legendre(&self) -> LegendreSymbol {
+        use crate::fields::LegendreSymbol::*;
+
+        let modulus_minus_one_div_two = C::MODULUS_MINUS_ONE_DIV_TWO.to_64x4();
+        let s = self.pow(modulus_minus_one_div_two);
+        if s.is_zero() {
+            Zero
+        } else if s.is_one() {
+            QuadraticResidue
+        } else {
+            QuadraticNonResidue
+        }
+    }
+    #[inline]
+    fn sqrt(&self) -> Option<Self> {
+        {
+            let t_minus_one_div_two = C::T_MINUS_ONE_DIV_TWO.to_64x4();
+
+            if self.is_zero() {
+                return Some(Self::zero());
+            }
+            let mut z = Self::qnr_to_t();
+            let mut w = self.pow(t_minus_one_div_two);
+            let mut x = w * self;
+            let mut b = x * &w;
+            let mut v = C::TWO_ADICITY as usize;
+            while !b.is_one() {
+                let mut k = 0usize;
+                let mut b2k = b;
+                while !b2k.is_one() {
+                    b2k.square_in_place();
+                    k += 1;
+                }
+                if k == (C::TWO_ADICITY as usize) {
+                    return None;
+                }
+                let j = v - k;
+                w = z;
+                for _ in 1..j {
+                    w.square_in_place();
+                }
+                z = w.square();
+                b *= &z;
+                x *= &w;
+                v = k;
+            }
+            if x.square() == *self {
+                return Some(x);
+            } else {
+                #[cfg(debug_assertions)]
+                {
+                    use crate::fields::LegendreSymbol::*;
+                    if self.legendre() != QuadraticNonResidue {
+                        panic!(
+                            "Input has a square root per its legendre symbol, but it was not found",
+                        )
+                    }
+                }
+                None
+            }
+        }
+    }
+    fn sqrt_in_place(&mut self) -> Option<&mut Self> {
+        (*self).sqrt().map(|sqrt| {
+            *self = sqrt;
+            self
+        })
+    }
+}
+
+pub trait Fp256Parameters:
+    crate::FpParameters<BigInt = BigInteger256>
+    + ark_std::fmt::Debug
+    + Clone
+    + Copy
+    + Default
+    + Eq
+    + PartialEq
+    + PartialOrd
+    + Ord
+    + core::hash::Hash
+    + 'static
+    + Send
+    + Sync
+    + Sized
+{
+}
diff --git a/ff/src/lib.rs b/ff/src/lib.rs
index b5c2ac0db..da4bee511 100644
--- a/ff/src/lib.rs
+++ b/ff/src/lib.rs
@@ -1,6 +1,6 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 #![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)]
-#![allow(clippy::op_ref, clippy::suspicious_op_assign_impl)]
+#![allow(clippy::op_ref, clippy::suspicious_op_assign_impl, ambiguous_glob_reexports)]
 #![cfg_attr(not(feature = "asm"), forbid(unsafe_code))]
 #![cfg_attr(use_asm, feature(llvm_asm))]
 #![cfg_attr(feature = "asm", deny(unsafe_code))]
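[Reviewer note] End to end, the new surface looks like this (a sketch assuming this fork is the `ark_ff` in scope; `F` stands for any concrete `Fp256` instantiation, and all the calls used -- `from_64x4`, `to_64x4`, `from_repr`, `into_repr` -- are introduced or kept by this patch):

    use ark_ff::{BigInteger, BigInteger256, PrimeField};

    // Callers now go through the [u64; 4] view regardless of whether the
    // backend stores 64x4 native limbs or 32x9 webnode digits.
    fn round_trip<F: PrimeField<BigInt = BigInteger256>>() {
        let n = BigInteger256::from_64x4([42, 0, 0, 0]);
        let x = F::from_repr(n).expect("42 is below the modulus");
        assert_eq!(x.into_repr().to_64x4(), [42, 0, 0, 0]);
    }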