Created
May 16, 2023 13:36
-
-
Save ulerdogan/2ff36487fe5e4f5e5a8d5ff299e82fa2 to your computer and use it in GitHub Desktop.
r1 verifier
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
//********************************************************************************************/ | |
// ___ _ ___ _ _ _ _ | |
// | __| _ ___ __| |_ / __|_ _ _ _ _ __| |_ ___ | | (_) |__ | |
// | _| '_/ -_|_-< ' \ | (__| '_| || | '_ \ _/ _ \ | |__| | '_ \ | |
// |_||_| \___/__/_||_| \___|_| \_, | .__/\__\___/ |____|_|_.__/ | |
// |__/|_| | |
///* Copyright (C) 2022 - Renaud Dubois - This file is part of FCL (Fresh CryptoLib) project | |
///* License: This software is licensed under MIT License | |
///* This Code may be reused including license and copyright notice. | |
///* See LICENSE file at the root folder of the project. | |
///* FILE: FCL_elliptic.sol | |
///* | |
///* | |
///* DESCRIPTION: modified XYZZ system coordinates for EVM elliptic point multiplication | |
///* optimization | |
///* | |
//**************************************************************************************/ | |
//* WARNING: this code SHALL not be used for non prime order curves for security reasons. | |
// Code is optimized for a=-3 only curves with prime order, constant like -1, -2 shall be replaced | |
// if ever used for other curve than sec256R1 | |
// SPDX-License-Identifier: MIT | |
pragma solidity ^0.8.0; | |
//import "hardhat/console.sol"; | |
library P256Verifier { | |
// Set parameters for curve sec256r1 (NIST P-256 / prime256v1), per SEC 2.
//curve prime field modulus
uint256 constant p = 0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF;
//short weierstrass first coefficient; note a = p - 3, i.e. a = -3 mod p,
//which the doubling formulas below rely on (M = 3(X-ZZ)(X+ZZ) is only valid for a = -3)
uint256 constant a = 0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC;
//short weierstrass second coefficient
uint256 constant b = 0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B;
//generating point (basepoint G) affine coordinates
uint256 constant gx = 0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296;
uint256 constant gy = 0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5;
//curve order (number of points); scalars and signature components live mod n
uint256 constant n = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551;
/* -2 mod p constant, used to speed up inversion and doubling (avoid negation)*/
uint256 constant minus_2 = 0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFD;
/* -2 mod n constant, used to speed up inversion (Fermat exponent n-2)*/
uint256 constant minus_2modn = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC63254F;
/* max uint256 (-1 mod 2^256); used as the underflow sentinel of the main loop */
uint256 constant minus_1 = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF;
/**
 * @dev Modular inverse mod n (the curve order) via Fermat's little theorem:
 *      result = u^(n-2) mod n, delegated to the ModExp precompile at 0x05.
 * @param u value to invert; assumed non-zero mod n (0 maps to 0)
 * @return result the inverse of u modulo n
 */
function FCL_nModInv(uint256 u) internal view returns (uint256 result) {
    // scratch buffer laid out as the ModExp call payload:
    // [len(base), len(exp), len(mod), base, exponent, modulus] -- 6 words
    uint256[6] memory callBuf;
    assembly {
        // all three operands are one 32-byte word
        mstore(callBuf, 0x20)
        mstore(add(callBuf, 0x20), 0x20)
        mstore(add(callBuf, 0x40), 0x20)
        // base = u, exponent = n - 2, modulus = n
        mstore(add(callBuf, 0x60), u)
        mstore(add(callBuf, 0x80), minus_2modn)
        mstore(add(callBuf, 0xa0), n)
        // invoke the ModExp precompile (0x05); bubble failure as a plain revert
        if iszero(staticcall(not(0), 0x05, callBuf, 0xc0, callBuf, 0x20)) { revert(0, 0) }
        // the 32-byte result is written back over the start of the buffer
        result := mload(callBuf)
    }
}
/**
 * @dev Modular inverse mod p (the field prime) via Fermat's little theorem:
 *      result = u^(p-2) mod p, delegated to the ModExp precompile at 0x05.
 * @param u value to invert; assumed non-zero mod p (0 maps to 0)
 * @return result the inverse of u modulo p
 */
function FCL_pModInv(uint256 u) internal view returns (uint256 result) {
    // scratch buffer laid out as the ModExp call payload:
    // [len(base), len(exp), len(mod), base, exponent, modulus] -- 6 words
    uint256[6] memory callBuf;
    assembly {
        // all three operands are one 32-byte word
        mstore(callBuf, 0x20)
        mstore(add(callBuf, 0x20), 0x20)
        mstore(add(callBuf, 0x40), 0x20)
        // base = u, exponent = p - 2, modulus = p
        mstore(add(callBuf, 0x60), u)
        mstore(add(callBuf, 0x80), minus_2)
        mstore(add(callBuf, 0xa0), p)
        // invoke the ModExp precompile (0x05); bubble failure as a plain revert
        if iszero(staticcall(not(0), 0x05, callBuf, 0xc0, callBuf, 0x20)) { revert(0, 0) }
        // the 32-byte result is written back over the start of the buffer
        result := mload(callBuf)
    }
}
/**
 * @dev Lift an affine point (x0, y0) into XYZZ representation.
 * With z = 1 the XYZZ coordinates are simply (X, Y, ZZ = 1, ZZZ = 1).
 * @return P the point as [X, Y, ZZ, ZZZ]
 */
function ecAff_SetZZ(uint256 x0, uint256 y0) internal pure returns (uint256[4] memory P) {
    unchecked {
        P[0] = x0; //X
        P[1] = y0; //Y
        P[2] = 1; //ZZ = z^2
        P[3] = 1; //ZZZ = z^3
    }
}
/**
 * @dev Map an XYZZ point back to affine coordinates: (X/ZZ, Y/ZZZ).
 * Uses a single field inversion (of zzz) and derives 1/zz from it,
 * since zz/zzz = 1/z and (1/z)^2 = 1/zz.
 * https://hyperelliptic.org/EFD/g1p/auto-shortw-xyzz-3.html
 */
function ecZZ_SetAff(uint256 x, uint256 y, uint256 zz, uint256 zzz)
    internal
    view
    returns (uint256 x1, uint256 y1)
{
    uint256 inv = FCL_pModInv(zzz); // 1/zzz
    y1 = mulmod(y, inv, p); // Y/zzz
    uint256 zInv = mulmod(zz, inv, p); // zz/zzz = 1/z
    uint256 zzInv = mulmod(zInv, zInv, p); // 1/zz
    x1 = mulmod(x, zzInv, p); // X/zz
}
/**
 * @dev Point doubling in XYZZ coordinates, Sutherland 2008 "dbl-2008-s-1" formulas.
 * Specialized for a = -3: M = 3*X^2 + a*ZZ^2 collapses to 3*(X-ZZ)*(X+ZZ).
 * Registers (P0..P3, x, zz) are aggressively reused; statement order is load-bearing.
 * @return (P0, P1, P2, P3) = (X3, Y3, ZZ3, ZZZ3) of the doubled point
 */
function ecZZ_Dbl(uint256 x, uint256 y, uint256 zz, uint256 zzz)
    internal
    pure
    returns (uint256 P0, uint256 P1, uint256 P2, uint256 P3)
{
    unchecked {
        assembly {
            P0 := mulmod(2, y, p) //U = 2*Y1
            P2 := mulmod(P0, P0, p) // V=U^2
            P3 := mulmod(x, P2, p) // S = X1*V
            P1 := mulmod(P0, P2, p) // W=UV
            P2 := mulmod(P2, zz, p) //zz3=V*ZZ1
            zz := mulmod(3, mulmod(addmod(x, sub(p, zz), p), addmod(x, zz, p), p), p) //M=3*(X1-ZZ1)*(X1+ZZ1), valid only because a=-3
            P0 := addmod(mulmod(zz, zz, p), mulmod(minus_2, P3, p), p) //X3=M^2-2S (minus_2 spares an explicit negation)
            x := mulmod(zz, addmod(P3, sub(p, P0), p), p) //M(S-X3)
            P3 := mulmod(P1, zzz, p) //zzz3=W*zzz1
            P1 := addmod(x, sub(p, mulmod(P1, y, p)), p) //Y3= M(S-X3)-W*Y1
        }
    }
    return (P0, P1, P2, P3);
}
/**
 * @dev Sutherland2008 mixed addition: XYZZ point (x1,y1,zz1,zzz1) plus a
 * normalized (affine, ZZ=ZZZ=1) point (x2,y2), with greedy register reuse.
 * If the first operand is the neutral element (encoded y1 == 0), returns the
 * second operand lifted to XYZZ.
 * warning: assume that P1(x1,y1)!=P2(x2,y2), true in multiplication loop with prime order (cofactor 1)
 * @return (P0, P1, P2, P3) = (X3, Y3, ZZ3, ZZZ3) of the sum
 */
//tbd: return -x1 and -Y1 in double to avoid two substractions
function ecZZ_AddN(uint256 x1, uint256 y1, uint256 zz1, uint256 zzz1, uint256 x2, uint256 y2)
    internal
    pure
    returns (uint256 P0, uint256 P1, uint256 P2, uint256 P3)
{
    unchecked {
        if (y1 == 0) {
            // neutral + P2 = P2; affine point has ZZ = ZZZ = 1
            return (x2, y2, 1, 1);
        }
        assembly {
            y1 := sub(p, y1) // y1 = -Y1; spares a negation in the Y3 computation below
            y2 := addmod(mulmod(y2, zzz1, p), y1, p) //R = S2 - Y1 = y2*ZZZ1 - Y1
            x2 := addmod(mulmod(x2, zz1, p), sub(p, x1), p) //P = U2 - X1 = x2*ZZ1 - X1
            P0 := mulmod(x2, x2, p) //PP = P^2
            P1 := mulmod(P0, x2, p) //PPP = P*PP
            P2 := mulmod(zz1, P0, p) ////ZZ3 = ZZ1*PP
            P3 := mulmod(zzz1, P1, p) ////ZZZ3 = ZZZ1*PPP
            zz1 := mulmod(x1, P0, p) //Q = X1*PP
            P0 := addmod(addmod(mulmod(y2, y2, p), sub(p, P1), p), mulmod(minus_2, zz1, p), p) //X3 = R^2-PPP-2*Q
            P1 := addmod(mulmod(addmod(zz1, sub(p, P0), p), y2, p), mulmod(y1, P1, p), p) //Y3 = R*(Q-X3) - Y1*PPP (y1 holds -Y1 here)
        }
        //end assembly
    } //end unchecked
    return (P0, P1, P2, P3);
}
/**
 * @dev Return the neutral element of the curve in XYZZ coordinates
 * (encoded as the all-zero tuple).
 */
function ecZZ_SetZero() internal pure returns (uint256 x, uint256 y, uint256 zz, uint256 zzz) {
    x = 0;
    y = 0;
    zz = 0;
    zzz = 0;
}
/**
 * @dev Check if an XYZZ point is the neutral element of the curve.
 * The neutral is encoded with Y == 0, so only the y coordinate is inspected.
 * (Idiom fix: the original if/return-true/return-false chain is collapsed
 * into a direct boolean return; behavior is unchanged.)
 */
function ecZZ_IsZero(uint256 y0) internal pure returns (bool) {
    return y0 == 0;
}
/**
 * @dev Return the neutral element in affine coordinates, encoded as (0, 0).
 * Compatible with the doubling formulas (no special case needed).
 */
function ecAff_SetZero() internal pure returns (uint256 x, uint256 y) {
    x = 0;
    y = 0;
}
/**
 * @dev Check if an affine point is the neutral element (encoded with y == 0).
 */
function ecAff_IsZero(uint256 y) internal pure returns (bool flag) {
    flag = (y == 0);
}
/**
 * @dev Check that an affine point satisfies the curve equation y^2 = x^3 + a*x + b,
 * rejecting the neutral element (y == 0) even though it is formally on the curve.
 * FIX: coordinates are now required to be canonical, i.e. in [1, p-1]. The
 * original only rejected x == p / y == p exactly, so a non-canonical encoding
 * with x > p (aliasing x mod p under mulmod) could still pass the check.
 */
function ecAff_isOnCurve(uint256 x, uint256 y) internal pure returns (bool) {
    // reject zero coordinates and any value outside the canonical field range
    if (0 == x || x >= p || 0 == y || y >= p) {
        return false;
    }
    unchecked {
        uint256 LHS = mulmod(y, y, p); // y^2
        uint256 RHS = addmod(mulmod(mulmod(x, x, p), x, p), mulmod(x, a, p), p); // x^3+ax
        RHS = addmod(RHS, b, p); // x^3 + a*x + b
        return LHS == RHS;
    }
}
/**
 * @dev Add two elliptic curve points in affine coordinates.
 * Handles the neutral element (encoded with y == 0) on either side.
 * warning: for non-neutral inputs, assumes the points are distinct
 * (precondition inherited from ecZZ_AddN).
 * BUGFIX: when the SECOND point is neutral, the original returned (x1, y1)
 * — the neutral itself — instead of the first operand (x0, y0), so P + 0
 * evaluated to 0 rather than P.
 */
function ecAff_add(uint256 x0, uint256 y0, uint256 x1, uint256 y1) internal view returns (uint256, uint256) {
    uint256 zz0;
    uint256 zzz0;
    if (ecAff_IsZero(y0)) return (x1, y1); // 0 + P1 = P1
    if (ecAff_IsZero(y1)) return (x0, y0); // P0 + 0 = P0 (was incorrectly (x1, y1))
    (x0, y0, zz0, zzz0) = ecZZ_AddN(x0, y0, 1, 1, x1, y1);
    return ecZZ_SetAff(x0, y0, zz0, zzz0);
}
/**
 * @dev Computation of uG+vQ using Strauss-Shamir's trick, G basepoint, Q public key.
 * Processes both scalars bit-by-bit simultaneously; at each step the "dibit"
 * (bit of u) + 2*(bit of v) selects 0, G, Q or G+Q to add to the accumulator.
 * The accumulator's Y is kept NEGATED between iterations to save a subtraction
 * in the inlined addition. Returns only the affine x coordinate of the result.
 */
function ecZZ_mulmuladd_S_asm(
    uint256 Q0,
    uint256 Q1, // Point G and Q stored in one memory for stack optimization
    uint256 scalar_u,
    uint256 scalar_v
) internal view returns (uint256 X) {
    uint256 zz;
    uint256 zzz;
    uint256 Y;
    uint256 index = 255;
    uint256[6] memory T;
    uint256 H0;
    uint256 H1;
    unchecked {
        if (scalar_u == 0 && scalar_v == 0) return 0;
        (H0, H1) = ecAff_add(gx, gy, Q0, Q1); //will not work if Q=P, obvious forbidden private key
        /* equivalent high-level code for the leading-zero skip below:
        while( ( ((scalar_u>>index)&1)+2*((scalar_v>>index)&1) ) ==0){
        index=index-1;
        }
        */
        assembly {
            // scan down from bit 255 to the most significant non-zero dibit
            for { let T4 := add(shl(1, and(shr(index, scalar_v), 1)), and(shr(index, scalar_u), 1)) } eq(T4, 0) {
                index := sub(index, 1)
                T4 := add(shl(1, and(shr(index, scalar_v), 1)), and(shr(index, scalar_u), 1))
            } {}
            zz := add(shl(1, and(shr(index, scalar_v), 1)), and(shr(index, scalar_u), 1))
            // seed the accumulator from the first non-zero dibit: 1 -> G, 2 -> Q, 3 -> G+Q
            if eq(zz, 1) {
                X := gx
                Y := gy
            }
            if eq(zz, 2) {
                X := Q0
                Y := Q1
            }
            if eq(zz, 3) {
                X := H0
                Y := H1
            }
            index := sub(index, 1)
            zz := 1
            zzz := 1
            // main double-and-add loop; index wraps past 0 to 2^256-1, at which
            // point gt(minus_1, index) is false and the loop exits
            for {} gt(minus_1, index) { index := sub(index, 1) } {
                // inlined EcZZ_Dbl
                let T1 := mulmod(2, Y, p) //U = 2*Y1, y free
                let T2 := mulmod(T1, T1, p) // V=U^2
                let T3 := mulmod(X, T2, p) // S = X1*V
                T1 := mulmod(T1, T2, p) // W=UV
                let T4 := mulmod(3, mulmod(addmod(X, sub(p, zz), p), addmod(X, zz, p), p), p) //M=3*(X1-ZZ1)*(X1+ZZ1), a=-3 only
                zzz := mulmod(T1, zzz, p) //zzz3=W*zzz1
                zz := mulmod(T2, zz, p) //zz3=V*ZZ1, V free
                X := addmod(mulmod(T4, T4, p), mulmod(minus_2, T3, p), p) //X3=M^2-2S
                //T2:=mulmod(T4,addmod(T3, sub(p, X),p),p)//M(S-X3)
                T2 := mulmod(T4, addmod(X, sub(p, T3), p), p) //-M(S-X3)=M(X3-S)
                //Y:= addmod(T2, sub(p, mulmod(T1, Y ,p)),p )//Y3= M(S-X3)-W*Y1
                Y := addmod(mulmod(T1, Y, p), T2, p) //-Y3= W*Y1-M(S-X3), we replace Y by -Y to avoid a sub in ecAdd
                {
                    //value of dibit selects the point to add: 0 none, 1 G, 2 Q, 3 G+Q
                    T4 := add(shl(1, and(shr(index, scalar_v), 1)), and(shr(index, scalar_u), 1))
                    if iszero(T4) {
                        Y := sub(p, Y) //restore the -Y inversion
                        continue
                    } // if T4!=0
                    if eq(T4, 1) {
                        T1 := gx
                        T2 := gy
                    }
                    if eq(T4, 2) {
                        T1 := Q0
                        T2 := Q1
                    }
                    if eq(T4, 3) {
                        T1 := H0
                        T2 := H1
                    }
                    // inlined EcZZ_AddN (accumulator Y is already negated, so R = y2*zzz + Y)
                    //T3:=sub(p, Y)
                    //T3:=Y
                    let y2 := addmod(mulmod(T2, zzz, p), Y, p) //R
                    T2 := addmod(mulmod(T1, zz, p), sub(p, X), p) //P
                    //special extremely rare case accumulator where EcAdd is replaced by EcDbl, no need to optimize this
                    //todo : construct edge vector case
                    if eq(y2, 0) {
                        if eq(T2, 0) {
                            T1 := mulmod(2, Y, p) //U = 2*Y1, y free
                            T2 := mulmod(T1, T1, p) // V=U^2
                            T3 := mulmod(X, T2, p) // S = X1*V
                            let TT1 := mulmod(T1, T2, p) // W=UV
                            y2 := addmod(X, zz, p)
                            TT1 := addmod(X, sub(p, zz), p)
                            y2 := mulmod(y2, TT1, p)
                            T2 := addmod(X, zz, p)
                            T1 := addmod(X, sub(p, zz), p)
                            T2 := mulmod(T1, T2, p)
                            T4 := mulmod(3, T2, p)
                            zzz := mulmod(T1, zzz, p) //zzz3=W*zzz1
                            zz := mulmod(T2, zz, p) //zz3=V*ZZ1, V free
                            X := addmod(mulmod(T4, T4, p), mulmod(minus_2, T3, p), p) //X3=M^2-2S
                            T2 := mulmod(T4, addmod(T3, sub(p, X), p), p) //M(S-X3)
                            Y := addmod(T2, sub(p, mulmod(T1, Y, p)), p) //Y3= M(S-X3)-W*Y1
                            continue
                        }
                    }
                    T4 := mulmod(T2, T2, p) //PP
                    let TT1 := mulmod(T4, T2, p) //PPP, this one could be spared, but adding this register spare gas
                    zz := mulmod(zz, T4, p)
                    zzz := mulmod(zzz, TT1, p) //zzz3=ZZZ1*PPP
                    let TT2 := mulmod(X, T4, p)
                    T4 := addmod(addmod(mulmod(y2, y2, p), sub(p, TT1), p), mulmod(minus_2, TT2, p), p)
                    Y := addmod(mulmod(addmod(TT2, sub(p, T4), p), y2, p), mulmod(Y, TT1, p), p)
                    X := T4
                }
            } //end loop
            // only X is returned, so only zz needs inverting (not zzz):
            // store zz as the ModExp base before filling in lengths/exponent/modulus
            mstore(add(T, 0x60), zz)
            //(X,Y)=ecZZ_SetAff(X,Y,zz, zzz);
            //T[0] = inverseModp_Hard(T[0], p); //1/zzz, inline modular inversion using precompile:
            // Define length of base, exponent and modulus. 0x20 == 32 bytes
            mstore(T, 0x20)
            mstore(add(T, 0x20), 0x20)
            mstore(add(T, 0x40), 0x20)
            // Define variables base, exponent and modulus
            //mstore(add(pointer, 0x60), u)
            mstore(add(T, 0x80), minus_2)
            mstore(add(T, 0xa0), p)
            // Call the precompiled contract 0x05 = ModExp, computing 1/zz = zz^(p-2) mod p
            if iszero(staticcall(not(0), 0x05, T, 0xc0, T, 0x20)) { revert(0, 0) }
            //Y:=mulmod(Y,zzz,p)//Y/zzz
            //zz :=mulmod(zz, mload(T),p) //1/z
            //zz:= mulmod(zz,zz,p) //1/zz
            X := mulmod(X, mload(T), p) //X/zz
        } //end assembly
    } //end unchecked
    return X;
}
//8 dimensions Shamir's trick, using precomputations stored in Shamir8, stored as Bytecode of an external
//contract at given address dataPointer
//(thx to Lakhdar https://github.com/Kelvyne for EVM storage explanations and tricks)
// the external tool to generate tables from public key is in the /sage directory
// Each scalar is split into 4 quarter-width digits processed in parallel, so the
// table holds 256 precomputed points (8-bit index) fetched via extcodecopy.
function ecZZ_mulmuladd_S8_extcode(uint256 scalar_u, uint256 scalar_v, address dataPointer)
    internal
    view
    returns (uint256 X /*, uint Y*/ )
{
    unchecked {
        uint256 zz; // third and fourth coordinates of the point; reused below as the bit index
        uint256[6] memory T;
        zz = 256; //start index
        // find the first non-zero 8-bit table index; T[0] is the byte offset (index * 64)
        // NOTE(review): if both scalars are zero this loop never terminates (runs out of gas);
        // shifts with wrapped (underflowed) amounts >= 256 evaluate to 0 — TODO confirm intended
        while (T[0] == 0) {
            zz = zz - 1;
            //tbd case of msb octobit is null
            T[0] = 64
                * (
                    128 * ((scalar_v >> zz) & 1) + 64 * ((scalar_v >> (zz - 64)) & 1)
                        + 32 * ((scalar_v >> (zz - 128)) & 1) + 16 * ((scalar_v >> (zz - 192)) & 1)
                        + 8 * ((scalar_u >> zz) & 1) + 4 * ((scalar_u >> (zz - 64)) & 1)
                        + 2 * ((scalar_u >> (zz - 128)) & 1) + ((scalar_u >> (zz - 192)) & 1)
                );
        }
        assembly {
            // fetch the initial accumulator point (64 bytes: X then Y) from the table contract
            extcodecopy(dataPointer, T, mload(T), 64)
            X := mload(T)
            let Y := mload(add(T, 32))
            let zzz := 1
            zz := 1
            //loop over 1/4 of scalars thx to Shamir's trick over 8 points
            // body decrements index by 3*64, post-iteration adds 191: net -1 per round
            for { let index := 254 } gt(index, 191) { index := add(index, 191) } {
                {
                    // inlined EcZZ_Dbl (accumulator Y left negated for the add below)
                    let TT1 := mulmod(2, Y, p) //U = 2*Y1, y free
                    let T2 := mulmod(TT1, TT1, p) // V=U^2
                    let T3 := mulmod(X, T2, p) // S = X1*V
                    let T1 := mulmod(TT1, T2, p) // W=UV
                    let T4 := mulmod(3, mulmod(addmod(X, sub(p, zz), p), addmod(X, zz, p), p), p) //M=3*(X1-ZZ1)*(X1+ZZ1)
                    zzz := mulmod(T1, zzz, p) //zzz3=W*zzz1
                    zz := mulmod(T2, zz, p) //zz3=V*ZZ1, V free
                    X := addmod(mulmod(T4, T4, p), mulmod(minus_2, T3, p), p) //X3=M^2-2S
                    //T2:=mulmod(T4,addmod(T3, sub(p, X),p),p)//M(S-X3)
                    let T5 := mulmod(T4, addmod(X, sub(p, T3), p), p) //-M(S-X3)=M(X3-S)
                    //Y:= addmod(T2, sub(p, mulmod(T1, Y ,p)),p )//Y3= M(S-X3)-W*Y1
                    Y := addmod(mulmod(T1, Y, p), T5, p) //-Y3= W*Y1-M(S-X3), we replace Y by -Y to avoid a sub in ecAdd
                    /* compute element to access in precomputed table */
                }
                {
                    // build the 8-bit table index from one bit of each scalar quarter,
                    // shifted by 6 to form a 64-byte offset directly
                    let T4 := add(shl(13, and(shr(index, scalar_v), 1)), shl(9, and(shr(index, scalar_u), 1)))
                    let index2 := sub(index, 64)
                    let T3 :=
                        add(T4, add(shl(12, and(shr(index2, scalar_v), 1)), shl(8, and(shr(index2, scalar_u), 1))))
                    let index3 := sub(index2, 64)
                    let T2 :=
                        add(T3, add(shl(11, and(shr(index3, scalar_v), 1)), shl(7, and(shr(index3, scalar_u), 1))))
                    index := sub(index3, 64)
                    let T1 :=
                        add(T2, add(shl(10, and(shr(index, scalar_v), 1)), shl(6, and(shr(index, scalar_u), 1))))
                    //index:=add(index,192), restore index, interleaved with loop
                    //tbd: check validity of formulae with (0,1) to remove conditional jump
                    if iszero(T1) {
                        Y := sub(p, Y) // nothing to add: undo the -Y convention and continue
                        continue
                    }
                    extcodecopy(dataPointer, T, T1, 64)
                }
                {
                    /* Access to precomputed table using extcodecopy hack */
                    // inlined EcZZ_AddN (Y already negated, so R = y2*zzz + Y)
                    let y2 := addmod(mulmod(mload(add(T, 32)), zzz, p), Y, p)
                    let T2 := addmod(mulmod(mload(T), zz, p), sub(p, X), p)
                    //special case ecAdd(P,P)=EcDbl
                    if eq(y2, 0) {
                        if eq(T2, 0) {
                            let T1 := mulmod(2, Y, p) //U = 2*Y1, y free
                            T2 := mulmod(T1, T1, p) // V=U^2
                            let T3 := mulmod(X, T2, p) // S = X1*V
                            let TT1 := mulmod(T1, T2, p) // W=UV
                            y2 := addmod(X, zz, p)
                            TT1 := addmod(X, sub(p, zz), p)
                            y2 := mulmod(y2, TT1, p)
                            T2 := addmod(X, zz, p)
                            T1 := addmod(X, sub(p, zz), p)
                            T2 := mulmod(T1, T2, p)
                            let T4 := mulmod(3, T2, p)
                            zzz := mulmod(T1, zzz, p) //zzz3=W*zzz1
                            zz := mulmod(T2, zz, p) //zz3=V*ZZ1, V free
                            X := addmod(mulmod(T4, T4, p), mulmod(minus_2, T3, p), p) //X3=M^2-2S
                            T2 := mulmod(T4, addmod(T3, sub(p, X), p), p) //M(S-X3)
                            Y := addmod(T2, sub(p, mulmod(T1, Y, p)), p) //Y3= M(S-X3)-W*Y1
                            continue
                        }
                    }
                    let T4 := mulmod(T2, T2, p) //PP
                    let T1 := mulmod(T4, T2, p) //PPP
                    zz := mulmod(zz, T4, p)
                    //zzz3=ZZZ1*PPP
                    zzz := mulmod(zzz, T1, p)
                    let zz1 := mulmod(X, T4, p) //Q = X1*PP
                    X := addmod(addmod(mulmod(y2, y2, p), sub(p, T1), p), mulmod(minus_2, zz1, p), p)
                    Y := addmod(mulmod(addmod(zz1, sub(p, X), p), y2, p), mulmod(Y, T1, p), p)
                }
            } //end loop
            // only X is returned: invert zz via ModExp (base stored first at offset 0x60)
            mstore(add(T, 0x60), zz)
            //(X,Y)=ecZZ_SetAff(X,Y,zz, zzz);
            //T[0] = inverseModp_Hard(T[0], p); //1/zzz, inline modular inversion using precompile:
            // Define length of base, exponent and modulus. 0x20 == 32 bytes
            mstore(T, 0x20)
            mstore(add(T, 0x20), 0x20)
            mstore(add(T, 0x40), 0x20)
            // Define variables base, exponent and modulus
            //mstore(add(pointer, 0x60), u)
            mstore(add(T, 0x80), minus_2)
            mstore(add(T, 0xa0), p)
            // Call the precompiled contract 0x05 = ModExp, computing 1/zz
            if iszero(staticcall(not(0), 0x05, T, 0xc0, T, 0x20)) { revert(0, 0) }
            zz := mload(T)
            X := mulmod(X, zz, p) //X/zz
        }
    } //end unchecked
}
// improving the extcodecopy trick : append array at end of contract, read back with
// codecopy from this contract's own code at offset dataPointer.
// NOTE(review): unlike the _S8_extcode variant, the inlined addition here has no
// EcAdd(P,P)->EcDbl fallback for the degenerate case — confirm this is intentional.
function ecZZ_mulmuladd_S8_hackmem(uint256 scalar_u, uint256 scalar_v, uint256 dataPointer)
    internal
    view
    returns (uint256 X /*, uint Y*/ )
{
    uint256 zz; // third and fourth coordinates of the point; reused below as the bit index
    uint256[6] memory T;
    zz = 256; //start index
    unchecked {
        // find the first non-zero 8-bit table index; T[0] is the byte offset (index * 64)
        // NOTE(review): never terminates if both scalars are zero — TODO confirm callers prevent this
        while (T[0] == 0) {
            zz = zz - 1;
            //tbd case of msb octobit is null
            T[0] = 64
                * (
                    128 * ((scalar_v >> zz) & 1) + 64 * ((scalar_v >> (zz - 64)) & 1)
                        + 32 * ((scalar_v >> (zz - 128)) & 1) + 16 * ((scalar_v >> (zz - 192)) & 1)
                        + 8 * ((scalar_u >> zz) & 1) + 4 * ((scalar_u >> (zz - 64)) & 1)
                        + 2 * ((scalar_u >> (zz - 128)) & 1) + ((scalar_u >> (zz - 192)) & 1)
                );
        }
        assembly {
            // fetch the initial accumulator point (64 bytes: X then Y) from appended code
            codecopy(T, add(mload(T), dataPointer), 64)
            X := mload(T)
            let Y := mload(add(T, 32))
            let zzz := 1
            zz := 1
            //loop over 1/4 of scalars thx to Shamir's trick over 8 points
            // body decrements index by 3*64, post-iteration adds 191: net -1 per round
            for { let index := 254 } gt(index, 191) { index := add(index, 191) } {
                // inlined EcZZ_Dbl (accumulator Y left negated for the add below)
                let T1 := mulmod(2, Y, p) //U = 2*Y1, y free
                let T2 := mulmod(T1, T1, p) // V=U^2
                let T3 := mulmod(X, T2, p) // S = X1*V
                T1 := mulmod(T1, T2, p) // W=UV
                let T4 := mulmod(3, mulmod(addmod(X, sub(p, zz), p), addmod(X, zz, p), p), p) //M=3*(X1-ZZ1)*(X1+ZZ1)
                zzz := mulmod(T1, zzz, p) //zzz3=W*zzz1
                zz := mulmod(T2, zz, p) //zz3=V*ZZ1, V free
                X := addmod(mulmod(T4, T4, p), mulmod(minus_2, T3, p), p) //X3=M^2-2S
                //T2:=mulmod(T4,addmod(T3, sub(p, X),p),p)//M(S-X3)
                T2 := mulmod(T4, addmod(X, sub(p, T3), p), p) //-M(S-X3)=M(X3-S)
                //Y:= addmod(T2, sub(p, mulmod(T1, Y ,p)),p )//Y3= M(S-X3)-W*Y1
                Y := addmod(mulmod(T1, Y, p), T2, p) //-Y3= W*Y1-M(S-X3), we replace Y by -Y to avoid a sub in ecAdd
                /* compute element to access in precomputed table */
                // build the 8-bit table index (shifted by 6 => 64-byte offset) from
                // one bit of each quarter of both scalars
                T4 := add(shl(13, and(shr(index, scalar_v), 1)), shl(9, and(shr(index, scalar_u), 1)))
                index := sub(index, 64)
                T4 := add(T4, add(shl(12, and(shr(index, scalar_v), 1)), shl(8, and(shr(index, scalar_u), 1))))
                index := sub(index, 64)
                T4 := add(T4, add(shl(11, and(shr(index, scalar_v), 1)), shl(7, and(shr(index, scalar_u), 1))))
                index := sub(index, 64)
                T4 := add(T4, add(shl(10, and(shr(index, scalar_v), 1)), shl(6, and(shr(index, scalar_u), 1))))
                //index:=add(index,192), restore index, interleaved with loop
                //tbd: check validity of formulae with (0,1) to remove conditional jump
                if iszero(T4) {
                    Y := sub(p, Y) // nothing to add: undo the -Y convention and continue
                    continue
                }
                {
                    /* Access to precomputed table using codecopy hack */
                    codecopy(T, add(T4, dataPointer), 64)
                    // inlined EcZZ_AddN (Y already negated, so R = y2*zzz + Y)
                    let y2 := addmod(mulmod(mload(add(T, 32)), zzz, p), Y, p)
                    T2 := addmod(mulmod(mload(T), zz, p), sub(p, X), p)
                    T4 := mulmod(T2, T2, p) //PP
                    T1 := mulmod(T4, T2, p) //PPP
                    T2 := mulmod(zz, T4, p) //ZZ3=ZZ1*PP
                    zzz := mulmod(zzz, T1, p) //ZZZ3=ZZZ1*PPP
                    let zz1 := mulmod(X, T4, p) //Q = X1*PP
                    T4 := addmod(addmod(mulmod(y2, y2, p), sub(p, T1), p), mulmod(minus_2, zz1, p), p)
                    Y := addmod(mulmod(addmod(zz1, sub(p, T4), p), y2, p), mulmod(Y, T1, p), p)
                    zz := T2
                    X := T4
                }
            } //end loop
            // only X is returned: invert zz via ModExp (base stored first at offset 0x60)
            mstore(add(T, 0x60), zz)
            //(X,Y)=ecZZ_SetAff(X,Y,zz, zzz);
            //T[0] = inverseModp_Hard(T[0], p); //1/zzz, inline modular inversion using precompile:
            // Define length of base, exponent and modulus. 0x20 == 32 bytes
            mstore(T, 0x20)
            mstore(add(T, 0x20), 0x20)
            mstore(add(T, 0x40), 0x20)
            // Define variables base, exponent and modulus
            //mstore(add(pointer, 0x60), u)
            mstore(add(T, 0x80), minus_2)
            mstore(add(T, 0xa0), p)
            // Call the precompiled contract 0x05 = ModExp, computing 1/zz
            if iszero(staticcall(not(0), 0x05, T, 0xc0, T, 0x20)) { revert(0, 0) }
            zz := mload(T)
            X := mulmod(X, zz, p) //X/zz
        }
    } //end unchecked
}
/**
 * @dev ECDSA verification over sec256r1, given the message hash, signature (r, s)
 * and public key Q.
 * Trick: inverts (-s) rather than s, so that with u = -m/s and v = -r/s the
 * computed point uG + vQ is the negation of the usual (m/s)G + (r/s)Q; since
 * negation preserves the x coordinate, the final check x1 == r mod n is unchanged.
 */
function ecdsa_verify(bytes32 message, uint256[2] memory rs, uint256[2] memory Q) internal view returns (bool) {
    // reject signature components outside [1, n-1]
    if (rs[0] == 0 || rs[0] >= n || rs[1] == 0 || rs[1] >= n) {
        return false;
    }
    // the public key must be a valid, non-neutral point on the curve
    if (!ecAff_isOnCurve(Q[0], Q[1])) {
        return false;
    }
    uint256 sInv = FCL_nModInv(n - rs[1]); // 1/(-s) mod n
    uint256 scalar_u = mulmod(uint256(message), sInv, n); // u = -m/s
    uint256 scalar_v = mulmod(rs[0], sInv, n); // v = -r/s
    uint256 x1 = ecZZ_mulmuladd_S_asm(Q[0], Q[1], scalar_u, scalar_v);
    assembly {
        // accept iff x1 == r (mod n)
        x1 := addmod(x1, sub(n, mload(rs)), n)
    }
    return x1 == 0;
}
/**
 * @dev ECDSA verification using a precomputed table of multiples of P and Q stored in contract at address Shamir8
 * generation of contract bytecode for precomputations is done using sagemath code
 * (see sage directory, WebAuthn_precompute.sage)
 * FIX: now also rejects rs[1] >= n, matching the range check performed by
 * ecdsa_verify; the original accepted non-canonical s values outside [1, n-1].
 */
function ecdsa_precomputed_verify(bytes32 message, uint256[2] calldata rs, address Shamir8)
    internal
    view
    returns (bool)
{
    // reject signature components outside [1, n-1]
    if (rs[0] == 0 || rs[0] >= n || rs[1] == 0 || rs[1] >= n) {
        return false;
    }
    /* Q is pushed via bytecode assumed to be correct
    if (!isOnCurve(Q[0], Q[1])) {
        return false;
    }*/
    uint256 sInv = FCL_nModInv(rs[1]); // 1/s mod n
    //uint sInv =2;
    uint256 X;
    //Shamir 8 dimensions: X = x( (m/s)G + (r/s)Q )
    X = ecZZ_mulmuladd_S8_extcode(mulmod(uint256(message), sInv, n), mulmod(rs[0], sInv, n), Shamir8);
    assembly {
        // accept iff X == r (mod n)
        X := addmod(X, sub(n, calldataload(rs)), n)
    }
    return X == 0;
} //end ecdsa_precomputed_verify()
/**
 * @dev ECDSA verification using a precomputed table of multiples of P and Q appended at end of contract at address endcontract
 * generation of contract bytecode for precomputations is done using sagemath code
 * (see sage directory, WebAuthn_precompute.sage)
 * FIX: now also rejects rs[1] >= n, matching the range check performed by
 * ecdsa_verify; the original accepted non-canonical s values outside [1, n-1].
 */
function ecdsa_precomputed_hackmem(bytes32 message, uint256[2] calldata rs, uint256 endcontract)
    internal
    view
    returns (bool)
{
    // reject signature components outside [1, n-1]
    if (rs[0] == 0 || rs[0] >= n || rs[1] == 0 || rs[1] >= n) {
        return false;
    }
    /* Q is pushed via bytecode assumed to be correct
    if (!isOnCurve(Q[0], Q[1])) {
        return false;
    }*/
    uint256 sInv = FCL_nModInv(rs[1]); // 1/s mod n
    uint256 X;
    //Shamir 8 dimensions: X = x( (m/s)G + (r/s)Q )
    X = ecZZ_mulmuladd_S8_hackmem(mulmod(uint256(message), sInv, n), mulmod(rs[0], sInv, n), endcontract);
    assembly {
        // accept iff X == r (mod n)
        X := addmod(X, sub(n, calldataload(rs)), n)
    }
    return X == 0;
} //end ecdsa_precomputed_hackmem()
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment