diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go index a7f1894018..a79496824a 100644 --- a/cmd/opdoc/opdoc.go +++ b/cmd/opdoc/opdoc.go @@ -28,7 +28,7 @@ import ( "github.com/algorand/go-algorand/protocol" ) -var docVersion = 8 +var docVersion = 9 func opGroupMarkdownTable(names []string, out io.Writer) { fmt.Fprint(out, `| Opcode | Description | diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md index 324cd7d536..a35200cb5a 100644 --- a/data/transactions/logic/README.md +++ b/data/transactions/logic/README.md @@ -296,6 +296,12 @@ return stack matches the name of the input value. | `ecdsa_pk_recover v` | for (data A, recovery id B, signature C, D) recover a public key | | `ecdsa_pk_decompress v` | decompress pubkey A into components X, Y | | `vrf_verify s` | Verify the proof B of message A against pubkey C. Returns vrf output and verification flag. | +| `ec_add g` | for curve points A and B, return the curve point A + B | +| `ec_scalar_mul g` | for curve point A and scalar B, return the curve point BA, the point A multiplied by the scalar B. | +| `ec_pairing_check g` | 1 if the product of the pairing of each point in A with its respective point in B is equal to the identity element of the target group Gt, else 0 | +| `ec_multi_exp g` | for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn | +| `ec_subgroup_check g` | 1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. Program fails if A is not in G at all. | +| `ec_map_to g` | maps field element A to group G | | `+` | A plus B. Fail on overflow. | | `-` | A minus B. Fail if B > A. | | `/` | A divided by B (truncated division). Fail if B == 0. 
| diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md index 54a80db8f9..18379a0281 100644 --- a/data/transactions/logic/TEAL_opcodes.md +++ b/data/transactions/logic/TEAL_opcodes.md @@ -1560,3 +1560,72 @@ For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `bo | 0 | BlkSeed | []byte | | | 1 | BlkTimestamp | uint64 | | + +## ec_add g + +- Opcode: 0xe0 {uint8 curve} +- Stack: ..., A: []byte, B: []byte → ..., []byte +- for curve points A and B, return the curve point A + B +- **Cost**: BN254g1=310 BN254g2=430 BLS12_381g1=540 BLS12_381g2=750 +- Availability: v9 + +`EC` Groups: + +| Index | Name | Notes |
| - | ------ | --------- | +| 0 | BN254g1 | G1 of the BN254 curve. Points encoded as 32 byte X followed by 32 byte Y | +| 1 | BN254g2 | G2 of the BN254 curve. Points encoded as 64 byte X followed by 64 byte Y | +| 2 | BLS12_381g1 | G1 of the BLS 12-381 curve. Points encoded as 48 byte X followed by 48 byte Y | +| 3 | BLS12_381g2 | G2 of the BLS 12-381 curve. Points encoded as 96 byte X followed by 96 byte Y | + + +A and B are curve points in affine representation: X concatenated with Y. Fails if A or B is not in G. Does _not_ check if A and B are in the main prime-order subgroup. + +## ec_scalar_mul g + +- Opcode: 0xe1 {uint8 curve} +- Stack: ..., A: []byte, B: []byte → ..., []byte +- for curve point A and scalar B, return the curve point BA, the point A multiplied by the scalar B. +- **Cost**: BN254g1=2200 BN254g2=4460 BLS12_381g1=3640 BLS12_381g2=8530 +- Availability: v9 + +A is a curve point encoded and checked as described in `ec_add`. Scalar B is interpreted as a big-endian unsigned integer. Fails if B exceeds 32 bytes. 
+ +## ec_pairing_check g + +- Opcode: 0xe2 {uint8 curve} +- Stack: ..., A: []byte, B: []byte → ..., uint64 +- 1 if the product of the pairing of each point in A with its respective point in B is equal to the identity element of the target group Gt, else 0 +- **Cost**: BN254g1=18000 BN254g2=18000 BLS12_381g1=15000 BLS12_381g2=15000 +- Availability: v9 + +A and B are concatenated points, encoded and checked as described in `ec_add`. A contains points of the group G, B contains points of the associated group (G2 if G is G1, and vice versa). Fails if A and B have a different number of points, or if any point is not in its described group or outside the main prime-order subgroup - a stronger condition than other opcodes. + +## ec_multi_exp g + +- Opcode: 0xe3 {uint8 curve} +- Stack: ..., A: []byte, B: []byte → ..., []byte +- for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn +- **Cost**: BN254g1=800 BN254g2=1800 BLS12_381g1=1400 BLS12_381g2=3500 +- Availability: v9 + +A is a list of concatenated points, encoded and checked as described in `ec_add`. B is a list of concatenated scalars which, unlike ec_scalar_mul, must all be exactly 32 bytes long. +The name `ec_multi_exp` was chosen to reflect common usage, but a more consistent name would be `ec_multi_scalar_mul` + +## ec_subgroup_check g + +- Opcode: 0xe4 {uint8 curve} +- Stack: ..., A: []byte → ..., uint64 +- 1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. Program fails if A is not in G at all. +- **Cost**: BN254g1=50 BN254g2=11500 BLS12_381g1=5600 BLS12_381g2=7100 +- Availability: v9 + +## ec_map_to g + +- Opcode: 0xe5 {uint8 curve} +- Stack: ..., A: []byte → ..., []byte +- maps field element A to group G +- **Cost**: BN254g1=1700 BN254g2=11000 BLS12_381g1=5600 BLS12_381g2=43000 +- Availability: v9 + +BN254 points are mapped by the SVDW map. BLS12-381 points are mapped by the SSWU map. 
G1 element inputs are encoded as a single big-endian byte-array of length <= n. G2 inputs are encoded as concatenated n-byte big-endian encoded integers. n == 32 for BN254 and 48 for BLS12-381. No input may exceed the modulus of the group. diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 31a0412fa3..5e11bf6889 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -32,9 +32,13 @@ var opDocByName = map[string]string{ "ecdsa_verify": "for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}", "ecdsa_pk_decompress": "decompress pubkey A into components X, Y", "ecdsa_pk_recover": "for (data A, recovery id B, signature C, D) recover a public key", - "bn256_add": "for (curve points A and B) return the curve point A + B", - "bn256_scalar_mul": "for (curve point A, scalar K) return the curve point KA", - "bn256_pairing": "for (points in G1 group G1s, points in G2 group G2s), return whether they are paired => {0 or 1}", + + "ec_add": "for curve points A and B, return the curve point A + B", + "ec_scalar_mul": "for curve point A and scalar B, return the curve point BA, the point A multiplied by the scalar B.", + "ec_pairing_check": "1 if the product of the pairing of each point in A with its respective point in B is equal to the identity element of the target group Gt, else 0", + "ec_multi_exp": "for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn", + "ec_subgroup_check": "1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. Program fails if A is not in G at all.", + "ec_map_to": "maps field element A to group G", "+": "A plus B. Fail on overflow.", "-": "A minus B. 
Fail if B > A.", @@ -292,6 +296,13 @@ var opcodeImmediateNotes = map[string]string{ "frame_bury": "{int8 frame slot}", "popn": "{uint8 stack depth}", "dupn": "{uint8 copy count}", + + "ec_add": "{uint8 curve}", + "ec_scalar_mul": "{uint8 curve}", + "ec_pairing_check": "{uint8 curve}", + "ec_multi_exp": "{uint8 curve}", + "ec_subgroup_check": "{uint8 curve}", + "ec_map_to": "{uint8 curve}", } // OpImmediateNote returns a short string about immediate data which follows the op byte @@ -306,55 +317,59 @@ var opDocExtras = map[string]string{ "ecdsa_verify": "The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.", "ecdsa_pk_decompress": "The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.", "ecdsa_pk_recover": "S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to deriver a public key. All values are big-endian encoded. The signed data must be 32 bytes long.", - "bn256_add": "A, B are curve points in G1 group. Each point consists of (X, Y) where X and Y are 256 bit integers, big-endian encoded. The encoded point is 64 bytes from concatenation of 32 byte X and 32 byte Y.", - "bn256_scalar_mul": "A is a curve point in G1 Group and encoded as described in `bn256_add`. Scalar K is a big-endian encoded big integer that has no padding zeros.", - "bn256_pairing": "G1s are encoded by the concatenation of encoded G1 points, as described in `bn256_add`. G2s are encoded by the concatenation of encoded G2 points. 
Each G2 is in form (XA0+i*XA1, YA0+i*YA1) and encoded by big-endian field element XA0, XA1, YA0 and YA1 in sequence.", - "bnz": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff.\n\nAt v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)", - "bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.", - "b": "See `bnz` for details on how branches work. `b` always jumps to the offset.", - "callsub": "The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it.", - "proto": "Fails unless the last instruction executed was a `callsub`.", - "retsub": "If the current frame was prepared by `proto A R`, `retsub` will remove the 'A' arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.", - "intcblock": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. 
These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.", - "bytecblock": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.", - "*": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.", - "+": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`.", - "/": "`divmodw` is available to divide the two-element values produced by `mulw` and `addw`.", - "bitlen": "bitlen interprets arrays as big-endian integers, unlike setbit/getbit", - "divw": "The notation A,B indicates that A and B are interpreted as a uint128 value, with A as the high uint64 and B the low.", - "divmodw": "The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low.", - "gtxn": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.", - "gtxns": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. 
gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.", - "gload": "`gload` fails unless the requested transaction is an ApplicationCall and T < GroupIndex.", - "gloads": "`gloads` fails unless the requested transaction is an ApplicationCall and A < GroupIndex.", - "gaid": "`gaid` fails unless the requested transaction created an asset or application and T < GroupIndex.", - "gaids": "`gaids` fails unless the requested transaction created an asset or application and A < GroupIndex.", - "btoi": "`btoi` fails if the input is longer than 8 bytes.", - "concat": "`concat` fails if the result would be greater than 4096 bytes.", - "pushbytes": "pushbytes args are not added to the bytecblock during assembly processes", - "pushbytess": "pushbytess args are not added to the bytecblock during assembly processes", - "pushint": "pushint args are not added to the intcblock during assembly processes", - "pushints": "pushints args are not added to the intcblock during assembly processes", - "getbit": "see explanation of bit ordering in setbit", - "setbit": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.", - "balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.", - "min_balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.", - "app_opted_in": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). 
Return: 1 if opted in and 0 otherwise.", - "app_local_get": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. Return: value. The value is zero (of type uint64) if the key does not exist.", - "app_local_get_ex": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.", - "app_global_get_ex": "params: Txn.ForeignApps offset (or, since v4, an _available_ application id), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.", - "app_global_get": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.", - "app_local_put": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key, value.", - "app_local_del": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)", - "app_global_del": "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)", - "asset_holding_get": "params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or, since v4, a Txn.ForeignAssets offset). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.", - "asset_params_get": "params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id. 
Return: did_exist flag (1 if the asset existed and 0 otherwise), value.", - "app_params_get": "params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag (1 if the application existed and 0 otherwise), value.", - "log": "`log` fails if called more than MaxLogCalls times in a program, or if the sum of logged bytes exceeds 1024 bytes.", - "itxn_begin": "`itxn_begin` initializes Sender to the application address; Fee to the minimum allowable, taking into account MinTxnFee and credit from overpaying in earlier transactions; FirstValid/LastValid to the values in the invoking transaction, and all other fields to zero or empty values.", - "itxn_next": "`itxn_next` initializes the transaction exactly as `itxn_begin` does", - "itxn_field": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)", - "itxn_submit": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.", + + "ec_add": "A and B are curve points in affine representation: X concatenated with Y. Fails if A or B is not in G. Does _not_ check if A and B are in the main prime-order subgroup.", + "ec_scalar_mul": "A is a curve point encoded and checked as described in `ec_add`. Scalar B is interpreted as a big-endian unsigned integer. Fails if B exceeds 32 bytes.", + "ec_pairing_check": "A and B are concatenated points, encoded and checked as described in `ec_add`. A contains points of the group G, B contains points of the associated group (G2 if G is G1, and vice versa). 
Fails if A and B have a different number of points, or if any point is not in its described group or outside the main prime-order subgroup - a stronger condition than other opcodes.", + "ec_multi_exp": "A is a list of concatenated points, encoded and checked as described in `ec_add`. B is a list of concatenated scalars which, unlike ec_scalar_mul, must all be exactly 32 bytes long.\nThe name `ec_multi_exp` was chosen to reflect common usage, but a more consistent name would be `ec_multi_scalar_mul`", + "ec_map_to": "BN254 points are mapped by the SVDW map. BLS12-381 points are mapped by the SSWU map. G1 element inputs are encoded single big-endian byte-array of length <= n. G2 inputs are encoded as concatenated n-byte big-endian encoded integers. n == 32 for BN254 and 48 for BLS12-381. No input may exceed the modulus of the group. ", + + "bnz": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff.\n\nAt v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. 
(Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)", + "bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.", + "b": "See `bnz` for details on how branches work. `b` always jumps to the offset.", + "callsub": "The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it.", + "proto": "Fails unless the last instruction executed was a `callsub`.", + "retsub": "If the current frame was prepared by `proto A R`, `retsub` will remove the 'A' arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.", + "intcblock": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.", + "bytecblock": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.", + "*": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.", + "+": "Overflow is an error condition which halts execution and fails the transaction. 
Full precision is available from `addw`.", + "/": "`divmodw` is available to divide the two-element values produced by `mulw` and `addw`.", + "bitlen": "bitlen interprets arrays as big-endian integers, unlike setbit/getbit", + "divw": "The notation A,B indicates that A and B are interpreted as a uint128 value, with A as the high uint64 and B the low.", + "divmodw": "The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low.", + "gtxn": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.", + "gtxns": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.", + "gload": "`gload` fails unless the requested transaction is an ApplicationCall and T < GroupIndex.", + "gloads": "`gloads` fails unless the requested transaction is an ApplicationCall and A < GroupIndex.", + "gaid": "`gaid` fails unless the requested transaction created an asset or application and T < GroupIndex.", + "gaids": "`gaids` fails unless the requested transaction created an asset or application and A < GroupIndex.", + "btoi": "`btoi` fails if the input is longer than 8 bytes.", + "concat": "`concat` fails if the result would be greater than 4096 bytes.", + "pushbytes": "pushbytes args are not added to the bytecblock during assembly processes", + "pushbytess": "pushbytess args are not added to the bytecblock during assembly processes", + "pushint": "pushint args are not added to the intcblock during assembly processes", + "pushints": "pushints args are not added to the intcblock during assembly processes", + "getbit": "see explanation of bit ordering in setbit", + "setbit": "When A is a uint64, index 0 is the least significant bit. 
Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.", + "balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.", + "min_balance": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.", + "app_opted_in": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: 1 if opted in and 0 otherwise.", + "app_local_get": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. Return: value. The value is zero (of type uint64) if the key does not exist.", + "app_local_get_ex": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.", + "app_global_get_ex": "params: Txn.ForeignApps offset (or, since v4, an _available_ application id), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.", + "app_global_get": "params: state key. Return: value. 
The value is zero (of type uint64) if the key does not exist.", + "app_local_put": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key, value.", + "app_local_del": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)", + "app_global_del": "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)", + "asset_holding_get": "params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or, since v4, a Txn.ForeignAssets offset). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.", + "asset_params_get": "params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id. Return: did_exist flag (1 if the asset existed and 0 otherwise), value.", + "app_params_get": "params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag (1 if the application existed and 0 otherwise), value.", + "log": "`log` fails if called more than MaxLogCalls times in a program, or if the sum of logged bytes exceeds 1024 bytes.", + "itxn_begin": "`itxn_begin` initializes Sender to the application address; Fee to the minimum allowable, taking into account MinTxnFee and credit from overpaying in earlier transactions; FirstValid/LastValid to the values in the invoking transaction, and all other fields to zero or empty values.", + "itxn_next": "`itxn_next` initializes the transaction exactly as `itxn_begin` does", + "itxn_field": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. 
`itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)", + "itxn_submit": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.", "base64_decode": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings. This opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.", "json_ref": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004). This opcode should only be used in cases where JSON is only available option, e.g. 
when a third-party only signs JSON.", @@ -375,7 +390,7 @@ func OpDocExtra(opName string) string { // here is the order args opcodes are presented, so place related // opcodes consecutively, even if their opcode values are not. var OpGroups = map[string][]string{ - "Arithmetic": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "bn256_add", "bn256_scalar_mul", "bn256_pairing", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw", "getbit", "setbit", "getbyte", "setbyte", "concat"}, + "Arithmetic": {"sha256", "keccak256", "sha512_256", "sha3_256", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "ec_add", "ec_scalar_mul", "ec_pairing_check", "ec_multi_exp", "ec_subgroup_check", "ec_map_to", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "shl", "shr", "sqrt", "bitlen", "exp", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "divmodw", "expw", "getbit", "setbit", "getbyte", "setbyte", "concat"}, "Byte Array Manipulation": {"substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"}, "Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"}, "Byte Array Logic": {"b|", "b&", "b^", "b~"}, diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go index 5dae0bdbf9..cfc5c13ecf 100644 --- a/data/transactions/logic/evalCrypto_test.go +++ b/data/transactions/logic/evalCrypto_test.go @@ -25,11 +25,9 @@ import ( "encoding/hex" "fmt" "math/big" - mrand "math/rand" "strconv" "testing" - "github.com/consensys/gnark-crypto/ecc/bn254" 
"github.com/stretchr/testify/require" "github.com/algorand/go-algorand/crypto" @@ -622,7 +620,7 @@ ecdsa_verify Secp256k1 ! assert global OpcodeBudget -int ` + fmt.Sprintf("%d", 20_000-1700-8) + ` +int ` + fmt.Sprintf("%d", testLogicBudget-1700-8) + ` == ` testAccepts(t, source, 6) // Secp256k1 was 5, but OpcodeBudget is 6 @@ -637,7 +635,7 @@ ecdsa_verify Secp256r1 ! assert global OpcodeBudget -int ` + fmt.Sprintf("%d", 20_000-2500-8) + ` +int ` + fmt.Sprintf("%d", testLogicBudget-2500-8) + ` == ` testAccepts(t, source, fidoVersion) @@ -871,132 +869,3 @@ int 1` benchmarkEcdsa(b, source, Secp256k1) }) } - -type benchmarkBn256Data struct { - a []byte - k []byte - g1 []byte - g2 []byte - programs []byte -} - -func benchmarkBn256DataGenData(b *testing.B) (data []benchmarkBn256Data) { - data = make([]benchmarkBn256Data, b.N) - var g1Gen bn254.G1Jac - var g1GenAff bn254.G1Affine - g1Gen.X.SetString("1") - g1Gen.Y.SetString("2") - g1Gen.Z.SetString("1") - g1GenAff.FromJacobian(&g1Gen) - var a bn254.G1Affine - a.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(mrand.Uint64())) - - for i := 0; i < b.N; i++ { - var a bn254.G1Affine - a.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(mrand.Uint64())) - - data[i].a = bn254G1ToBytes(&a) - data[i].k = new(big.Int).SetUint64(mrand.Uint64()).Bytes() - - // Pair one g1 and one g2 - data[i].g1, _ = hex.DecodeString("0ebc9fc712b13340c800793386a88385e40912a21bacad2cc7db17d36e54c802238449426931975cced7200f08681ab9a86a2e5c2336cf625451cf2413318e32") - data[i].g2, _ = hex.DecodeString("217fbd9a9db5719cfbe3580e3d8750cada058fdfffe95c440a0528ffc608f36e05d6a67604658d40b3e4cac3c46150f2702d87739b7774d79a8147f7271773b420f9429ee13c1843404bfd70e75efa886c173e57dde32970274d8bc53dfd562403f6276318990d053785b4ca342ebc4581a23a39285804bb74e079aa2ef3ba66") - } - return data -} - -func benchmarkBn256(b *testing.B, source string) { - data := benchmarkBn256DataGenData(b) - ops, err := AssembleStringWithVersion(source, pairingVersion) - 
require.NoError(b, err) - for i := 0; i < b.N; i++ { - data[i].programs = ops.Program - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - var txn transactions.SignedTxn - txn.Lsig.Logic = data[i].programs - txn.Lsig.Args = [][]byte{data[i].a, data[i].k, data[i].g1, data[i].g2} - ep := defaultEvalParams(txn) - pass, err := EvalSignature(0, ep) - if !pass { - b.Log(hex.EncodeToString(data[i].programs)) - b.Log(ep.Trace.String()) - } - if err != nil { - require.NoError(b, err) - } - if !pass { - require.True(b, pass) - } - } -} - -func BenchmarkBn256AddRaw(b *testing.B) { - data := benchmarkBn256DataGenData(b) - a1, _ := bytesToBN254G1(data[0].g1, false) - a2, _ := bytesToBN254G1(data[0].g1, false) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = new(bn254.G1Affine).Add(&a1, &a2) - } -} - -func BenchmarkBn256AddWithMarshal(b *testing.B) { - b.ResetTimer() - var v [][]byte - v = make([][]byte, b.N) - g1, _ := hex.DecodeString("0ebc9fc712b13340c800793386a88385e40912a21bacad2cc7db17d36e54c802238449426931975cced7200f08681ab9a86a2e5c2336cf625451cf2413318e32") - - for i := 0; i < b.N; i++ { - a1, _ := bytesToBN254G1(g1, true) - a2, _ := bytesToBN254G1(g1, true) - r := new(bn254.G1Affine).Add(&a1, &a2) - v[i] = r.Marshal() - } -} - -func BenchmarkBn256PairingRaw(b *testing.B) { - data := benchmarkBn256DataGenData(b) - g1s, _ := bytesToBN254G1s(data[0].g1, false) - g2s, _ := bytesToBN254G2s(data[0].g2, false) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - ok, _ := bn254.PairingCheck(g1s, g2s) - require.False(b, ok) - } -} - -func BenchmarkBn256(b *testing.B) { - if pairingVersion > LogicVersion { - b.Skip() - } - b.Run("bn256 add", func(b *testing.B) { - benchmarkOperation(b, "byte 0x0ebc9fc712b13340c800793386a88385e40912a21bacad2cc7db17d36e54c802238449426931975cced7200f08681ab9a86a2e5c2336cf625451cf2413318e32", "dup; bn256_add", "pop; int 1") - }) - - b.Run("bn256 scalar mul", func(b *testing.B) { - source := ` -arg 0 -arg 1 -bn256_scalar_mul -pop -int 1 -` - 
benchmarkBn256(b, source) - }) - - b.Run("bn256 pairing", func(b *testing.B) { - source := ` -arg 2 -arg 3 -bn256_pairing -pop -int 1 -` - benchmarkBn256(b, source) - }) -} diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go index a2f33cc909..18166e85bc 100644 --- a/data/transactions/logic/evalStateful_test.go +++ b/data/transactions/logic/evalStateful_test.go @@ -2436,9 +2436,13 @@ func TestReturnTypes(t *testing.T) { "frame_dig": true, // would need a "proto" subroutine "frame_bury": true, // would need a "proto" subroutine - "bn256_add": true, - "bn256_scalar_mul": true, - "bn256_pairing": true, + // These should not remain here, we should be able to construct example + "ec_add": true, + "ec_scalar_mul": true, + "ec_pairing_check": true, + "ec_multi_exp": true, + "ec_subgroup_check": true, + "ec_map_to": true, } byName := OpsByName[LogicVersion] diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 016940605e..aa2eea770a 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -45,10 +45,12 @@ func makeTestProto() *config.ConsensusParams { return makeTestProtoV(LogicVersion) } +var testLogicBudget = 25_000 // In a var so that we can temporarily change it + func makeTestProtoV(version uint64) *config.ConsensusParams { return &config.ConsensusParams{ LogicSigVersion: version, - LogicSigMaxCost: 20000, + LogicSigMaxCost: uint64(testLogicBudget), Application: version >= appsEnabledVersion, MaxAppProgramCost: 700, @@ -2933,9 +2935,9 @@ func TestSlowLogic(t *testing.T) { testAccepts(t, source, 1) // in v1, each repeat costs 30 - v1overspend := fragment + strings.Repeat(fragment+"&&; ", 20000/30) + v1overspend := fragment + strings.Repeat(fragment+"&&; ", testLogicBudget/30) // in v2,v3 each repeat costs 134 - v2overspend := fragment + strings.Repeat(fragment+"&&; ", 20000/134) + v2overspend := fragment + strings.Repeat(fragment+"&&; ", 
testLogicBudget/134) // v1overspend fails (on v1) ops := testProg(t, v1overspend, 1) @@ -3650,12 +3652,17 @@ int 142791994204213819 + ` -func evalLoop(b *testing.B, runs int, program []byte) { +func evalLoop(b *testing.B, runs int, programs ...[]byte) { + program := programs[0] + final := programs[len(programs)-1] b.Helper() b.ResetTimer() for i := 0; i < runs; i++ { var txn transactions.SignedTxn txn.Lsig.Logic = program + if i == runs-1 { + txn.Lsig.Logic = final + } pass, err := EvalSignature(0, benchmarkEvalParams(txn)) if !pass { // rerun to trace it. tracing messes up timing too much @@ -3687,11 +3694,18 @@ func benchmarkBasicProgram(b *testing.B, source string) { // the idea is that you can subtract that out from the reported speed func benchmarkOperation(b *testing.B, prefix string, operation string, suffix string) { b.Helper() - runs := 1 + b.N/2000 + runs := b.N / 2000 inst := strings.Count(operation, ";") + strings.Count(operation, "\n") source := prefix + ";" + strings.Repeat(operation+"\n", 2000) + ";" + suffix ops := testProg(b, source, AssemblerMaxVersion) - evalLoop(b, runs, ops.Program) + finalOps := ops + + if b.N%2000 != 0 { + runs++ + finalSource := prefix + ";" + strings.Repeat(operation+"\n", b.N%2000) + ";" + suffix + finalOps = testProg(b, finalSource, AssemblerMaxVersion) + } + evalLoop(b, runs, ops.Program, finalOps.Program) b.ReportMetric(float64(inst), "extra/op") } @@ -5309,7 +5323,7 @@ byte "" base64_decode URLEncoding pop global OpcodeBudget -int ` + fmt.Sprintf("%d", 20_000-3-1) + ` // base64_decode cost = 1 +int ` + fmt.Sprintf("%d", testLogicBudget-3-1) + ` // base64_decode cost = 1 == ` testAccepts(t, source, fidoVersion) @@ -5319,7 +5333,7 @@ byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" base64_decode URLEncoding pop global OpcodeBudget -int ` + fmt.Sprintf("%d", 20_000-3-5) + ` // base64_decode cost = 5 (64 bytes -> 1 + 64/16) +int ` + fmt.Sprintf("%d", testLogicBudget-3-5) + ` // base64_decode cost = 5 
(64 bytes -> 1 + 64/16) == ` testAccepts(t, source, fidoVersion) @@ -5329,7 +5343,7 @@ byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567" base64_decode URLEncoding pop global OpcodeBudget -int ` + fmt.Sprintf("%d", 20_000-3-5) + ` // base64_decode cost = 5 (60 bytes -> 1 + ceil(60/16)) +int ` + fmt.Sprintf("%d", testLogicBudget-3-5) + ` // base64_decode cost = 5 (60 bytes -> 1 + ceil(60/16)) == ` testAccepts(t, source, fidoVersion) @@ -5339,7 +5353,7 @@ byte "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_AA==" base64_decode URLEncoding pop global OpcodeBudget -int ` + fmt.Sprintf("%d", 20_000-3-6) + ` // base64_decode cost = 6 (68 bytes -> 1 + ceil(68/16)) +int ` + fmt.Sprintf("%d", testLogicBudget-3-6) + ` // base64_decode cost = 6 (68 bytes -> 1 + ceil(68/16)) == ` testAccepts(t, source, fidoVersion) diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go index 0af2079a35..73e0051993 100644 --- a/data/transactions/logic/fields.go +++ b/data/transactions/logic/fields.go @@ -23,7 +23,7 @@ import ( "github.com/algorand/go-algorand/protocol" ) -//go:generate stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,Base64Encoding,JSONRefType,VrfStandard,BlockField -output=fields_string.go +//go:generate stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,EcGroup,Base64Encoding,JSONRefType,VrfStandard,BlockField -output=fields_string.go // FieldSpec unifies the various specs for assembly, disassembly, and doc generation. 
type FieldSpec interface { @@ -676,6 +676,74 @@ var EcdsaCurves = FieldGroup{ ecdsaCurveSpecByName, } +// EcGroup is an enum for `ec_` opcodes +type EcGroup int + +const ( + // BN254g1 is the G1 group of BN254 + BN254g1 EcGroup = iota + // BN254g2 is the G2 group of BN254 + BN254g2 + // BLS12_381g1 specifies the G1 group of BLS 12-381 + BLS12_381g1 + // BLS12_381g2 specifies the G2 group of BLS 12-381 + BLS12_381g2 + invalidEcGroup // compile-time constant for number of fields +) + +var ecGroupNames [invalidEcGroup]string + +type ecGroupSpec struct { + field EcGroup + doc string +} + +func (fs ecGroupSpec) Field() byte { + return byte(fs.field) +} +func (fs ecGroupSpec) Type() StackType { + return StackNone // Will not show, since all are untyped +} +func (fs ecGroupSpec) OpVersion() uint64 { + return pairingVersion +} +func (fs ecGroupSpec) Version() uint64 { + return pairingVersion +} +func (fs ecGroupSpec) Note() string { + return fs.doc +} + +var ecGroupSpecs = [...]ecGroupSpec{ + {BN254g1, "G1 of the BN254 curve. Points encoded as 32 byte X following by 32 byte Y"}, + {BN254g2, "G2 of the BN254 curve. Points encoded as 64 byte X following by 64 byte Y"}, + {BLS12_381g1, "G1 of the BLS 12-381 curve. Points encoded as 48 byte X following by 48 byte Y"}, + {BLS12_381g2, "G2 of the BLS 12-381 curve. 
Points encoded as 96 byte X following by 96 byte Y"}, +} + +func ecGroupSpecByField(c EcGroup) (ecGroupSpec, bool) { + if int(c) >= len(ecGroupSpecs) { + return ecGroupSpec{}, false + } + return ecGroupSpecs[c], true +} + +var ecGroupSpecByName = make(ecGroupNameSpecMap, len(ecGroupNames)) + +type ecGroupNameSpecMap map[string]ecGroupSpec + +func (s ecGroupNameSpecMap) get(name string) (FieldSpec, bool) { + fs, ok := s[name] + return fs, ok +} + +// EcGroups collects details about the constants used to describe EcGroups +var EcGroups = FieldGroup{ + "EC", "Groups", + ecGroupNames[:], + ecGroupSpecByName, +} + // Base64Encoding is an enum for the `base64decode` opcode type Base64Encoding int @@ -1323,6 +1391,13 @@ func init() { ecdsaCurveSpecByName[s.field.String()] = s } + equal(len(ecGroupSpecs), len(ecGroupNames)) + for i, s := range ecGroupSpecs { + equal(int(s.field), i) + ecGroupNames[s.field] = s.field.String() + ecGroupSpecByName[s.field.String()] = s + } + equal(len(base64EncodingSpecs), len(base64EncodingNames)) for i, s := range base64EncodingSpecs { equal(int(s.field), i) diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go index 44531c2bdf..49f05bbb50 100644 --- a/data/transactions/logic/fields_string.go +++ b/data/transactions/logic/fields_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,Base64Encoding,JSONRefType,VrfStandard,BlockField -output=fields_string.go"; DO NOT EDIT. +// Code generated by "stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,EcGroup,Base64Encoding,JSONRefType,VrfStandard,BlockField -output=fields_string.go"; DO NOT EDIT. 
package logic @@ -265,6 +265,27 @@ func (i EcdsaCurve) String() string { } return _EcdsaCurve_name[_EcdsaCurve_index[i]:_EcdsaCurve_index[i+1]] } +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[BN254g1-0] + _ = x[BN254g2-1] + _ = x[BLS12_381g1-2] + _ = x[BLS12_381g2-3] + _ = x[invalidEcGroup-4] +} + +const _EcGroup_name = "BN254g1BN254g2BLS12_381g1BLS12_381g2invalidEcGroup" + +var _EcGroup_index = [...]uint8{0, 7, 14, 25, 36, 50} + +func (i EcGroup) String() string { + if i < 0 || i >= EcGroup(len(_EcGroup_index)-1) { + return "EcGroup(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _EcGroup_name[_EcGroup_index[i]:_EcGroup_index[i+1]] +} func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json index cbfb4b0d95..f2cbbf646e 100644 --- a/data/transactions/logic/langspec.json +++ b/data/transactions/logic/langspec.json @@ -1,5 +1,5 @@ { - "EvalMaxVersion": 8, + "EvalMaxVersion": 9, "LogicSigVersion": 8, "Ops": [ { @@ -2702,6 +2702,89 @@ "Groups": [ "State Access" ] + }, + { + "Opcode": 224, + "Name": "ec_add", + "Args": "BB", + "Returns": "B", + "Size": 2, + "Doc": "for curve points A and B, return the curve point A + B", + "DocExtra": "A and B are curve points in affine representation: X concatenated with Y. Fails if A or B is not in G. 
Does _not_ check if A and B are in the main prime-order subgroup.", + "ImmediateNote": "{uint8 curve}", + "IntroducedVersion": 9, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 225, + "Name": "ec_scalar_mul", + "Args": "BB", + "Returns": "B", + "Size": 2, + "Doc": "for curve point A and scalar B, return the curve point BA, the point A multiplied by the scalar B.", + "DocExtra": "A is a curve point encoded and checked as described in `ec_add`. Scalar B is interpreted as a big-endian unsigned integer. Fails if B exceeds 32 bytes.", + "ImmediateNote": "{uint8 curve}", + "IntroducedVersion": 9, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 226, + "Name": "ec_pairing_check", + "Args": "BB", + "Returns": "U", + "Size": 2, + "Doc": "1 if the product of the pairing of each point in A with its respective point in B is equal to the identity element of the target group Gt, else 0", + "DocExtra": "A and B are concatenated points, encoded and checked as described in `ec_add`. A contains points of the group G, B contains points of the associated group (G2 if G is G1, and vice versa). Fails if A and B have a different number of points, or if any point is not in its described group or outside the main prime-order subgroup - a stronger condition than other opcodes.", + "ImmediateNote": "{uint8 curve}", + "IntroducedVersion": 9, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 227, + "Name": "ec_multi_exp", + "Args": "BB", + "Returns": "B", + "Size": 2, + "Doc": "for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn", + "DocExtra": "A is a list of concatenated points, encoded and checked as described in `ec_add`. 
B is a list of concatenated scalars which, unlike ec_scalar_mul, must all be exactly 32 bytes long.\nThe name `ec_multi_exp` was chosen to reflect common usage, but a more consistent name would be `ec_multi_scalar_mul`", + "ImmediateNote": "{uint8 curve}", + "IntroducedVersion": 9, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 228, + "Name": "ec_subgroup_check", + "Args": "B", + "Returns": "U", + "Size": 2, + "Doc": "1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. Program fails if A is not in G at all.", + "ImmediateNote": "{uint8 curve}", + "IntroducedVersion": 9, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 229, + "Name": "ec_map_to", + "Args": "B", + "Returns": "B", + "Size": 2, + "Doc": "maps field element A to group G", + "DocExtra": "BN254 points are mapped by the SVDW map. BLS12-381 points are mapped by the SSWU map. G1 element inputs are encoded single big-endian byte-array of length \u003c= n. G2 inputs are encoded as concatenated n-byte big-endian encoded integers. n == 32 for BN254 and 48 for BLS12-381. No input may exceed the modulus of the group. ", + "ImmediateNote": "{uint8 curve}", + "IntroducedVersion": 9, + "Groups": [ + "Arithmetic" + ] } ] } diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 15710e50cb..217f3bdd8e 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -584,10 +584,6 @@ var OpSpecs = []OpSpec{ {0x98, "sha3_256", opSHA3_256, proto("b:b"), unlimitedStorage, costByLength(58, 4, 8)},}, */ - {0x99, "bn256_add", opBN254G1Add, proto("bb:b"), pairingVersion, costly(70)}, - {0x9a, "bn256_scalar_mul", opBN254G1ScalarMul, proto("bb:b"), pairingVersion, costly(970)}, - {0x9b, "bn256_pairing", opBN254Pairing, proto("bb:i"), pairingVersion, costly(8700)}, - // Byteslice math. 
{0xa0, "b+", opBytesPlus, proto("bb:b"), 4, costly(10)}, {0xa1, "b-", opBytesMinus, proto("bb:b"), 4, costly(10)}, @@ -638,6 +634,45 @@ var OpSpecs = []OpSpec{ // randomness support {0xd0, "vrf_verify", opVrfVerify, proto("bbb:bi"), randomnessVersion, field("s", &VrfStandards).costs(5700)}, {0xd1, "block", opBlock, proto("i:a"), randomnessVersion, field("f", &BlockFields)}, + + {0xe0, "ec_add", opEcAdd, proto("bb:b"), pairingVersion, + costByField("g", &EcGroups, []int{ + BN254g1: 310, BN254g2: 430, + BLS12_381g1: 540, BLS12_381g2: 750})}, // eip: 500, 800 + {0xe1, "ec_scalar_mul", opEcScalarMul, proto("bb:b"), pairingVersion, + costByField("g", &EcGroups, []int{ + BN254g1: 2200, BN254g2: 4460, + BLS12_381g1: 3640, BLS12_381g2: 8530})}, // eip: 12000, 45000 + + // BN cost is 18k per elt, BLS is 45k + 40k per elt. Not putting those + // costs in yet because 1) that is bigger than allowed in logicsigs, so + // tests would fail. 2) We don't yet have support for field specific costs + // that _also_ depend on input sizes. + {0xe2, "ec_pairing_check", opEcPairingCheck, proto("bb:i"), pairingVersion, + costByField("g", &EcGroups, []int{ + BN254g1: 18_000, BN254g2: 18_000, + BLS12_381g1: 15_000, BLS12_381g2: 15_000})}, // eip: 43000*k + 65000 + + // This cost must be based on number of points. EIP proposes a complicated + // "discount" scheme. At any rate, as noted above, we don't yet have + // support for input variable costs that vary based on fields. 
+ // bnG1 seems to be 8000 + 300 /elt + // bnG2 seems to be 18000 + 900 /elt (VERY ERRATIC TIMINGS) + // blsG1 seems to be 14000 + 400 /elt + // blsG2 seems to be 35000 + 1800 /elt + {0xe3, "ec_multi_exp", opEcMultiExp, proto("bb:b"), pairingVersion, + costByField("g", &EcGroups, []int{ + BN254g1: 800, BN254g2: 1800, // LIES + BLS12_381g1: 1400, BLS12_381g2: 3500})}, // LIES + + {0xe4, "ec_subgroup_check", opEcSubgroupCheck, proto("b:i"), pairingVersion, + costByField("g", &EcGroups, []int{ + BN254g1: 50, BN254g2: 11500, // How is the g1 subgroup check so much faster? + BLS12_381g1: 5600, BLS12_381g2: 7100})}, + {0xe5, "ec_map_to", opEcMapTo, proto("b:b"), pairingVersion, + costByField("g", &EcGroups, []int{ + BN254g1: 1700, BN254g2: 11000, + BLS12_381g1: 5600, BLS12_381g2: 43000})}, // eip: 5500, 75000 } type sortByOpcode []OpSpec diff --git a/data/transactions/logic/pairing.go b/data/transactions/logic/pairing.go index 9ca6002593..2a645cff7c 100644 --- a/data/transactions/logic/pairing.go +++ b/data/transactions/logic/pairing.go @@ -22,52 +22,266 @@ import ( "math/big" "github.com/consensys/gnark-crypto/ecc" - BLS12381fr "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" "github.com/consensys/gnark-crypto/ecc/bn254" - BN254fp "github.com/consensys/gnark-crypto/ecc/bn254/fp" - BN254fr "github.com/consensys/gnark-crypto/ecc/bn254/fr" + bn254fp "github.com/consensys/gnark-crypto/ecc/bn254/fp" + bn254fr "github.com/consensys/gnark-crypto/ecc/bn254/fr" bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381" - BLS12381fp "github.com/consensys/gnark-crypto/ecc/bls12-381/fp" + bls12381fp "github.com/consensys/gnark-crypto/ecc/bls12-381/fp" + bls12381fr "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" ) -/*Remaining questions -->What conditions should cause pairing to error vs put false on stack vs ignore point? -->Empty inputs (currently pairing and multiexp panic on empty inputs) -->Is subgroup check necessary for multiexp? 
Precompile does not seem to think so, but should ask Fabris -->Confirm with gnark whether or not IsInSubgroup() also checks if point on curve. If not, they have a problem -->For now our code is written as if IsInSubgroup() does not check if point is on curve but is set up to be easily changed -*/ - -// Note: comments are generally only listed once even if they apply to multiple different lines to avoid congestion from bls/bn/g1/g2 quadruplication -// Effectively this means input/output explanations are only given for bls12-381 g1 versions of funcs -// The input/output comments start around line 178 +type sError string + +func (s sError) Error() string { return string(s) } + +const ( + errNotOnCurve = sError("point not on curve") + errWrongSubgroup = sError("wrong subgroup") + errEmptyInput = sError("empty input") +) + +// Input: Two byte slices at top of stack, each an uncompressed point +// Output: Single byte slice on top of stack which is the uncompressed sum of inputs +func opEcAdd(cx *EvalContext) error { + group := EcGroup(cx.program[cx.pc+1]) + fs, ok := ecGroupSpecByField(group) + if !ok { // no version check yet, both appeared at once + return fmt.Errorf("invalid ec_add group %s", group) + } + + last := len(cx.stack) - 1 + prev := last - 1 + a := cx.stack[prev].Bytes + b := cx.stack[last].Bytes + + var res []byte + var err error + switch fs.field { + case BN254g1: + res, err = bn254G1Add(a, b) + case BN254g2: + res, err = bn254G2Add(a, b) + case BLS12_381g1: + res, err = bls12381G1Add(a, b) + case BLS12_381g2: + res, err = bls12381G2Add(a, b) + default: + err = fmt.Errorf("invalid ec_add group %s", group) + } + cx.stack[prev].Bytes = res + cx.stack = cx.stack[:last] + return err +} + +// Input: ToS is a scalar, encoded as an unsigned big-endian, second to top is +// uncompressed bytes for g1 point +// Output: Single byte slice on top of stack which contains uncompressed bytes +// for product of scalar and point +func opEcScalarMul(cx *EvalContext) error { 
+ group := EcGroup(cx.program[cx.pc+1]) + fs, ok := ecGroupSpecByField(group) + if !ok { // no version check yet, both appeared at once + return fmt.Errorf("invalid ec_scalar_mul group %s", group) + } + + last := len(cx.stack) - 1 + prev := last - 1 + aBytes := cx.stack[prev].Bytes + kBytes := cx.stack[last].Bytes + if len(kBytes) > 32 { + return fmt.Errorf("ec_scalar_mul scalar len is %d, exceeds 32", len(kBytes)) + } + k := new(big.Int).SetBytes(kBytes) + + var res []byte + var err error + switch fs.field { + case BN254g1: + res, err = bn254G1ScalarMul(aBytes, k) + case BN254g2: + res, err = bn254G2ScalarMul(aBytes, k) + case BLS12_381g1: + res, err = bls12381G1ScalarMul(aBytes, k) + case BLS12_381g2: + res, err = bls12381G2ScalarMul(aBytes, k) + default: + err = fmt.Errorf("invalid ec_scalar_mul group %s", group) + } + + cx.stack = cx.stack[:last] + cx.stack[prev].Bytes = res + return err +} + +// Input: Two byte slices, top is concatenated uncompressed bytes for k g2 points, and second to top is same for g1 +// Output: Single uint at top representing bool for whether pairing of inputs was identity +func opEcPairingCheck(cx *EvalContext) error { + /* + Q: Should pairing fail is the supplied points are not canonical? + Q: Should pairing fail if there are no inputs? 
+ */ + + group := EcGroup(cx.program[cx.pc+1]) + fs, ok := ecGroupSpecByField(group) + if !ok { // no version check yet, both appeared at once + return fmt.Errorf("invalid ec_pairing_check group %s", group) + } + + last := len(cx.stack) - 1 + prev := last - 1 + g1Bytes := cx.stack[prev].Bytes + g2Bytes := cx.stack[last].Bytes + + var err error + ok = false + switch fs.field { + case BN254g2: + g1Bytes, g2Bytes = g2Bytes, g1Bytes + fallthrough + case BN254g1: + ok, err = bn254PairingCheck(g1Bytes, g2Bytes) + case BLS12_381g2: + g1Bytes, g2Bytes = g2Bytes, g1Bytes + fallthrough + case BLS12_381g1: + ok, err = bls12381PairingCheck(g1Bytes, g2Bytes) + default: + err = fmt.Errorf("invalid ec_pairing_check group %s", group) + } + + cx.stack = cx.stack[:last] + cx.stack[prev] = boolToSV(ok) + return err +} + +// Input: Top of stack is slice of k scalars, second to top is slice of k group points as uncompressed bytes +// Output: Single byte slice that contains uncompressed bytes for point equivalent to p_1^e_1 * p_2^e_2 * ... 
* p_k^e_k, where p_i is i'th point from input and e_i is i'th scalar +func opEcMultiExp(cx *EvalContext) error { + last := len(cx.stack) - 1 + prev := last - 1 + pointBytes := cx.stack[prev].Bytes + scalarBytes := cx.stack[last].Bytes + + group := EcGroup(cx.program[cx.pc+1]) + fs, ok := ecGroupSpecByField(group) + if !ok { // no version check yet, both appeared at once + return fmt.Errorf("invalid ec_multiexp group %s", group) + } + + var res []byte + var err error + switch fs.field { + case BN254g1: + res, err = bn254G1MultiExp(pointBytes, scalarBytes) + case BN254g2: + res, err = bn254G2MultiExp(pointBytes, scalarBytes) + case BLS12_381g1: + res, err = bls12381G1MultiExp(pointBytes, scalarBytes) + case BLS12_381g2: + res, err = bls12381G2MultiExp(pointBytes, scalarBytes) + default: + err = fmt.Errorf("invalid ec_multiexp group %s", group) + } + + cx.stack = cx.stack[:last] + cx.stack[prev].Bytes = res + return err +} + +// Input: Single byte slice on top of stack containing uncompressed bytes for g1 point +// Output: Single uint on stack top representing bool for whether the input was in the correct subgroup or not +func opEcSubgroupCheck(cx *EvalContext) error { + last := len(cx.stack) - 1 + pointBytes := cx.stack[last].Bytes + + group := EcGroup(cx.program[cx.pc+1]) + fs, ok := ecGroupSpecByField(group) + if !ok { // no version check yet, both appeared at once + return fmt.Errorf("invalid ec_subgroup_check group %s", group) + } + + var err error + ok = false + switch fs.field { + case BN254g1: + ok, err = bn254G1SubgroupCheck(pointBytes) + case BN254g2: + ok, err = bn254G2SubgroupCheck(pointBytes) + case BLS12_381g1: + ok, err = bls12381G1SubgroupCheck(pointBytes) + case BLS12_381g2: + ok, err = bls12381G2SubgroupCheck(pointBytes) + default: + err = fmt.Errorf("invalid ec_subgroup_check group %s", group) + } + + cx.stack[last] = boolToSV(ok) + return err +} + +// Input: Single byte slice on top of stack representing single field element +// Output: Single byte 
slice on top of stack which contains uncompressed bytes +// for corresponding point (mapped to by input) +func opEcMapTo(cx *EvalContext) error { + last := len(cx.stack) - 1 + fpBytes := cx.stack[last].Bytes + + group := EcGroup(cx.program[cx.pc+1]) + fs, ok := ecGroupSpecByField(group) + if !ok { // no version check yet, both appeared at once + return fmt.Errorf("invalid ec_map_to group %s", group) + } + + var res []byte + var err error + switch fs.field { + case BN254g1: + res, err = bn254MapToG1(fpBytes) + case BN254g2: + res, err = bn254MapToG2(fpBytes) + case BLS12_381g1: + res, err = bls12381MapToG1(fpBytes) + case BLS12_381g2: + res, err = bls12381MapToG2(fpBytes) + default: + err = fmt.Errorf("invalid ec_map_to group %s", group) + } + cx.stack[last].Bytes = res + return err +} + const ( bls12381fpSize = 48 bls12381g1Size = 2 * bls12381fpSize bls12381fp2Size = 2 * bls12381fpSize bls12381g2Size = 2 * bls12381fp2Size - bn254fpSize = 32 - bn254g1Size = 2 * bn254fpSize - bn254fp2Size = 2 * bn254fpSize - bn254g2Size = 2 * bn254fp2Size - scalarSize = 32 + + bn254fpSize = 32 + bn254g1Size = 2 * bn254fpSize + bn254fp2Size = 2 * bn254fpSize + bn254g2Size = 2 * bn254fp2Size + + scalarSize = 32 ) -func bytesToBLS12381Field(b []byte) (BLS12381fp.Element, error) { - intRepresentation := new(big.Int).SetBytes(b) - if intRepresentation.Cmp(BLS12381fp.Modulus()) >= 0 { - return BLS12381fp.Element{}, errors.New("Field element larger than modulus") +var bls12381Modulus = bls12381fp.Modulus() + +func bytesToBLS12381Field(b []byte) (bls12381fp.Element, error) { + var big big.Int + big.SetBytes(b) + if big.Cmp(bls12381Modulus) >= 0 { + return bls12381fp.Element{}, fmt.Errorf("field element %s larger than modulus %s", &big, bls12381Modulus) } - return *new(bls12381fp.Element).SetBigInt(&big), nil + return *new(bls12381fp.Element).SetBigInt(&big), nil } -func bytesToBLS12381G1(b []byte, checkCurve bool) (bls12381.G1Affine, error) { - var point 
bls12381.G1Affine - var err error +func bytesToBLS12381G1(b []byte) (bls12381.G1Affine, error) { if len(b) != bls12381g1Size { - return point, errors.New("Improper encoding") + return bls12381.G1Affine{}, fmt.Errorf("bad length %d. Expected %d", len(b), bls12381g1Size) } + var point bls12381.G1Affine + var err error point.X, err = bytesToBLS12381Field(b[:bls12381fpSize]) if err != nil { return bls12381.G1Affine{}, err @@ -76,39 +290,36 @@ func bytesToBLS12381G1(b []byte, checkCurve bool) (bls12381.G1Affine, error) { if err != nil { return bls12381.G1Affine{}, err } - if checkCurve && !point.IsOnCurve() { - return bls12381.G1Affine{}, errors.New("Point not on curve") + if !point.IsOnCurve() { + return bls12381.G1Affine{}, errNotOnCurve } return point, nil } func bytesToBLS12381G1s(b []byte, checkSubgroup bool) ([]bls12381.G1Affine, error) { - if len(b)%(bls12381g1Size) != 0 { - return nil, errors.New("Improper encoding") + if len(b)%bls12381g1Size != 0 { + return nil, fmt.Errorf("bad length %d. 
Expected %d multiple", len(b), bls12381g1Size) } if len(b) == 0 { - return nil, errors.New("Empty input") + return nil, errors.New("empty input") } - points := make([]bls12381.G1Affine, len(b)/(bls12381g1Size)) - for i := 0; i < len(b)/(bls12381g1Size); i++ { - // If IsInSubgroup() checks if point is on curve as well, the following line should replace the line after it - // point, err := bytesToBLS12381G1(b[i*bls12381g1Size:(i+1)*bls12381g1Size], !checkSubgroup) - point, err := bytesToBLS12381G1(b[i*bls12381g1Size:(i+1)*bls12381g1Size], true) + points := make([]bls12381.G1Affine, len(b)/bls12381g1Size) + for i := range points { + var err error + points[i], err = bytesToBLS12381G1(b[i*bls12381g1Size : (i+1)*bls12381g1Size]) if err != nil { - // revisit later to see if way to check in one step if any errored instead of having to check each one return nil, err } - if checkSubgroup && !point.IsInSubGroup() { - return nil, errors.New("Wrong subgroup") + if checkSubgroup && !points[i].IsInSubGroup() { + return nil, errWrongSubgroup } - points[i] = point } return points, nil } -func bytesToBLS12381G2(b []byte, checkCurve bool) (bls12381.G2Affine, error) { +func bytesToBLS12381G2(b []byte) (bls12381.G2Affine, error) { if len(b) != bls12381g2Size { - return bls12381.G2Affine{}, errors.New("Improper encoding") + return bls12381.G2Affine{}, fmt.Errorf("bad length %d. 
Expected %d", len(b), bls12381g2Size) } var err error var point bls12381.G2Affine @@ -128,30 +339,29 @@ func bytesToBLS12381G2(b []byte, checkCurve bool) (bls12381.G2Affine, error) { if err != nil { return bls12381.G2Affine{}, err } - if checkCurve && !point.IsOnCurve() { - return bls12381.G2Affine{}, errors.New("Point not on curve") + if !point.IsOnCurve() { + return bls12381.G2Affine{}, errNotOnCurve } return point, nil } func bytesToBLS12381G2s(b []byte, checkSubgroup bool) ([]bls12381.G2Affine, error) { - if len(b)%(bls12381g2Size) != 0 { - return nil, errors.New("Improper encoding") + if len(b)%bls12381g2Size != 0 { + return nil, fmt.Errorf("bad length %d. Expected %d multiple", len(b), bls12381g2Size) } if len(b) == 0 { - return nil, errors.New("Empty input") + return nil, errors.New("empty input") } points := make([]bls12381.G2Affine, len(b)/bls12381g2Size) - for i := 0; i < len(b)/bls12381g2Size; i++ { - // point, err := bytesToBLS12381G2(b[i*bls12381g2Size : (i+1)*bls12381g2Size], !checkSubgroup) - point, err := bytesToBLS12381G2(b[i*bls12381g2Size:(i+1)*bls12381g2Size], true) + for i := range points { + var err error + points[i], err = bytesToBLS12381G2(b[i*bls12381g2Size : (i+1)*bls12381g2Size]) if err != nil { return nil, err } - if checkSubgroup && !point.IsInSubGroup() { - return nil, errors.New("Wrong subgroup") + if checkSubgroup && !points[i].IsInSubGroup() { + return nil, errWrongSubgroup } - points[i] = point } return points, nil } @@ -175,244 +385,163 @@ func bls12381G2ToBytes(g2 *bls12381.G2Affine) []byte { return pointBytes } -// Input: Two byte slices at top of stack, each an uncompressed point -// Output: Single byte slice on top of stack which is the uncompressed sum of inputs -func opBLS12381G1Add(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - aBytes := cx.stack[prev].Bytes - bBytes := cx.stack[last].Bytes - a, err := bytesToBLS12381G1(aBytes, true) +func bls12381G1Add(aBytes, bBytes []byte) ([]byte, error) { + 
a, err := bytesToBLS12381G1(aBytes) if err != nil { - return err + return nil, err } - b, err := bytesToBLS12381G1(bBytes, true) + b, err := bytesToBLS12381G1(bBytes) if err != nil { - return err + return nil, err } - // Would be slightly more efficient to use global variable instead of constantly creating new points - // But would mess with parallelization - res := new(bls12381.G1Affine).Add(&a, &b) - // It's possible it's more efficient to only check if the sum is on the curve as opposed to the summands, - // but I doubt that's safe - resBytes := bls12381G1ToBytes(res) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = resBytes - return nil + return bls12381G1ToBytes(a.Add(&a, &b)), nil } -func opBLS12381G2Add(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - aBytes := cx.stack[prev].Bytes - bBytes := cx.stack[last].Bytes - a, err := bytesToBLS12381G2(aBytes, true) +func bls12381G2Add(aBytes, bBytes []byte) ([]byte, error) { + a, err := bytesToBLS12381G2(aBytes) if err != nil { - return err + return nil, err } - b, err := bytesToBLS12381G2(bBytes, true) + b, err := bytesToBLS12381G2(bBytes) if err != nil { - return err + return nil, err } - res := new(bls12381.G2Affine).Add(&a, &b) - resBytes := bls12381G2ToBytes(res) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = resBytes - return nil + + return bls12381G2ToBytes(a.Add(&a, &b)), nil } -// Input: Two byte slices, top is bytes for scalar, second to top is uncompressed bytes for g1 point -// Output: Single byte slice on top of stack which contains uncompressed bytes for product of scalar and point -func opBLS12381G1ScalarMul(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - aBytes := cx.stack[prev].Bytes - a, err := bytesToBLS12381G1(aBytes, true) +func bls12381G1ScalarMul(aBytes []byte, k *big.Int) ([]byte, error) { + a, err := bytesToBLS12381G1(aBytes) if err != nil { - return err + return nil, err } - kBytes := cx.stack[last].Bytes - if len(kBytes) != 
scalarSize { - return fmt.Errorf("Scalars must be %d bytes long", scalarSize) - } - // Would probably be more efficient to use uint32 - k := new(big.Int).SetBytes(kBytes[:]) // what is purpose of slicing to self? Keeping it just b/c it was in original implementation - res := new(bls12381.G1Affine).ScalarMultiplication(&a, k) - resBytes := bls12381G1ToBytes(res) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = resBytes - return nil + return bls12381G1ToBytes(a.ScalarMultiplication(&a, k)), nil } -func opBLS12381G2ScalarMul(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - aBytes := cx.stack[prev].Bytes - a, err := bytesToBLS12381G2(aBytes, true) +func bls12381G2ScalarMul(aBytes []byte, k *big.Int) ([]byte, error) { + a, err := bytesToBLS12381G2(aBytes) if err != nil { - return err + return nil, err } - kBytes := cx.stack[last].Bytes - if len(kBytes) != scalarSize { - return fmt.Errorf("Scalars must be %d bytes long", scalarSize) - } - k := new(big.Int).SetBytes(kBytes[:]) - res := new(bls12381.G2Affine).ScalarMultiplication(&a, k) - resBytes := bls12381G2ToBytes(res) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = resBytes - return nil + return bls12381G2ToBytes(a.ScalarMultiplication(&a, k)), nil } -// Input: Two byte slices, top is concatenated uncompressed bytes for k g2 points, and second to top is same for g1 -// Output: Single uint at top representing bool for whether pairing of inputs was identity -func opBLS12381Pairing(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - g1Bytes := cx.stack[prev].Bytes - g2Bytes := cx.stack[last].Bytes +func bls12381PairingCheck(g1Bytes, g2Bytes []byte) (bool, error) { g1, err := bytesToBLS12381G1s(g1Bytes, true) if err != nil { - return err + return false, err } g2, err := bytesToBLS12381G2s(g2Bytes, true) if err != nil { - return err + return false, err } ok, err := bls12381.PairingCheck(g1, g2) - cx.stack = cx.stack[:last] - cx.stack[prev].Uint = boolToUint(ok) - 
cx.stack[prev].Bytes = nil - // I'm assuming it's significantly more likely that err is nil than not - return err + if err != nil { + return false, err + } + return ok, nil } -// Input: Top of stack is slice of k scalars, second to top is slice of k G1 points as uncompressed bytes -// Output: Single byte slice that contains uncompressed bytes for g1 point equivalent to p_1^e_1 * p_2^e_2 * ... * p_k^e_k, where p_i is i'th point from input and e_i is i'th scalar -func opBLS12381G1MultiExponentiation(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - g1Bytes := cx.stack[prev].Bytes - scalarBytes := cx.stack[last].Bytes - // Precompile does not list subgroup check as mandatory for multiexponentiation, but should ask Fabris about this +var eccMontgomery = ecc.MultiExpConfig{ScalarsMont: true} + +func bls12381G1MultiExp(g1Bytes, scalarBytes []byte) ([]byte, error) { g1Points, err := bytesToBLS12381G1s(g1Bytes, false) if err != nil { - return err + return nil, err } - if len(scalarBytes)%scalarSize != 0 || len(scalarBytes)/scalarSize != len(g1Points) { - return errors.New("Bad input") + if len(scalarBytes) != scalarSize*len(g1Points) { + return nil, fmt.Errorf("bad scalars length %d. 
Expected %d", len(scalarBytes), scalarSize*len(g1Points)) } - scalars := make([]BLS12381fr.Element, len(g1Points)) - for i := 0; i < len(g1Points); i++ { + scalars := make([]bls12381fr.Element, len(g1Points)) + for i := range scalars { scalars[i].SetBytes(scalarBytes[i*scalarSize : (i+1)*scalarSize]) } - res, _ := new(bls12381.G1Affine).MultiExp(g1Points, scalars, ecc.MultiExpConfig{}) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = bls12381G1ToBytes(res) - return nil + res, err := new(bls12381.G1Affine).MultiExp(g1Points, scalars, eccMontgomery) + if err != nil { + return nil, err + } + return bls12381G1ToBytes(res), nil } -func opBLS12381G2MultiExponentiation(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - g2Bytes := cx.stack[prev].Bytes - scalarBytes := cx.stack[last].Bytes +func bls12381G2MultiExp(g2Bytes, scalarBytes []byte) ([]byte, error) { g2Points, err := bytesToBLS12381G2s(g2Bytes, false) if err != nil { - return err + return nil, err } - if len(scalarBytes)%scalarSize != 0 || len(scalarBytes)/scalarSize != len(g2Points) { - return errors.New("Bad input") + if len(scalarBytes) != scalarSize*len(g2Points) { + return nil, fmt.Errorf("bad scalars length %d. 
Expected %d", len(scalarBytes), scalarSize*len(g2Points)) } - scalars := make([]BLS12381fr.Element, len(g2Points)) - for i := 0; i < len(g2Points); i++ { + scalars := make([]bls12381fr.Element, len(g2Points)) + for i := range scalars { scalars[i].SetBytes(scalarBytes[i*scalarSize : (i+1)*scalarSize]) } - res, _ := new(bls12381.G2Affine).MultiExp(g2Points, scalars, ecc.MultiExpConfig{}) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = bls12381G2ToBytes(res) - return nil + res, err := new(bls12381.G2Affine).MultiExp(g2Points, scalars, eccMontgomery) + if err != nil { + return nil, err + } + return bls12381G2ToBytes(res), nil } -// Input: Single byte slice on top of stack representing single g1 field element -// Output: Single byte slice on top of stack which contains uncompressed bytes for g1 point (mapped to by input) -func opBLS12381MapFpToG1(cx *EvalContext) error { - last := len(cx.stack) - 1 - fpBytes := cx.stack[last].Bytes - if len(fpBytes) != bls12381fpSize { - return errors.New("Bad input") - } +func bls12381MapToG1(fpBytes []byte) ([]byte, error) { fp, err := bytesToBLS12381Field(fpBytes) if err != nil { - return err + return nil, err } point := bls12381.MapToG1(fp) - cx.stack[last].Bytes = bls12381G1ToBytes(&point) - return nil + return bls12381G1ToBytes(&point), nil } -func opBLS12381MapFp2ToG2(cx *EvalContext) error { - last := len(cx.stack) - 1 - fpBytes := cx.stack[last].Bytes +func bls12381MapToG2(fpBytes []byte) ([]byte, error) { if len(fpBytes) != bls12381fp2Size { - return errors.New("Bad input") + return nil, fmt.Errorf("bad encoded element length: %d", len(fpBytes)) } - fp2 := new(bls12381.G2Affine).X + g2 := bls12381.G2Affine{} var err error - fp2.A0, err = bytesToBLS12381Field(fpBytes[0:bls12381fpSize]) + g2.X.A0, err = bytesToBLS12381Field(fpBytes[0:bls12381fpSize]) if err != nil { - return err + return nil, err } - fp2.A1, err = bytesToBLS12381Field(fpBytes[bls12381fpSize:]) + g2.X.A1, err = bytesToBLS12381Field(fpBytes[bls12381fpSize:]) 
if err != nil { - return err + return nil, err } - point := bls12381.MapToG2(fp2) - cx.stack[last].Bytes = bls12381G2ToBytes(&point) - return nil + point := bls12381.MapToG2(g2.X) + return bls12381G2ToBytes(&point), nil } -// Input: Single byte slice on top of stack containing uncompressed bytes for g1 point -// Output: Single uint on stack top representing bool for whether the input was in the correct subgroup or not -func opBLS12381G1SubgroupCheck(cx *EvalContext) error { - last := len(cx.stack) - 1 - pointBytes := cx.stack[last].Bytes - // checkCurve should be false if turns out that IsInSubgroup checks if point is on curve - point, err := bytesToBLS12381G1(pointBytes, true) +func bls12381G1SubgroupCheck(pointBytes []byte) (bool, error) { + point, err := bytesToBLS12381G1(pointBytes) if err != nil { - return err + return false, err } - cx.stack[last].Uint = boolToUint(point.IsInSubGroup()) - cx.stack[last].Bytes = nil - return err + return point.IsInSubGroup(), nil } -func opBLS12381G2SubgroupCheck(cx *EvalContext) error { - last := len(cx.stack) - 1 - pointBytes := cx.stack[last].Bytes - point, err := bytesToBLS12381G2(pointBytes, true) +func bls12381G2SubgroupCheck(pointBytes []byte) (bool, error) { + point, err := bytesToBLS12381G2(pointBytes) if err != nil { - return err + return false, err } - cx.stack[last].Uint = boolToUint(point.IsInSubGroup()) - cx.stack[last].Bytes = nil - return err + return point.IsInSubGroup(), nil } -func bytesToBN254Field(b []byte) (BN254fp.Element, error) { - intRepresentation := new(big.Int).SetBytes(b) - if intRepresentation.Cmp(BN254fp.Modulus()) >= 0 { - return BN254fp.Element{}, errors.New("Field element larger than modulus") +var bn254Modulus = bn254fp.Modulus() + +func bytesToBN254Field(b []byte) (bn254fp.Element, error) { + var big big.Int + big.SetBytes(b) + if big.Cmp(bn254Modulus) >= 0 { + return bn254fp.Element{}, fmt.Errorf("field element %s larger than modulus %s", &big, bn254Modulus) } - return 
*new(BN254fp.Element).SetBigInt(intRepresentation), nil + return *new(bn254fp.Element).SetBigInt(&big), nil } -func bytesToBN254G1(b []byte, checkCurve bool) (bn254.G1Affine, error) { - var point bn254.G1Affine - var err error +func bytesToBN254G1(b []byte) (bn254.G1Affine, error) { if len(b) != bn254g1Size { - return point, errors.New("Improper encoding") + return bn254.G1Affine{}, fmt.Errorf("bad length %d. Expected %d", len(b), bn254g1Size) } + var point bn254.G1Affine + var err error point.X, err = bytesToBN254Field(b[:bn254fpSize]) if err != nil { return bn254.G1Affine{}, err @@ -421,37 +550,36 @@ func bytesToBN254G1(b []byte, checkCurve bool) (bn254.G1Affine, error) { if err != nil { return bn254.G1Affine{}, err } - if checkCurve && !point.IsOnCurve() { - return bn254.G1Affine{}, errors.New("Point not on curve") + if !point.IsOnCurve() { + return bn254.G1Affine{}, errNotOnCurve } return point, nil } func bytesToBN254G1s(b []byte, checkSubgroup bool) ([]bn254.G1Affine, error) { - if len(b)%(bn254g1Size) != 0 { - return nil, errors.New("Improper encoding") + if len(b)%bn254g1Size != 0 { + return nil, fmt.Errorf("bad length %d. 
Expected %d multiple", len(b), bn254g1Size) } if len(b) == 0 { - return nil, errors.New("Empty input") + return nil, errors.New("empty input") } - points := make([]bn254.G1Affine, len(b)/(bn254g1Size)) - for i := 0; i < len(b)/(bn254g1Size); i++ { - // point, err := bytesToBN254G1(b[i*bn254g1Size : (i+1)*bn254g1Size], !checkSubgroup) - point, err := bytesToBN254G1(b[i*bn254g1Size:(i+1)*bn254g1Size], true) + points := make([]bn254.G1Affine, len(b)/bn254g1Size) + for i := range points { + var err error + points[i], err = bytesToBN254G1(b[i*bn254g1Size : (i+1)*bn254g1Size]) if err != nil { return nil, err } - if checkSubgroup && !point.IsInSubGroup() { - return nil, errors.New("Wrong subgroup") + if checkSubgroup && !points[i].IsInSubGroup() { + return nil, errWrongSubgroup } - points[i] = point } return points, nil } -func bytesToBN254G2(b []byte, checkCurve bool) (bn254.G2Affine, error) { +func bytesToBN254G2(b []byte) (bn254.G2Affine, error) { if len(b) != bn254g2Size { - return bn254.G2Affine{}, errors.New("Improper encoding") + return bn254.G2Affine{}, fmt.Errorf("bad length %d. Expected %d", len(b), bn254g2Size) } var err error var point bn254.G2Affine @@ -471,30 +599,29 @@ func bytesToBN254G2(b []byte, checkCurve bool) (bn254.G2Affine, error) { if err != nil { return bn254.G2Affine{}, err } - if checkCurve && !point.IsOnCurve() { - return bn254.G2Affine{}, errors.New("Point not on curve") + if !point.IsOnCurve() { + return bn254.G2Affine{}, errNotOnCurve } return point, nil } func bytesToBN254G2s(b []byte, checkSubgroup bool) ([]bn254.G2Affine, error) { - if len(b)%(bn254g2Size) != 0 { - return nil, errors.New("Improper encoding") + if len(b)%bn254g2Size != 0 { + return nil, fmt.Errorf("bad length %d. 
Expected %d multiple", len(b), bn254g2Size) } if len(b) == 0 { - return nil, errors.New("Empty input") + return nil, errEmptyInput } points := make([]bn254.G2Affine, len(b)/bn254g2Size) - for i := 0; i < len(b)/bn254g2Size; i++ { - // point, err := bytesToBN254G2(b[i*bn254g2Size : (i+1)*bn254g2Size], !checkSubgroup) - point, err := bytesToBN254G2(b[i*bn254g2Size:(i+1)*bn254g2Size], true) + for i := range points { + var err error + points[i], err = bytesToBN254G2(b[i*bn254g2Size : (i+1)*bn254g2Size]) if err != nil { return nil, err } - if checkSubgroup && !point.IsInSubGroup() { - return nil, errors.New("Wrong subgroup") + if checkSubgroup && !points[i].IsInSubGroup() { + return nil, errWrongSubgroup } - points[i] = point } return points, nil } @@ -518,206 +645,139 @@ func bn254G2ToBytes(g2 *bn254.G2Affine) []byte { return pointBytes } -func opBN254G1Add(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - aBytes := cx.stack[prev].Bytes - bBytes := cx.stack[last].Bytes - a, err := bytesToBN254G1(aBytes, true) +func bn254G1Add(aBytes, bBytes []byte) ([]byte, error) { + a, err := bytesToBN254G1(aBytes) if err != nil { - return err + return nil, err } - b, err := bytesToBN254G1(bBytes, true) + b, err := bytesToBN254G1(bBytes) if err != nil { - return err + return nil, err } - res := new(bn254.G1Affine).Add(&a, &b) - resBytes := bn254G1ToBytes(res) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = resBytes - return nil + return bn254G1ToBytes(a.Add(&a, &b)), nil } -func opBN254G2Add(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - aBytes := cx.stack[prev].Bytes - bBytes := cx.stack[last].Bytes - a, err := bytesToBN254G2(aBytes, true) +func bn254G2Add(aBytes, bBytes []byte) ([]byte, error) { + a, err := bytesToBN254G2(aBytes) if err != nil { - return err + return nil, err } - b, err := bytesToBN254G2(bBytes, true) + b, err := bytesToBN254G2(bBytes) if err != nil { - return err + return nil, err } - res := 
new(bn254.G2Affine).Add(&a, &b) - resBytes := bn254G2ToBytes(res) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = resBytes - return nil + return bn254G2ToBytes(a.Add(&a, &b)), nil } -func opBN254G1ScalarMul(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - aBytes := cx.stack[prev].Bytes - a, err := bytesToBN254G1(aBytes, true) +func bn254G1ScalarMul(aBytes []byte, k *big.Int) ([]byte, error) { + a, err := bytesToBN254G1(aBytes) if err != nil { - return err + return nil, err } - kBytes := cx.stack[last].Bytes - if len(kBytes) != scalarSize { - return fmt.Errorf("Scalars must be %d bytes long", scalarSize) - } - k := new(big.Int).SetBytes(kBytes[:]) - res := new(bn254.G1Affine).ScalarMultiplication(&a, k) - resBytes := bn254G1ToBytes(res) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = resBytes - return nil + return bn254G1ToBytes(a.ScalarMultiplication(&a, k)), nil } -func opBN254G2ScalarMul(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - aBytes := cx.stack[prev].Bytes - a, err := bytesToBN254G2(aBytes, true) +func bn254G2ScalarMul(aBytes []byte, k *big.Int) ([]byte, error) { + a, err := bytesToBN254G2(aBytes) if err != nil { - return err + return nil, err } - kBytes := cx.stack[last].Bytes - if len(kBytes) != scalarSize { - return fmt.Errorf("Scalars must be %d bytes long", scalarSize) - } - k := new(big.Int).SetBytes(kBytes[:]) - res := new(bn254.G2Affine).ScalarMultiplication(&a, k) - resBytes := bn254G2ToBytes(res) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = resBytes - return nil + return bn254G2ToBytes(a.ScalarMultiplication(&a, k)), nil } -func opBN254Pairing(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - g1Bytes := cx.stack[prev].Bytes - g2Bytes := cx.stack[last].Bytes +func bn254PairingCheck(g1Bytes, g2Bytes []byte) (bool, error) { g1, err := bytesToBN254G1s(g1Bytes, true) if err != nil { - return err + return false, err } g2, err := bytesToBN254G2s(g2Bytes, 
true) if err != nil { - return err + return false, err } ok, err := bn254.PairingCheck(g1, g2) - cx.stack = cx.stack[:last] - cx.stack[prev] = boolToSV(ok) - return err + if err != nil { + return false, err + } + return ok, nil } -func opBN254G1MultiExponentiation(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - g1Bytes := cx.stack[prev].Bytes - scalarBytes := cx.stack[last].Bytes +func bn254G1MultiExp(g1Bytes, scalarBytes []byte) ([]byte, error) { g1Points, err := bytesToBN254G1s(g1Bytes, false) if err != nil { - return err + return nil, err } - if len(scalarBytes)%scalarSize != 0 || len(scalarBytes)/scalarSize != len(g1Points) { - return errors.New("Bad input") + if len(scalarBytes) != scalarSize*len(g1Points) { + return nil, fmt.Errorf("bad scalars length %d. Expected %d", len(scalarBytes), scalarSize*len(g1Points)) } - scalars := make([]BN254fr.Element, len(g1Points)) - for i := 0; i < len(g1Points); i++ { + scalars := make([]bn254fr.Element, len(g1Points)) + for i := range scalars { scalars[i].SetBytes(scalarBytes[i*scalarSize : (i+1)*scalarSize]) } - res, _ := new(bn254.G1Affine).MultiExp(g1Points, scalars, ecc.MultiExpConfig{}) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = bn254G1ToBytes(res) - return nil + res, err := new(bn254.G1Affine).MultiExp(g1Points, scalars, eccMontgomery) + if err != nil { + return nil, err + } + return bn254G1ToBytes(res), nil } -func opBN254G2MultiExponentiation(cx *EvalContext) error { - last := len(cx.stack) - 1 - prev := last - 1 - g2Bytes := cx.stack[prev].Bytes - scalarBytes := cx.stack[last].Bytes +func bn254G2MultiExp(g2Bytes, scalarBytes []byte) ([]byte, error) { g2Points, err := bytesToBN254G2s(g2Bytes, false) if err != nil { - return err + return nil, err } - if len(scalarBytes)%scalarSize != 0 || len(scalarBytes)/scalarSize != len(g2Points) { - return errors.New("Bad input") + if len(scalarBytes) != scalarSize*len(g2Points) { + return nil, fmt.Errorf("bad scalars length %d. 
Expected %d", len(scalarBytes), scalarSize*len(g2Points)) } - scalars := make([]BN254fr.Element, len(g2Points)) - for i := 0; i < len(g2Points); i++ { + scalars := make([]bn254fr.Element, len(g2Points)) + for i := range scalars { scalars[i].SetBytes(scalarBytes[i*scalarSize : (i+1)*scalarSize]) } - res, _ := new(bn254.G2Affine).MultiExp(g2Points, scalars, ecc.MultiExpConfig{}) - cx.stack = cx.stack[:last] - cx.stack[prev].Bytes = bn254G2ToBytes(res) - return nil + res, err := new(bn254.G2Affine).MultiExp(g2Points, scalars, eccMontgomery) + if err != nil { + return nil, err + } + return bn254G2ToBytes(res), nil } -func opBN254MapFpToG1(cx *EvalContext) error { - last := len(cx.stack) - 1 - fpBytes := cx.stack[last].Bytes - if len(fpBytes) != bn254fpSize { - return errors.New("Bad input") - } - // should be MapToG1 in most recent version +func bn254MapToG1(fpBytes []byte) ([]byte, error) { fp, err := bytesToBN254Field(fpBytes) if err != nil { - return err + return nil, err } point := bn254.MapToG1(fp) - cx.stack[last].Bytes = bn254G1ToBytes(&point) - return nil + return bn254G1ToBytes(&point), nil } -func opBN254MapFp2ToG2(cx *EvalContext) error { - last := len(cx.stack) - 1 - fpBytes := cx.stack[last].Bytes +func bn254MapToG2(fpBytes []byte) ([]byte, error) { if len(fpBytes) != bn254fp2Size { - return errors.New("Bad input") + return nil, fmt.Errorf("bad encoded element length: %d", len(fpBytes)) } - fp2 := new(bn254.G2Affine).X + fp2 := bn254.G2Affine{}.X // no way to declare an fptower.E2 var err error fp2.A0, err = bytesToBN254Field(fpBytes[0:bn254fpSize]) if err != nil { - return err + return nil, err } fp2.A1, err = bytesToBN254Field(fpBytes[bn254fpSize:]) if err != nil { - return err + return nil, err } point := bn254.MapToG2(fp2) - cx.stack[last].Bytes = bn254G2ToBytes(&point) - return nil + return bn254G2ToBytes(&point), nil } -func opBN254G1SubgroupCheck(cx *EvalContext) error { - last := len(cx.stack) - 1 - pointBytes := cx.stack[last].Bytes - point, err 
:= bytesToBN254G1(pointBytes, true) +func bn254G1SubgroupCheck(pointBytes []byte) (bool, error) { + point, err := bytesToBN254G1(pointBytes) if err != nil { - return err + return false, err } - cx.stack[last].Uint = boolToUint(point.IsInSubGroup()) - cx.stack[last].Bytes = nil - return err + return point.IsInSubGroup(), nil } -func opBN254G2SubgroupCheck(cx *EvalContext) error { - last := len(cx.stack) - 1 - pointBytes := cx.stack[last].Bytes - point, err := bytesToBN254G2(pointBytes, true) +func bn254G2SubgroupCheck(pointBytes []byte) (bool, error) { + point, err := bytesToBN254G2(pointBytes) if err != nil { - return err + return false, err } - cx.stack[last].Uint = boolToUint(point.IsInSubGroup()) - cx.stack[last].Bytes = nil - return err + return point.IsInSubGroup(), nil } diff --git a/data/transactions/logic/pairing_test.go b/data/transactions/logic/pairing_test.go index c96bd67197..3890c35e49 100644 --- a/data/transactions/logic/pairing_test.go +++ b/data/transactions/logic/pairing_test.go @@ -16,14 +16,543 @@ package logic +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "strings" + "testing" + + "github.com/algorand/go-algorand/test/partitiontest" + bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381" + bls12381fp "github.com/consensys/gnark-crypto/ecc/bls12-381/fp" + "github.com/consensys/gnark-crypto/ecc/bn254" + bn254fp "github.com/consensys/gnark-crypto/ecc/bn254/fp" +) + const pairingNonsense = ` pushbytes 0x012345 dup - bn256_add + ec_add BN254g1 dup - bn256_scalar_mul + ec_scalar_mul BLS12_381g2 dup - bn256_pairing + ec_pairing_check BN254g1 + ec_multi_exp BLS12_381g2 + ec_subgroup_check BLS12_381g1 + ec_map_to BN254g2 ` -const pairingCompiled = "80030123454999499a499b" +const pairingCompiled = "800301234549e00049e10349e200e303e402e501" + +func bn254G1sToBytes(g1s []bn254.G1Affine) []byte { + var out []byte + for i := range g1s { + out = append(out, bn254G1ToBytes(&g1s[i])...) 
+ } + return out +} + +func bn254G2sToBytes(g2s []bn254.G2Affine) []byte { + var out []byte + for i := range g2s { + out = append(out, bn254G2ToBytes(&g2s[i])...) + } + return out +} + +func bls12381G1sToBytes(g1s []bls12381.G1Affine) []byte { + var out []byte + for i := range g1s { + out = append(out, bls12381G1ToBytes(&g1s[i])...) + } + return out +} + +func bls12381G2sToBytes(g2s []bls12381.G2Affine) []byte { + var out []byte + for i := range g2s { + out = append(out, bls12381G2ToBytes(&g2s[i])...) + } + return out +} + +func TestEcAdd(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // Parameterize this to check all curves + + g1point := bn254RandomG1() + g1bytes1 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1ToBytes(&g1point))) + g1point = bn254RandomG1() + g1bytes2 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1ToBytes(&g1point))) + testAccepts(t, g1bytes1+g1bytes2+"ec_add BN254g1; len", pairingVersion) + testAccepts(t, g1bytes1+"int 64; bzero; ec_add BN254g1;"+g1bytes1+"==", pairingVersion) +} + +func TestEcScalarMul(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // Parameterize this to check all curves + + g1point := bn254RandomG1() + g1bytes1 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1ToBytes(&g1point))) + testAccepts(t, g1bytes1+"int 0; itob; ec_scalar_mul BN254g1; int 64; bzero; ==", pairingVersion) + testPanics(t, g1bytes1+"int 33; bzero; ec_scalar_mul BN254g1; int 64; bzero; ==", pairingVersion, "ec_scalar_mul scalar len is 33") + testAccepts(t, g1bytes1+"int 1; itob; ec_scalar_mul BN254g1;"+g1bytes1+"==", pairingVersion) + testAccepts(t, g1bytes1+` +dup +int 32; bzero; int 7; itob; b| +ec_scalar_mul BN254g1 +swap +int 7; itob +ec_scalar_mul BN254g1 +== +`, pairingVersion) +} + +func TestPairCheck(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + t.Run("bn254", func(t *testing.T) { + t.Parallel() + var g1GenNeg bn254.G1Affine + g1GenNeg.Neg(&bnG1Gen) + g1points := 
[]bn254.G1Affine{g1GenNeg, bnG1Gen} + g2points := []bn254.G2Affine{bnG2Gen, bnG2Gen} + // -1 g1 g2 + g1 g2 = 0 + g1bytes := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1sToBytes(g1points))) + g2bytes := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G2sToBytes(g2points))) + + testAccepts(t, g1bytes+g2bytes+`ec_pairing_check BN254g1`, pairingVersion) + testAccepts(t, g2bytes+g1bytes+`ec_pairing_check BN254g2`, pairingVersion) + }) + + t.Run("bls12-381", func(t *testing.T) { + t.Parallel() + var g1GenNeg bls12381.G1Affine + g1GenNeg.Neg(&blsG1Gen) + g1points := []bls12381.G1Affine{g1GenNeg, blsG1Gen} + g2points := []bls12381.G2Affine{blsG2Gen, blsG2Gen} + // -1 g1 g2 + g1 g2 = 0 + g1bytes := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bls12381G1sToBytes(g1points))) + g2bytes := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bls12381G2sToBytes(g2points))) + + testAccepts(t, g1bytes+g2bytes+`ec_pairing_check BLS12_381g1`, pairingVersion) + testAccepts(t, g2bytes+g1bytes+`ec_pairing_check BLS12_381g2`, pairingVersion) + }) +} + +func TestEcMultiExp(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // Parameterize this to check all curves + + g1point := bn254RandomG1() + g1bytes1 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1ToBytes(&g1point))) + testAccepts(t, g1bytes1+"int 32; bzero; ec_multi_exp BN254g1; int 64; bzero; ==", pairingVersion) + testAccepts(t, g1bytes1+"int 32; bzero; int 1; itob; b|; ec_multi_exp BN254g1;"+g1bytes1+"==", pairingVersion) +} + +// TestAgreement ensures that scalar muls and adds is the same as multi_exp +func TestAgreement(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // Parameterize this to check all curves + + g1point1 := bn254RandomG1() + g1point2 := bn254RandomG1() + k1 := "2F53" // any old int + + g1bytes1 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1ToBytes(&g1point1))) + g1bytes2 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1ToBytes(&g1point2))) + + 
// Try a normal k2 and one that's bigger than prime order + for _, k2 := range []string{"372D82", strings.Repeat("FE", 32)} { + testAccepts(t, fmt.Sprintf(` + %s + byte 0x%s + ec_scalar_mul BN254g1 + %s + byte 0x%s + ec_scalar_mul BN254g1 + ec_add BN254g1 + %s; %s; concat + int 32; bzero; byte 0x%s; b|; + int 32; bzero; byte 0x%s; b|; + concat + ec_multi_exp BN254g1 + == +`, + g1bytes1, k1, g1bytes2, k2, + g1bytes1, g1bytes2, k1, k2), pairingVersion) + } +} + +func TestSubgroupCheckInfinity(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + testAccepts(t, "int 64; bzero; ec_subgroup_check BN254g1;", pairingVersion) + testAccepts(t, "int 128; bzero; ec_subgroup_check BN254g2;", pairingVersion) + testAccepts(t, "int 96; bzero; ec_subgroup_check BLS12_381g1;", pairingVersion) + testAccepts(t, "int 192; bzero; ec_subgroup_check BLS12_381g2;", pairingVersion) +} + +func TestSubgroupCheck(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // Parameterize this to check all curves + + g1point := bn254RandomG1() + g1bytes1 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1ToBytes(&g1point))) + testAccepts(t, g1bytes1+"ec_subgroup_check BN254g1;", pairingVersion) + + /* On BN curve, subgroup == on curve, we can't create a g1bytes that makes this Accept + g1bytes1 = ??? 
+ testAccepts(t, g1bytes1+"ec_subgroup_check BN254g1; !", pairingVersion) + */ + + g1point.X[0]++ // surely now out of subgroup, but also out of main curve, so panics + g1bytes1 = fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1ToBytes(&g1point))) + testPanics(t, g1bytes1+"ec_subgroup_check BN254g1; !", pairingVersion, "point not on curve") +} + +func TestMapTo(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for _, curve := range []string{"BN254g1", "BLS12_381g1"} { + testAccepts(t, fmt.Sprintf("int 27; itob; ec_map_to %s; ec_subgroup_check %s", + curve, curve), pairingVersion) + } + +} + +// TestSlowMapTo tests the G2 MapTo functions, which require more budget, and +// therefore mess with a global and prevent t.Parallel. +func TestSlowMapTo(t *testing.T) { + partitiontest.PartitionTest(t) + //nolint:paralleltest // Not parallel because it modifies testLogicBudget + + was := testLogicBudget + testLogicBudget = 1_000_000 + defer func() { testLogicBudget = was }() + for _, curve := range []string{"BN254g2", "BLS12_381g2"} { + testPanics(t, fmt.Sprintf("int 27; itob; ec_map_to %s; ec_subgroup_check %s", + curve, curve), pairingVersion, "bad encoded element length") + } + + testAccepts(t, ` +int 32; bzero +int 67; itob; b| +int 32; bzero +int 2783; itob; b| +concat +ec_map_to BN254g2 +ec_subgroup_check BN254g2`, pairingVersion) + + testAccepts(t, ` +int 48; bzero +int 67; itob; b| +int 48; bzero +int 2783; itob; b| +concat +ec_map_to BLS12_381g2 +ec_subgroup_check BLS12_381g2`, pairingVersion) + +} + +func BenchmarkBn254(b *testing.B) { + if pairingVersion > LogicVersion { + b.Skip() + } + + was := eccMontgomery.NbTasks + eccMontgomery.NbTasks = 1 + defer func() { eccMontgomery.NbTasks = was }() + + fpbytes := fmt.Sprintf("byte 0x%s\n", + strings.Repeat("00", 1)+strings.Repeat("22", bn254fpSize-1)) + fp2bytes := fpbytes + fpbytes + "concat\n" + + kbytes := make([]byte, scalarSize) + rand.Read(kbytes) + byteK := fmt.Sprintf("byte 0x%s\n", 
hex.EncodeToString(kbytes)) + + g1point := bn254RandomG1() + g1bytes := bn254G1ToBytes(&g1point) + byteG1 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(g1bytes)) + + g2point := bn254RandomG2() + g2bytes := bn254G2ToBytes(&g2point) + byteG2 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(g2bytes)) + + b.Run("g1 add", func(b *testing.B) { + benchmarkOperation(b, byteG1, "dup; ec_add BN254g1", "pop; int 1") + }) + b.Run("g2 add", func(b *testing.B) { + benchmarkOperation(b, byteG2, "dup; ec_add BN254g2", "pop; int 1") + }) + + b.Run("g1 scalar_mul", func(b *testing.B) { + benchmarkOperation(b, byteG1, byteK+"ec_scalar_mul BN254g1", "pop; int 1") + }) + + b.Run("g2 scalar_mul", func(b *testing.B) { + benchmarkOperation(b, byteG2, byteK+"ec_scalar_mul BN254g2", "pop; int 1") + }) + + b.Run("g1 pairing f", func(b *testing.B) { + benchmarkOperation(b, "", byteG1+byteG2+"ec_pairing_check BN254g1; !; assert", "int 1") + }) + b.Run("g2 pairing f", func(b *testing.B) { + benchmarkOperation(b, "", byteG2+byteG1+"ec_pairing_check BN254g2; !; assert", "int 1") + }) + + var g1GenNeg bn254.G1Affine + g1GenNeg.Neg(&bnG1Gen) + g1points := []bn254.G1Affine{g1GenNeg, bnG1Gen} + g2points := []bn254.G2Affine{bnG2Gen, bnG2Gen} + // -1 g1 g2 + g1 g2 = 0 + g1pbytes := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G1sToBytes(g1points))) + g2pbytes := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bn254G2sToBytes(g2points))) + + b.Run("g1 pairing t2", func(b *testing.B) { + benchmarkOperation(b, "", g1pbytes+g2pbytes+"ec_pairing_check BN254g1; assert", "int 1") + }) + b.Run("g2 pairing t2", func(b *testing.B) { + benchmarkOperation(b, "", g2pbytes+g1pbytes+"ec_pairing_check BN254g2; assert", "int 1") + }) + b.Run("g1 pairing t4", func(b *testing.B) { + benchmarkOperation(b, "", + g1pbytes+g1pbytes+"concat;"+g2pbytes+g2pbytes+"concat; ec_pairing_check BN254g1; assert", "int 1") + }) + b.Run("g2 pairing t4", func(b *testing.B) { + benchmarkOperation(b, "", + 
g2pbytes+g2pbytes+"concat;"+g1pbytes+g1pbytes+"concat; ec_pairing_check BN254g2; assert", "int 1") + }) + + for _, size := range []int{1, 10, 20, 30, 40, 50} { + g1s := byteRepeat(g1bytes, size) + ks := byteRepeat(kbytes, size) + b.Run(fmt.Sprintf("g1 multi_exp %d", size), func(b *testing.B) { + benchmarkOperation(b, "", g1s+ks+"ec_multi_exp BN254g1; pop", "int 1") + }) + } + for _, size := range []int{1, 5, 10, 15, 20, 25} { + g2s := byteRepeat(g2bytes, size) + ks := byteRepeat(kbytes, size) + b.Run(fmt.Sprintf("g2 multi_exp %d", size), func(b *testing.B) { + benchmarkOperation(b, "", g2s+ks+"ec_multi_exp BN254g2; pop", "int 1") + }) + } + + b.Run("g1 subgroup", func(b *testing.B) { + benchmarkOperation(b, "", byteG1+"ec_subgroup_check BN254g1; pop", "int 1") + }) + b.Run("g2 subgroup", func(b *testing.B) { + benchmarkOperation(b, "", byteG2+"ec_subgroup_check BN254g2; pop", "int 1") + }) + + b.Run("g1 map to", func(b *testing.B) { + benchmarkOperation(b, "", fpbytes+"ec_map_to BN254g1; pop", "int 1") + }) + b.Run("g2 map to", func(b *testing.B) { + benchmarkOperation(b, "", fp2bytes+"ec_map_to BN254g2; pop", "int 1") + }) + +} + +func bn254RandomG1() bn254.G1Affine { + var fp bn254fp.Element + fp.SetRandom() + return bn254.MapToG1(fp) +} + +func bn254RandomG2() bn254.G2Affine { + fp2 := bn254.G2Affine{}.X // no way to declare an fptower.E2 + fp2.SetRandom() + return bn254.MapToG2(fp2) +} + +func byteRepeat(bytes []byte, count int) string { + return "byte 0x" + strings.Repeat(hex.EncodeToString(bytes), count) + "\n" +} + +func BenchmarkBls12381(b *testing.B) { + if pairingVersion > LogicVersion { + b.Skip() + } + + was := eccMontgomery.NbTasks + eccMontgomery.NbTasks = 0 + defer func() { eccMontgomery.NbTasks = was }() + + fpbytes := fmt.Sprintf("byte 0x%s\n", + strings.Repeat("00", 1)+strings.Repeat("22", bls12381fpSize-1)) + fp2bytes := fpbytes + fpbytes + "concat\n" + + kbytes := make([]byte, scalarSize) + rand.Read(kbytes) + byteK := fmt.Sprintf("byte 0x%s\n", 
hex.EncodeToString(kbytes)) + + g1point := bls12381RandomG1() + g1bytes := bls12381G1ToBytes(&g1point) + byteG1 := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(g1bytes)) + + g2point := bls12381RandomG2() + g2bytes := bls12381G2ToBytes(&g2point) + byteG2 := byteRepeat(g2bytes, 1) + + b.Run("g1 add", func(b *testing.B) { + benchmarkOperation(b, byteG1, "dup; ec_add BLS12_381g1", "pop; int 1") + }) + b.Run("g2 add", func(b *testing.B) { + benchmarkOperation(b, byteG2, "dup; ec_add BLS12_381g2", "pop; int 1") + }) + + b.Run("g1 scalar_mul", func(b *testing.B) { + benchmarkOperation(b, byteG1, byteK+"ec_scalar_mul BLS12_381g1", "pop; int 1") + }) + b.Run("g2 scalar_mul", func(b *testing.B) { + benchmarkOperation(b, byteG2, byteK+"ec_scalar_mul BLS12_381g2", "pop; int 1") + }) + + b.Run("g1 pairing f", func(b *testing.B) { + benchmarkOperation(b, "", byteG1+byteG2+"ec_pairing_check BLS12_381g1; pop", "int 1") + }) + b.Run("g2 pairing f", func(b *testing.B) { + benchmarkOperation(b, "", byteG2+byteG1+"ec_pairing_check BLS12_381g2; pop", "int 1") + }) + + var g1GenNeg bls12381.G1Affine + g1GenNeg.Neg(&blsG1Gen) + g1points := []bls12381.G1Affine{g1GenNeg, blsG1Gen} + g2points := []bls12381.G2Affine{blsG2Gen, blsG2Gen} + // -1 g1 g2 + g1 g2 = 0 + g1pbytes := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bls12381G1sToBytes(g1points))) + g2pbytes := fmt.Sprintf("byte 0x%s\n", hex.EncodeToString(bls12381G2sToBytes(g2points))) + + b.Run("g1 pairing t2", func(b *testing.B) { + benchmarkOperation(b, "", g1pbytes+g2pbytes+"ec_pairing_check BLS12_381g1; assert", "int 1") + }) + b.Run("g2 pairing t2", func(b *testing.B) { + benchmarkOperation(b, "", g2pbytes+g1pbytes+"ec_pairing_check BLS12_381g2; assert", "int 1") + }) + b.Run("g1 pairing t4", func(b *testing.B) { + benchmarkOperation(b, "", + g1pbytes+g1pbytes+"concat;"+g2pbytes+g2pbytes+"concat; ec_pairing_check BLS12_381g1; assert", "int 1") + }) + b.Run("g2 pairing t4", func(b *testing.B) { + benchmarkOperation(b, "", + 
g2pbytes+g2pbytes+"concat;"+g1pbytes+g1pbytes+"concat; ec_pairing_check BLS12_381g2; assert", "int 1") + }) + + for _, size := range []int{1, 5, 10, 15, 20, 25} { + g1s := byteRepeat(g1bytes, size) + ks := byteRepeat(kbytes, size) + b.Run(fmt.Sprintf("g1 multi_exp %d", size), func(b *testing.B) { + benchmarkOperation(b, "", g1s+ks+"ec_multi_exp BLS12_381g1; pop", "int 1") + }) + } + for _, size := range []int{1, 3, 5, 7, 9, 11} { + g2s := byteRepeat(g2bytes, size) + ks := byteRepeat(kbytes, size) + b.Run(fmt.Sprintf("g2 multi_exp %d", size), func(b *testing.B) { + benchmarkOperation(b, "", g2s+ks+"ec_multi_exp BLS12_381g2; pop", "int 1") + }) + } + + b.Run("g1 subgroup", func(b *testing.B) { + benchmarkOperation(b, "", byteG1+"ec_subgroup_check BLS12_381g1; pop", "int 1") + }) + b.Run("g2 subgroup", func(b *testing.B) { + benchmarkOperation(b, "", byteG2+"ec_subgroup_check BLS12_381g2; pop", "int 1") + }) + + b.Run("g1 map to", func(b *testing.B) { + benchmarkOperation(b, "", fpbytes+"ec_map_to BLS12_381g1; pop", "int 1") + }) + b.Run("g2 map to", func(b *testing.B) { + benchmarkOperation(b, "", fp2bytes+"ec_map_to BLS12_381g2; pop", "int 1") + }) +} + +func bls12381RandomG1() bls12381.G1Affine { + var fp bls12381fp.Element + fp.SetRandom() + point := bls12381.MapToG1(fp) + for !point.IsOnCurve() { + fp.SetRandom() + point = bls12381.MapToG1(fp) + } + return point +} + +func bls12381RandomG2() bls12381.G2Affine { + fp2 := bls12381.G2Affine{}.X // no way to declare an fptower.E2 + fp2.SetRandom() + point := bls12381.MapToG2(fp2) + for !point.IsOnCurve() { + fp2.SetRandom() + point = bls12381.MapToG2(fp2) + } + return point +} + +var bnG1Gen bn254.G1Affine +var bnG2Gen bn254.G2Affine + +func init() { + var g1GenJac bn254.G1Jac + var g2GenJac bn254.G2Jac + + g1GenJac.X.SetOne() + g1GenJac.Y.SetUint64(2) + g1GenJac.Z.SetOne() + + g2GenJac.X.SetString( + "10857046999023057135944570762232829481370756359578518086990519993285655852781", + 
"11559732032986387107991004021392285783925812861821192530917403151452391805634") + g2GenJac.Y.SetString( + "8495653923123431417604973247489272438418190587263600148770280649306958101930", + "4082367875863433681332203403145435568316851327593401208105741076214120093531") + g2GenJac.Z.SetString("1", "0") + + bnG1Gen.FromJacobian(&g1GenJac) + bnG2Gen.FromJacobian(&g2GenJac) +} + +var blsG1Gen bls12381.G1Affine +var blsG2Gen bls12381.G2Affine + +func init() { + var g1GenJac bls12381.G1Jac + var g2GenJac bls12381.G2Jac + + g1GenJac.X.SetOne() + g1GenJac.Y.SetUint64(2) + g1GenJac.Z.SetOne() + + g1GenJac.X.SetString("3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507") + g1GenJac.Y.SetString("1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569") + g1GenJac.Z.SetOne() + + g2GenJac.X.SetString( + "352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160", + "3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758") + g2GenJac.Y.SetString( + "1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905", + "927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582") + g2GenJac.Z.SetString("1", "0") + + blsG1Gen.FromJacobian(&g1GenJac) + blsG2Gen.FromJacobian(&g2GenJac) +} diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json index 59dc143681..3a31b38e73 100644 --- a/data/transactions/logic/teal.tmLanguage.json +++ b/data/transactions/logic/teal.tmLanguage.json @@ -76,7 +76,7 @@ }, { "name": "keyword.operator.teal", - "match": 
"^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|bn256_add|bn256_pairing|bn256_scalar_mul|btoi|concat|divmodw|divw|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|vrf_verify|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|replace2|replace3|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b" + "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|concat|divmodw|divw|ec_add|ec_map_to|ec_multi_exp|ec_pairing_check|ec_scalar_mul|ec_subgroup_check|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|exp|expw|getbit|getbyte|itob|keccak256|len|mulw|setbit|setbyte|sha256|sha3_256|sha512_256|shl|shr|sqrt|vrf_verify|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|extract|extract3|extract_uint16|extract_uint32|extract_uint64|json_ref|replace2|replace3|substring|substring3|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b" } ] }, @@ -112,7 +112,7 @@ }, { "name": "variable.parameter.teal", - "match": 
"\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|AcctTotalNumUint|AcctTotalNumByteSlice|AcctTotalExtraAppPages|AcctTotalAppsCreated|AcctTotalAppsOptedIn|AcctTotalAssetsCreated|AcctTotalAssets|AcctTotalBoxes|AcctTotalBoxBytes|VrfAlgorand|BlkSeed|BlkTimestamp)\\b" + "match": 
"\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|AcctTotalNumUint|AcctTotalNumByteSlice|AcctTotalExtraAppPages|AcctTotalAppsCreated|AcctTotalAppsOptedIn|AcctTotalAssetsCreated|AcctTotalAssets|AcctTotalBoxes|AcctTotalBoxBytes|VrfAlgorand|BlkSeed|BlkTimestamp|BN254g1|BN254g2|BLS12_381g1|BLS12_381g2)\\b" } ] },