From c629c389b80397400285065ccb16c1d10d2067ad Mon Sep 17 00:00:00 2001
From: D3v
Date: Tue, 14 Nov 2023 21:53:57 +0100
Subject: [PATCH 01/12] Interface-based refactor, broken aged

---
 Makefile                                      |    6 +-
 README.md                                     |   42 +-
 aged/age_bind.go                              |  192 ++-
 aged/age_bind_test.go                         |  318 ++--
 aged/obf.go                                   |   12 +-
 aged/obf_test.go                              |   44 +-
 aged/stream.go                                |   11 -
 asymmetric/asymmetric.go                      |   97 +-
 asymmetric/asymmetric_test.go                 |   75 +-
 compression/compression.go                    |  211 ++-
 compression/compression_test.go               |  121 +-
 generic/csprng.go                             |   15 +
 generic/csprng_test.go                        |    8 +
 generic/fs.go                                 |    4 +-
 generic/utils.go                              |   25 +
 go.mod                                        |    2 +-
 go.sum                                        |    4 +-
 hash/fs.go                                    |    4 +-
 hash/hash.go                                  |  287 +++-
 hash/hash_test.go                             |  229 ++-
 hash/kdf.go                                   |  253 ++--
 hash/kdf_test.go                              |   54 +-
 symmetric/symmetric.go                        |  142 +-
 symmetric/symmetric_test.go                   |   28 +-
 .../klauspost/compress/flate/deflate.go       | 1017 +++++++++++++
 .../klauspost/compress/flate/dict_decoder.go  |  184 +++
 .../klauspost/compress/flate/fast_encoder.go  |  193 +++
 .../compress/flate/huffman_bit_writer.go      | 1182 +++++++++++++++
 .../klauspost/compress/flate/huffman_code.go  |  417 ++++++
 .../compress/flate/huffman_sortByFreq.go      |  159 ++
 .../compress/flate/huffman_sortByLiteral.go   |  201 +++
 .../klauspost/compress/flate/inflate.go       |  829 +++++++++++
 .../klauspost/compress/flate/inflate_gen.go   | 1283 +++++++++++++++++
 .../klauspost/compress/flate/level1.go        |  241 ++++
 .../klauspost/compress/flate/level2.go        |  214 +++
 .../klauspost/compress/flate/level3.go        |  241 ++++
 .../klauspost/compress/flate/level4.go        |  221 +++
 .../klauspost/compress/flate/level5.go        |  708 +++++++++
 .../klauspost/compress/flate/level6.go        |  325 +++++
 .../compress/flate/matchlen_amd64.go          |   16 +
 .../klauspost/compress/flate/matchlen_amd64.s |   68 +
 .../compress/flate/matchlen_generic.go        |   33 +
 .../klauspost/compress/flate/regmask_amd64.go |   37 +
 .../klauspost/compress/flate/regmask_other.go |   40 +
 .../klauspost/compress/flate/stateless.go     |  318 ++++
 .../klauspost/compress/flate/token.go         |  379 +++++
 .../klauspost/compress/gzip/gunzip.go         |  375 +++++
 .../klauspost/compress/gzip/gzip.go           |  290 ++++
 .../klauspost/compress/zlib/reader.go         |  183 +++
 .../klauspost/compress/zlib/writer.go         |  201 +++
 .../x/crypto/argon2/blamka_amd64.go           |    1 -
 .../golang.org/x/crypto/argon2/blamka_amd64.s |    1 -
 .../golang.org/x/crypto/argon2/blamka_ref.go  |    1 -
 .../x/crypto/blake2b/blake2bAVX2_amd64.go     |    1 -
 .../x/crypto/blake2b/blake2bAVX2_amd64.s      |    1 -
 .../x/crypto/blake2b/blake2b_amd64.go         |    1 -
 .../x/crypto/blake2b/blake2b_amd64.s          |    1 -
 .../x/crypto/blake2b/blake2b_ref.go           |    1 -
 .../golang.org/x/crypto/blake2b/register.go   |    1 -
 .../x/crypto/chacha20/chacha_arm64.go         |    1 -
 .../x/crypto/chacha20/chacha_arm64.s          |    1 -
 .../x/crypto/chacha20/chacha_noasm.go         |    1 -
 .../x/crypto/chacha20/chacha_ppc64le.go       |    1 -
 .../x/crypto/chacha20/chacha_ppc64le.s        |    1 -
 .../x/crypto/chacha20/chacha_s390x.go         |    1 -
 .../x/crypto/chacha20/chacha_s390x.s          |    1 -
 .../chacha20poly1305_amd64.go                 |    1 -
 .../chacha20poly1305/chacha20poly1305_amd64.s |   25 +-
 .../chacha20poly1305_noasm.go                 |    1 -
 .../curve25519/internal/field/fe_amd64.go     |    1 -
 .../curve25519/internal/field/fe_amd64.s      |    1 -
 .../internal/field/fe_amd64_noasm.go          |    1 -
 .../curve25519/internal/field/fe_arm64.go     |    1 -
 .../curve25519/internal/field/fe_arm64.s      |    1 -
 .../internal/field/fe_arm64_noasm.go          |    1 -
 vendor/golang.org/x/crypto/hkdf/hkdf.go       |    4 +-
 .../x/crypto/internal/alias/alias.go          |    1 -
 .../x/crypto/internal/alias/alias_purego.go   |    1 -
 .../x/crypto/internal/poly1305/bits_compat.go |    1 -
 .../x/crypto/internal/poly1305/bits_go1.13.go |    1 -
 .../x/crypto/internal/poly1305/mac_noasm.go   |    1 -
 .../x/crypto/internal/poly1305/sum_amd64.go   |    1 -
 .../x/crypto/internal/poly1305/sum_amd64.s    |    1 -
 .../x/crypto/internal/poly1305/sum_ppc64le.go |    1 -
 .../x/crypto/internal/poly1305/sum_ppc64le.s  |    1 -
 .../x/crypto/internal/poly1305/sum_s390x.go   |    1 -
 .../x/crypto/internal/poly1305/sum_s390x.s    |    1 -
 .../x/crypto/salsa20/salsa/salsa20_amd64.go   |    1 -
 .../x/crypto/salsa20/salsa/salsa20_amd64.s    |    1 -
 .../x/crypto/salsa20/salsa/salsa20_noasm.go   |    1 -
 vendor/golang.org/x/crypto/sha3/doc.go        |   62 +
 vendor/golang.org/x/crypto/sha3/hashes.go     |   97 ++
 .../x/crypto/sha3/hashes_generic.go           |   27 +
 vendor/golang.org/x/crypto/sha3/keccakf.go    |  414 ++++++
 .../golang.org/x/crypto/sha3/keccakf_amd64.go |   13 +
 .../golang.org/x/crypto/sha3/keccakf_amd64.s  |  390 +++++
 vendor/golang.org/x/crypto/sha3/register.go   |   18 +
 vendor/golang.org/x/crypto/sha3/sha3.go       |  197 +++
 vendor/golang.org/x/crypto/sha3/sha3_s390x.go |  288 ++++
 vendor/golang.org/x/crypto/sha3/sha3_s390x.s  |   33 +
 vendor/golang.org/x/crypto/sha3/shake.go      |  172 +++
 .../golang.org/x/crypto/sha3/shake_generic.go |   19 +
 vendor/golang.org/x/crypto/sha3/xor.go        |   23 +
 .../golang.org/x/crypto/sha3/xor_generic.go   |   28 +
 .../golang.org/x/crypto/sha3/xor_unaligned.go |   66 +
 vendor/modules.txt                            |   10 +-
 106 files changed, 12857 insertions(+), 606 deletions(-)
 create mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/level1.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/level2.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/level3.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/level4.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/level5.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/level6.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_generic.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/token.go
 create mode 100644 vendor/github.com/klauspost/compress/gzip/gunzip.go
 create mode 100644 vendor/github.com/klauspost/compress/gzip/gzip.go
 create mode 100644 vendor/github.com/klauspost/compress/zlib/reader.go
 create mode 100644 vendor/github.com/klauspost/compress/zlib/writer.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/doc.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/hashes.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/hashes_generic.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
 create mode 100644 vendor/golang.org/x/crypto/sha3/register.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/sha3.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.s
 create mode 100644 vendor/golang.org/x/crypto/sha3/shake.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/shake_generic.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/xor.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/xor_generic.go
 create mode 100644 vendor/golang.org/x/crypto/sha3/xor_unaligned.go

diff --git a/Makefile b/Makefile
index 7e62a0b..debc931 100644
--- a/Makefile
+++ b/Makefile
@@ -1,11 +1,11 @@
 lint:
-	golangci-lint run
+	golangci-lint run --fix
 
 test:
-	go test -cover ./...
+	go clean -testcache && go test -cover ./...
 
 test-v:
-	go test ./... -v
+	go clean -testcache && go test ./... -v
 
 golangci-lint-install:
 	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.0
diff --git a/README.md b/README.md
index 4f201ef..6b8875f 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,43 @@
-# Go-Crypt
+# Go-Crypt (!!! WIP !!!)
 
 High-level API binding to low-level crypto APIs in golang
+
+
+Crypto suite:
+- Generic
+  - (Secure) Overwrite
+  - (Secure) Delete
+  - CSPRNG
+  - CSPRNGHex
+  - /dev/hwrng
+- Symmetric
+  - XChacha20-poly1305
+  - XChacha20-poly1305 Stream (modified age code)
+  - XOR (OTP)
+  - AES-GCM (pending)
+- Asymmetric
+  - ed25519
+  - ed448
+  - x25519 (pending)
+- Hash
+  - Blake2b-256
+  - Blake2b-384
+  - Blake2b-512
+  - Argon2id
+  - Scrypt (pending)
+  - HKDF (pending)
+  - SHA3-256
+  - SHA3-384
+  - SHA3-512
+  - SHAKE-128 (pending)
+  - SHAKE-256 (pending)
+  - go_simhash (pending)
+- Compression
+  - flate
+  - gzip
+  - zlib
+  - zstd
+- Aged
+  - Age encryption suite
+  - Age header obfuscation
+
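The suite list in the README maps onto small concrete APIs in the hunks that follow. As a taste, a minimal sketch using the generic CSPRNG helpers (signatures taken from the generic/csprng.go hunk later in this patch; the wrapping main is illustrative only, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/D3vl0per/crypt/generic"
)

func main() {
	// 32 random bytes from crypto/rand, per generic.CSPRNG(n int64).
	key, err := generic.CSPRNG(32)
	if err != nil {
		panic(err)
	}

	// Hex-encoded variant of the same generator.
	hexKey, err := generic.CSPRNGHex(32)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(key), hexKey)
}
```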
diff --git a/aged/age_bind.go b/aged/age_bind.go
index ef90a07..875bbab 100644
--- a/aged/age_bind.go
+++ b/aged/age_bind.go
@@ -3,6 +3,7 @@ package aged
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"io"
 
 	"filippo.io/age"
@@ -14,22 +15,34 @@ type Keychain struct {
 	recipients []age.Recipient
 }
 
-func SetupKeychain(secretKey string, publicKeys []string) (Keychain, error) {
+type KeychainSetup struct {
+	SecretKey     string
+	PublicKeys    []string
+	SelfRecipient bool
+}
+
+func SetupKeychain(keychainSetup KeychainSetup) (Keychain, error) {
 	var keychain Keychain
-	identity, err := age.ParseX25519Identity(secretKey)
+	identity, err := age.ParseX25519Identity(keychainSetup.SecretKey)
 	if err != nil {
 		return Keychain{}, err
 	}
+
 	keychain.secretKey = identity
-	keychain.recipients = append(keychain.recipients, identity.Recipient())
 
-	for _, e := range publicKeys {
-		publicKey, err := age.ParseX25519Recipient(e)
-		if err != nil {
-			return Keychain{}, err
+	for _, e := range keychainSetup.PublicKeys {
+		if identity.Recipient().String() != e {
+			publicKey, err := age.ParseX25519Recipient(e)
+			if err != nil {
+				return Keychain{}, err
+			}
+			keychain.recipients = append(keychain.recipients, publicKey)
 		}
-		keychain.recipients = append(keychain.recipients, publicKey)
+	}
+
+	if keychainSetup.SelfRecipient {
+		keychain.recipients = append(keychain.recipients, identity.Recipient())
 	}
 
 	return keychain, nil
@@ -43,55 +56,44 @@ func GenKeypair() (*age.X25519Identity, error) {
 	return identity, nil
 }
 
-func (k Keychain) Encrypt(data []byte, compress bool, header bool) ([]byte, error) {
-	var reader *bytes.Reader
-	if compress {
-		raw, err := compression.GzipCompress(data, 6)
-		if err != nil {
-			return []byte{}, err
-		}
-		reader = bytes.NewReader(raw)
-	} else {
-		reader = bytes.NewReader(data)
-	}
+type Parameters struct {
+	Data        []byte
+	Compressor  compression.Compressor
+	Compress    bool
+	Obfuscation bool
+	Obfuscator  Obfuscation
+}
 
-	out := &bytes.Buffer{}
-	w, err := age.Encrypt(out, k.recipients...)
+func (k Keychain) Encrypt(p Parameters) ([]byte, error) {
+	in, err := compressor(p)
 	if err != nil {
 		return []byte{}, err
 	}
 
+	out := &bytes.Buffer{}
+	w, err := age.Encrypt(out, k.recipients...)
 	if err != nil {
 		return []byte{}, err
 	}
 
-	if _, err := io.Copy(w, reader); err != nil {
+	if _, err := io.Copy(w, in); err != nil {
 		return []byte{}, err
 	}
 	if err := w.Close(); err != nil {
 		return []byte{}, err
 	}
 
-	if header {
-		obf, err := ObfHeader(out.Bytes())
-		if err != nil {
-			return []byte{}, errors.New("failed to obfuscate header")
-		}
-		return obf, nil
-	}
-	return out.Bytes(), nil
+	return obfuscator(p, out.Bytes())
 }
 
-func (k Keychain) Decrypt(cipherdata []byte, compress bool, header bool) ([]byte, error) {
-	if header {
-		var err error
-		cipherdata, err = DeobfHeader(cipherdata)
-		if err != nil {
-			return []byte{}, errors.New("failed to deobfuscate header, maybe not encrypted")
-		}
+func (k Keychain) Decrypt(p Parameters) ([]byte, error) {
+	cipherData, err := deobfuscator(p)
+	if err != nil {
+		return []byte{}, err
 	}
 
-	r, err := age.Decrypt(bytes.NewReader(cipherdata), k.secretKey)
+	r, err := age.Decrypt(bytes.NewReader(cipherData), k.secretKey)
 	if err != nil {
 		return []byte{}, err
 	}
@@ -100,27 +102,13 @@ func (k Keychain) Decrypt(cipherdata []byte, compress bool, header bool) ([]byte
 		return []byte{}, err
 	}
 
-	if compress {
-		raw, err := compression.GzipDecompress(out.Bytes())
-		if err != nil {
-			return []byte{}, err
-		}
-		return raw, nil
-	}
-
-	return out.Bytes(), nil
+	return decompressor(p, out.Bytes())
 }
 
-func EncryptWithPwd(pwd string, data []byte, compress bool, header bool) ([]byte, error) {
-	var reader *bytes.Reader
-	if compress {
-		raw, err := compression.GzipCompress(data, 6)
-		if err != nil {
-			return []byte{}, err
-		}
-		reader = bytes.NewReader(raw)
-	} else {
-		reader = bytes.NewReader(data)
+func EncryptWithPwd(p Parameters, pwd string) ([]byte, error) {
+	in, err := compressor(p)
+	if err != nil {
+		return []byte{}, err
 	}
 
 	pwdRecepient, err := age.NewScryptRecipient(pwd)
@@ -138,30 +126,20 @@ func EncryptWithPwd(pwd string, data []byte, compress bool, header bool) ([]byte
 		return []byte{}, err
 	}
 
-	if _, err := io.Copy(w, reader); err != nil {
+	if _, err := io.Copy(w, in); err != nil {
 		return []byte{}, err
 	}
 	if err := w.Close(); err != nil {
 		return []byte{}, err
 	}
 
-	if header {
-		obf, err := ObfHeader(out.Bytes())
-		if err != nil {
-			return []byte{}, errors.New("failed to obfuscate header")
-		}
-		return obf, nil
-	}
-	return out.Bytes(), nil
+	return obfuscator(p, out.Bytes())
 }
 
-func DecryptWithPwd(pwd string, cipherdata []byte, compress bool, header bool) ([]byte, error) {
-	if header {
-		var err error
-		cipherdata, err = DeobfHeader(cipherdata)
-		if err != nil {
-			return []byte{}, errors.New("failed to deobfuscate header, maybe not encrypted")
-		}
+func DecryptWithPwd(p Parameters, pwd string) ([]byte, error) {
+	cipherData, err := deobfuscator(p)
+	if err != nil {
+		return []byte{}, err
 	}
 
 	pwdIdentity, err := age.NewScryptIdentity(pwd)
@@ -169,7 +147,7 @@ func DecryptWithPwd(pwd string, cipherdata []byte, compress bool, header bool) (
 		return []byte{}, err
 	}
 
-	r, err := age.Decrypt(bytes.NewReader(cipherdata), pwdIdentity)
+	r, err := age.Decrypt(bytes.NewReader(cipherData), pwdIdentity)
 	if err != nil {
 		return []byte{}, err
 	}
@@ -179,13 +157,73 @@ func DecryptWithPwd(pwd string, cipherdata []byte, compress bool, header bool) (
 		return []byte{}, err
 	}
 
-	if compress {
-		raw, err := compression.GzipDecompress(out.Bytes())
-		if err != nil {
-			return []byte{}, err
-		}
-		return raw, nil
-	}
+	return decompressor(p, out.Bytes())
+}
+
+func compressor(p Parameters) (*bytes.Reader, error) {
+	var in *bytes.Reader
+
+	if p.Compress {
+		var writer bytes.Buffer
+		compressorIn := bytes.NewReader(p.Data)
+
+		err := p.Compressor.CompressStream(compressorIn, &writer)
+		if err != nil {
+			return nil, err
+		}
+
+		in = bytes.NewReader(writer.Bytes())
+	} else {
+		in = bytes.NewReader(p.Data)
+	}
+	return in, nil
+}
+
+func decompressor(p Parameters, data []byte) ([]byte, error) {
+	if p.Compress {
+		raw, err := p.Compressor.Decompress(data)
+		if err != nil {
+			return []byte{}, err
+		}
+		return raw, nil
+	}
+	return data, nil
+}
+
+func obfuscator(p Parameters, in []byte) ([]byte, error) {
+	if p.Obfuscation {
+		obf, err := p.Obfuscator.Obfuscate(in)
+		if err != nil {
+			return []byte{}, errors.New("failed to obfuscate header")
+		}
+		return obf, nil
+	}
+	return in, nil
+}
+
+func deobfuscator(p Parameters) ([]byte, error) {
+	var cipherData []byte
+	if p.Obfuscation {
+		var err error
+		cipherData, err = p.Obfuscator.Deobfuscate(p.Data)
+		if err != nil {
+			return []byte{}, errors.New("failed to deobfuscate header, maybe not encrypted")
+		}
+	} else {
+		cipherData = p.Data
+	}
+	return cipherData, nil
+}
+
+func (k Keychain) KeychainExport() []string {
+	keys := make([]string, 0, len(k.recipients))
+	for _, key := range k.recipients {
+		keys = append(keys, fmt.Sprint(key))
+	}
+	return keys
+}
 
-	return out.Bytes(), nil
+func (k Keychain) KeychainExportSecretKey() string {
+	return k.secretKey.String()
 }
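A round-trip sketch against the refactored Keychain API above. The `Parameters` fields and helper types are as defined in this file; the keys and payload are made up for illustration, and `Decrypt` reads its ciphertext from `Parameters.Data`:

```go
package main

import (
	"fmt"

	"github.com/D3vl0per/crypt/aged"
	"github.com/D3vl0per/crypt/compression"
)

func main() {
	identity, err := aged.GenKeypair()
	if err != nil {
		panic(err)
	}

	keychain, err := aged.SetupKeychain(aged.KeychainSetup{
		SecretKey:     identity.String(),
		SelfRecipient: true, // also encrypt to our own recipient
	})
	if err != nil {
		panic(err)
	}

	params := aged.Parameters{
		Data:        []byte("attack at dawn"),
		Compressor:  &compression.Gzip{Level: 6},
		Compress:    true,
		Obfuscator:  &aged.AgeV1Obf{},
		Obfuscation: true,
	}

	ciphertext, err := keychain.Encrypt(params)
	if err != nil {
		panic(err)
	}

	params.Data = ciphertext // Decrypt takes the ciphertext via Data
	plaintext, err := keychain.Decrypt(params)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}
```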
diff --git a/aged/age_bind_test.go b/aged/age_bind_test.go
index e97b463..ddcb2d8 100644
--- a/aged/age_bind_test.go
+++ b/aged/age_bind_test.go
@@ -5,17 +5,12 @@ import (
 
 	"filippo.io/age"
 	"github.com/D3vl0per/crypt/aged"
+	"github.com/D3vl0per/crypt/compression"
 	"github.com/D3vl0per/crypt/generic"
 	a "github.com/stretchr/testify/assert"
 	r "github.com/stretchr/testify/require"
 )
 
-var (
-	//nolint:gochecknoglobals
-	plainData []byte = []byte("4ukxipMYfoXNbaEClwKAQHz4kHLQHoIh Correct Horse Battery Staple l44zAP9dBPk1OyUxH7Vyfhwuk76kq1QZ")
-)
-
 type chains struct {
 	secretKey1    *age.X25519Identity
 	publicKey1    *age.X25519Identity
@@ -24,6 +19,7 @@ type chains struct {
 	keychain      aged.Keychain
 	keychain2     aged.Keychain
 	keychainWrong aged.Keychain
+	plainData     []byte
 }
 
 func keychainInit(t *testing.T) chains {
@@ -37,11 +33,28 @@ func keychainInit(t *testing.T) chains {
 	wrongKeypair, err := aged.GenKeypair()
 	r.NoError(t, err)
 
-	keychain, err := aged.SetupKeychain(secretKey1.String(), []string{publicKey1.Recipient().String(), publicKey2.Recipient().String()})
+	keychain, err := aged.SetupKeychain(aged.KeychainSetup{
+		SecretKey:     secretKey1.String(),
+		PublicKeys:    []string{publicKey1.Recipient().String(), publicKey2.Recipient().String()},
+		SelfRecipient: true,
+	})
+	r.NoError(t, err)
+
+	keychain2, err := aged.SetupKeychain(aged.KeychainSetup{
+		SecretKey:     publicKey1.String(),
+		PublicKeys:    []string{secretKey1.Recipient().String(), publicKey2.Recipient().String()},
+		SelfRecipient: true,
+	})
 	r.NoError(t, err)
-	keychain2, err := aged.SetupKeychain(publicKey1.String(), []string{secretKey1.Recipient().String(), publicKey2.Recipient().String()})
+
+	keychainWrong, err := aged.SetupKeychain(aged.KeychainSetup{
+		SecretKey:     wrongKeypair.String(),
+		PublicKeys:    []string{secretKey1.Recipient().String(), publicKey2.Recipient().String()},
+		SelfRecipient: true,
+	})
 	r.NoError(t, err)
-	keychainWrong, err := aged.SetupKeychain(wrongKeypair.String(), []string{secretKey1.Recipient().String(), publicKey2.Recipient().String()})
+
+	plainData, err := generic.CSPRNG(128)
 	r.NoError(t, err)
 
 	return chains{
@@ -52,129 +65,244 @@ func keychainInit(t *testing.T) chains {
 		keychain:      keychain,
 		keychain2:     keychain2,
 		keychainWrong: keychainWrong,
+		plainData:     plainData,
 	}
 }
 
-func TestEncryptAndDecryptPlain(t *testing.T) {
-	keychains := keychainInit(t)
+func TestRoundTrips(t *testing.T) {
+	config := keychainInit(t)
 
-	cipherData, err := keychains.keychain.Encrypt(plainData, false, false)
-	r.NoError(t, err, "Encryption without error")
-	t.Logf("Original size:%d Processed size: %d", len(plainData), len(cipherData))
+	big, err := generic.CSPRNG(10485760)
+	r.NoError(t, err)
 
-	decryptedData, err2 := keychains.keychain.Decrypt(cipherData, false, false)
-	r.NoError(t, err2, "Decryption without error")
-	r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
+	p := []aged.Parameters{
+		// No compress, no obfuscation
+		{
+			Data:        config.plainData,
+			Obfuscation: false,
+			Compress:    false,
+		},
+		// No compress, obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscator:  &aged.AgeV1Obf{},
+			Obfuscation: true,
+			Compress:    false,
+		},
+		// Compress with Gzip, no obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscation: false,
+			Compressor:  &compression.Gzip{},
+			Compress:    true,
+		},
+		// Compress with Gzip, obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscator:  &aged.AgeV1Obf{},
+			Obfuscation: true,
+			Compressor:  &compression.Gzip{},
+			Compress:    true,
+		},
+		// Compress with Zstd, no obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscation: false,
+			Compressor:  &compression.Zstd{},
+			Compress:    true,
+		},
+		// Compress with Zstd, obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscator:  &aged.AgeV1Obf{},
+			Obfuscation: true,
+			Compressor:  &compression.Zstd{},
+			Compress:    true,
+		},
+		// Compress with Flate, no obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscation: false,
+			Compressor:  &compression.Flate{},
+			Compress:    true,
+		},
+		// Compress with Flate, obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscator:  &aged.AgeV1Obf{},
+			Obfuscation: true,
+			Compressor:  &compression.Flate{},
+			Compress:    true,
+		},
+		// Compress with Zlib, no obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscation: false,
+			Compressor:  &compression.Zlib{},
+			Compress:    true,
+		},
+		// Compress with Zlib, obfuscate
+		{
+			Data:        config.plainData,
+			Obfuscator:  &aged.AgeV1Obf{},
+			Obfuscation: true,
+			Compressor:  &compression.Zlib{},
+			Compress:    true,
+		},
+		// Compress big file with Zstd, obfuscate
+		{
+			Data:        big,
+			Obfuscator:  &aged.AgeV1Obf{},
+			Obfuscation: true,
+			Compressor:  &compression.Zstd{},
+			Compress:    true,
+		},
+	}
 
-	decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, false, false)
-	r.NoError(t, err3, "Decryption two without error")
-	r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
+	for _, encryptParam := range p {
+		decryptParam := encryptParam
+		var err error
 
-	decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, false, false)
-	r.Equal(t, []byte{}, decryptedData3)
-	r.EqualError(t, err4, "no identity matched any of the recipients")
-}
+		decryptParam.Data, err = config.keychain.Encrypt(encryptParam)
+		r.NoError(t, err, "Encryption without error")
+		t.Logf("Original size:%d Processed size: %d", len(encryptParam.Data), len(decryptParam.Data))
 
-func TestEncryptAndDecryptCompress(t *testing.T) {
-	keychains := keychainInit(t)
+		decryptedData, err2 := config.keychain.Decrypt(decryptParam)
+		r.NoError(t, err2, "Decryption without error")
+		r.Equal(t, encryptParam.Data, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
 
-	cipherData, err := keychains.keychain.Encrypt(plainData, true, false)
-	r.NoError(t, err, "Encryption without error")
-	t.Logf("Original size:%d Processed size: %d", len(plainData), len(cipherData))
+		decryptedData2, err3 := config.keychain2.Decrypt(decryptParam)
+		r.NoError(t, err3, "Decryption two without error")
+		r.Equal(t, encryptParam.Data, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
 
-	decryptedData, err2 := keychains.keychain.Decrypt(cipherData, true, false)
-	r.NoError(t, err2, "Decryption without error")
-	r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
-
-	decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, true, false)
-	r.NoError(t, err3, "Decryption two without error")
-	r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
-
-	decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, true, false)
-	r.Equal(t, []byte{}, decryptedData3)
-	r.EqualError(t, err4, "no identity matched any of the recipients")
-}
-
-func TestEncryptAndDecryptObfuscated(t *testing.T) {
-	keychains := keychainInit(t)
-
-	cipherData, err := keychains.keychain.Encrypt(plainData, false, true)
-	r.NoError(t, err, "Encryption without error")
-	t.Logf("Original size:%d Processed size: %d", len(plainData), len(cipherData))
-
-	decryptedData, err2 := keychains.keychain.Decrypt(cipherData, false, true)
-	r.NoError(t, err2, "Decryption without error")
-	r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
-
-	decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, false, true)
-	r.NoError(t, err3, "Decryption two without error")
-	r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
-
-	decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, false, true)
-	r.Equal(t, []byte{}, decryptedData3)
-	r.EqualError(t, err4, "no identity matched any of the recipients")
-}
-
-func TestEncryptAndDecryptBigFile(t *testing.T) {
-	keychains := keychainInit(t)
-
-	plainText, err := generic.CSPRNG(10485760)
-	r.NoError(t, err, "Encryption without error")
-	cipherData, err := keychains.keychain.Encrypt(plainText, false, true)
-	r.NoError(t, err, "Encryption without error")
-	t.Logf("Original size:%d Processed size: %d", len(plainText), len(cipherData))
-
-	decryptedData, err2 := keychains.keychain.Decrypt(cipherData, false, true)
-	r.NoError(t, err2, "Decryption without error")
-	r.Equal(t, plainText, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
-
-	decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, false, true)
-	r.NoError(t, err3, "Decryption two without error")
-	r.Equal(t, plainText, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
-
-	decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, false, true)
-	r.Equal(t, []byte{}, decryptedData3)
-	r.EqualError(t, err4, "no identity matched any of the recipients")
-}
-
-func TestEncryptAndDecryptCompressAndObfuscated(t *testing.T) {
-	keychains := keychainInit(t)
-
-	cipherData, err := keychains.keychain.Encrypt(plainData, true, true)
-	r.NoError(t, err, "Encryption without error")
-	t.Logf("Size:%d", len(cipherData))
-
-	decryptedData, err2 := keychains.keychain.Decrypt(cipherData, true, true)
-	r.NoError(t, err2, "Decryption without error")
-	r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
-
-	decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, true, true)
-	r.NoError(t, err3, "Decryption two without error")
-	r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
-
-	decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, true, true)
-	r.Equal(t, []byte{}, decryptedData3)
-	r.EqualError(t, err4, "no identity matched any of the recipients")
-}
-
-func TestEncryptWithPwd(t *testing.T) {
-	key, err := generic.CSPRNG(32)
-	r.NoError(t, err, "CSPRNG without error")
-
-	cipherData, err := aged.EncryptWithPwd(string(key), plainData, true, true)
-	r.NoError(t, err, "Encryption without error")
-	t.Logf("Size: %d", len(cipherData))
-
-	decryptedData, err := aged.DecryptWithPwd(string(key), cipherData, true, true)
-	r.NoError(t, err, "Decryption without error")
-	r.Equal(t, plainData, decryptedData)
-}
+		decryptedData3, err4 := config.keychainWrong.Decrypt(decryptParam)
+		r.Equal(t, []byte{}, decryptedData3)
+		r.EqualError(t, err4, "no identity matched any of the recipients")
+	}
+}
+
+/*
+	func TestEncryptAndDecryptCompress(t *testing.T) {
+		keychains := keychainInit(t)
+
+		cipherData, err := keychains.keychain.Encrypt(plainData, true, false)
+		r.NoError(t, err, "Encryption without error")
+		t.Logf("Original size:%d Processed size: %d", len(plainData), len(cipherData))
+
+		decryptedData, err2 := keychains.keychain.Decrypt(cipherData, true, false)
+		r.NoError(t, err2, "Decryption without error")
+		r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
+
+		decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, true, false)
+		r.NoError(t, err3, "Decryption two without error")
+		r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
+
+		decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, true, false)
+		r.Equal(t, []byte{}, decryptedData3)
+		r.EqualError(t, err4, "no identity matched any of the recipients")
+	}
+
+	func TestEncryptAndDecryptObfuscated(t *testing.T) {
+		keychains := keychainInit(t)
+
+		cipherData, err := keychains.keychain.Encrypt(plainData, false, true)
+		r.NoError(t, err, "Encryption without error")
+		t.Logf("Original size:%d Processed size: %d", len(plainData), len(cipherData))
+
+		decryptedData, err2 := keychains.keychain.Decrypt(cipherData, false, true)
+		r.NoError(t, err2, "Decryption without error")
+		r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
+
+		decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, false, true)
+		r.NoError(t, err3, "Decryption two without error")
+		r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
+
+		decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, false, true)
+		r.Equal(t, []byte{}, decryptedData3)
+		r.EqualError(t, err4, "no identity matched any of the recipients")
+	}
+
+	func TestEncryptAndDecryptBigFile(t *testing.T) {
+		keychains := keychainInit(t)
+
+		plainText, err := generic.CSPRNG(10485760)
+		r.NoError(t, err, "Encryption without error")
+		cipherData, err := keychains.keychain.Encrypt(plainText, false, true)
+		r.NoError(t, err, "Encryption without error")
+		t.Logf("Original size:%d Processed size: %d", len(plainText), len(cipherData))
+
+		decryptedData, err2 := keychains.keychain.Decrypt(cipherData, false, true)
+		r.NoError(t, err2, "Decryption without error")
+		r.Equal(t, plainText, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
+
+		decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, false, true)
+		r.NoError(t, err3, "Decryption two without error")
+		r.Equal(t, plainText, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
+
+		decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, false, true)
+		r.Equal(t, []byte{}, decryptedData3)
+		r.EqualError(t, err4, "no identity matched any of the recipients")
+	}
+
+	func TestEncryptAndDecryptCompressAndObfuscated(t *testing.T) {
+		keychains := keychainInit(t)
+
+		cipherData, err := keychains.keychain.Encrypt(plainData, true, true)
+		r.NoError(t, err, "Encryption without error")
+		t.Logf("Size:%d", len(cipherData))
+
+		decryptedData, err2 := keychains.keychain.Decrypt(cipherData, true, true)
+		r.NoError(t, err2, "Decryption without error")
+		r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
+
+		decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, true, true)
+		r.NoError(t, err3, "Decryption two without error")
+		r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
+
+		decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, true, true)
+		r.Equal(t, []byte{}, decryptedData3)
+		r.EqualError(t, err4, "no identity matched any of the recipients")
+	}
+
+	func TestEncryptWithPwd(t *testing.T) {
+		key, err := generic.CSPRNG(32)
+		r.NoError(t, err, "CSPRNG without error")
+
+		cipherData, err := aged.EncryptWithPwd(string(key), plainData, true, true)
+		r.NoError(t, err, "Encryption without error")
+		t.Logf("Size: %d", len(cipherData))
+
+		decryptedData, err := aged.DecryptWithPwd(string(key), cipherData, true, true)
+		r.NoError(t, err, "Decryption without error")
+		r.Equal(t, plainData, decryptedData)
+	}
+*/
 
 func TestGenKeypair(t *testing.T) {
 	_, err := aged.GenKeypair()
 	r.NoError(t, err)
 }
 
+func TestKeychainImportExport(t *testing.T) {
+	keychain := keychainInit(t)
+
+	s := aged.KeychainSetup{
+		SecretKey:     keychain.keychain.KeychainExportSecretKey(),
+		PublicKeys:    keychain.keychain.KeychainExport(),
+		SelfRecipient: true,
+	}
+
+	t.Log("Public Keys: ", s.PublicKeys)
+	t.Log("Secret Key: ", s.SecretKey)
+
+	keychainExpected, err := aged.SetupKeychain(s)
+	r.NoError(t, err)
+
+	r.Equal(t, keychain.keychain.KeychainExportSecretKey(), keychainExpected.KeychainExportSecretKey())
+	r.Equal(t, keychain.keychain.KeychainExport(), keychainExpected.KeychainExport())
+}
+
 func TestKeychain(t *testing.T) {
 	identity, err := aged.GenKeypair()
 	r.NoError(t, err)
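The password path kept its behaviour but moved to the same `Parameters` struct (it was exercised by the now commented-out `TestEncryptWithPwd` above). A sketch of the new call shape — the password and payload are placeholders, and compression/obfuscation are simply left off:

```go
// Scrypt-based encryption: Parameters first, password second.
p := aged.Parameters{Data: []byte("secret note")}

ct, err := aged.EncryptWithPwd(p, "correct horse battery staple")
if err != nil {
	panic(err)
}

p.Data = ct // ciphertext goes back in via Data
pt, err := aged.DecryptWithPwd(p, "correct horse battery staple")
if err != nil {
	panic(err)
}
_ = pt // equals the original note
```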
diff --git a/aged/obf.go b/aged/obf.go
index faf1b34..4052f96 100644
--- a/aged/obf.go
+++ b/aged/obf.go
@@ -6,6 +6,13 @@ import (
 	"errors"
 )
 
+type Obfuscation interface {
+	Obfuscate([]byte) ([]byte, error)
+	Deobfuscate([]byte) ([]byte, error)
+}
+
+type AgeV1Obf struct{}
+
 var (
 	//nolint:gochecknoglobals
 	endOfHeader = []byte{45, 45, 45, 32}
@@ -16,7 +23,8 @@ var (
 
 const lengthOfKey = 47
 
-func ObfHeader(payload []byte) ([]byte, error) {
+func (a *AgeV1Obf) Obfuscate(payload []byte) ([]byte, error) {
 	headerIndex := bytes.Index(payload, endOfHeader)
 	if headerIndex == -1 {
 		return []byte{}, errors.New("missing end flag")
@@ -37,7 +45,7 @@ const lengthOfKey = 47
 	return bytes.ReplaceAll(payload, header, obfHeader), nil
 }
 
-func DeobfHeader(payload []byte) ([]byte, error) {
+func (a *AgeV1Obf) Deobfuscate(payload []byte) ([]byte, error) {
 	headerIndex := bytes.Index(payload, endFlag)
 	if headerIndex == -1 {
 		return []byte{}, errors.New("missing end flag")
diff --git a/aged/obf_test.go b/aged/obf_test.go
index 4499134..46e25d0 100644
--- a/aged/obf_test.go
+++ b/aged/obf_test.go
@@ -5,6 +5,8 @@ import (
 	"testing"
 
 	"github.com/D3vl0per/crypt/aged"
+	"github.com/D3vl0per/crypt/compression"
+	"github.com/D3vl0per/crypt/generic"
 	a "github.com/stretchr/testify/assert"
 	r "github.com/stretchr/testify/require"
 )
@@ -14,26 +16,48 @@ func TestObf(t *testing.T) {
 	r.NoError(t, err)
 	obfKeypair2, err := aged.GenKeypair()
 	r.NoError(t, err)
-	obfKeychain, err := aged.SetupKeychain(obfKeypair1.String(), []string{obfKeypair2.Recipient().String()})
+	obfuscator := aged.AgeV1Obf{}
+
+	obfKeychain, err := aged.SetupKeychain(aged.KeychainSetup{
+		SecretKey:     obfKeypair1.String(),
+		PublicKeys:    []string{obfKeypair2.Recipient().String()},
+		SelfRecipient: true,
+	})
+	r.NoError(t, err)
+
+	obfTestData, err := generic.CSPRNG(128)
 	r.NoError(t, err)
-	obfTestString := []byte("Testing")
 
-	obfEncrypted, err := obfKeychain.Encrypt(obfTestString, false, false)
+	obfEncrypted, err := obfKeychain.Encrypt(aged.Parameters{
+		Data:        obfTestData,
+		Compressor:  &compression.Gzip{Level: 6},
+		Compress:    true,
+		Obfuscation: false,
+	})
 	r.NoError(t, err)
-	a.True(t, bytes.Contains(obfEncrypted, []byte("age-encryption.org/")))
+	a.True(t, bytes.Contains(obfEncrypted, []byte("age-encryption.org/v1")))
 
-	obfEncryptedObf, err := aged.ObfHeader(obfEncrypted)
+	obfEncryptedObf, err := obfuscator.Obfuscate(obfEncrypted)
 	r.NoError(t, err)
-	a.False(t, bytes.Contains(obfEncryptedObf, []byte("age-encryption.org/")))
+	a.False(t, bytes.Contains(obfEncryptedObf, []byte("age-encryption.org/v1")))
 
-	obfEncryptedDeObf, err := aged.DeobfHeader(obfEncryptedObf)
+	obfEncryptedDeObf, err := obfuscator.Deobfuscate(obfEncryptedObf)
 	r.NoError(t, err)
-	a.True(t, bytes.Contains(obfEncryptedDeObf, []byte("age-encryption.org/")))
+	a.True(t, bytes.Contains(obfEncryptedDeObf, []byte("age-encryption.org/v1")))
 
 	r.Equal(t, obfEncryptedDeObf, obfEncrypted)
 
-	decrypted, err := obfKeychain.Decrypt(obfEncrypted, false, false)
+	decrypted, err := obfKeychain.Decrypt(aged.Parameters{
+		Data:        obfEncrypted,
+		Compressor:  &compression.Gzip{Level: 6},
+		Compress:    true,
+		Obfuscation: false,
+	})
 	r.NoError(t, err)
-	r.Equal(t, obfTestString, decrypted)
+	r.Equal(t, obfTestData, decrypted)
 }
diff --git a/aged/stream.go b/aged/stream.go
index 9658128..c9bde2c 100644
--- a/aged/stream.go
+++ b/aged/stream.go
@@ -6,13 +6,11 @@ package aged
 
 import (
 	"crypto/cipher"
-	"crypto/sha256"
 	"errors"
 	"fmt"
 	"io"
 
 	"golang.org/x/crypto/chacha20poly1305"
-	"golang.org/x/crypto/hkdf"
 
 	// nolint:staticcheck
 	"golang.org/x/crypto/poly1305"
@@ -240,12 +238,3 @@ func (w *Writer) flushChunk(last bool) error {
 	incNonce(&w.nonce)
 	return err
 }
-
-func StreamKey(fileKey, nonce []byte) []byte {
-	h := hkdf.New(sha256.New, fileKey, nonce, []byte("payload"))
-	streamKey := make([]byte, chacha20poly1305.KeySize)
-	if _, err := io.ReadFull(h, streamKey); err != nil {
-		panic(err)
-	}
-	return streamKey
-}
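A sketch of the `Obfuscation` round trip introduced in aged/obf.go: the obfuscator rewrites the cleartext age v1 header in place, so a ciphertext no longer advertises `age-encryption.org/v1`. Here `ciphertext` stands for any age v1 output, as in the test above:

```go
obf := aged.AgeV1Obf{}

// Mask the recognizable age header.
masked, err := obf.Obfuscate(ciphertext)
if err != nil {
	panic(err)
}

// Restore it before handing the data back to age.Decrypt.
restored, err := obf.Deobfuscate(masked)
if err != nil {
	panic(err)
}
// restored is byte-identical to ciphertext
```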
diff --git a/asymmetric/asymmetric.go b/asymmetric/asymmetric.go
index 2d86a74..22ea896 100644
--- a/asymmetric/asymmetric.go
+++ b/asymmetric/asymmetric.go
@@ -1,6 +1,7 @@
-package crypt
+package asymmetric
 
 import (
+	"crypto"
 	"crypto/ed25519"
 	"encoding/hex"
 	"errors"
@@ -14,62 +15,112 @@ import (
 /// Ed25519 Suite
 ///
 
-func GenerateEd25519Keypair() (ed25519.PublicKey, ed25519.PrivateKey, error) {
-	pk, sk, err := ed25519.GenerateKey(generic.Rand())
+type Encryption interface {
+	Encrypt()
+	Decrypt()
+}
+
+type Signing interface {
+	Generate() error
+	GenerateFromSeed([]byte) error
+	Sign([]byte) string
+	Verify([]byte, string) (bool, error)
+}
+
+type Ed25519 struct {
+	SecretKey ed25519.PrivateKey
+	PublicKey ed25519.PublicKey
+}
+
+type Ed448 struct {
+	SecretKey ed448.PrivateKey
+	PublicKey ed448.PublicKey
+	Context   string
+}
+
+func (e *Ed25519) Generate() error {
+	var err error
+	e.PublicKey, e.SecretKey, err = ed25519.GenerateKey(generic.Rand())
 	if err != nil {
-		return ed25519.PublicKey{}, ed25519.PrivateKey{}, err
+		return err
 	}
-	return pk, sk, err
+
+	return nil
 }
 
-func GenerateEd25519KeypairFromSeed(seed []byte) (ed25519.PrivateKey, error) {
+func (e *Ed25519) GenerateFromSeed(seed []byte) error {
 	if l := len(seed); l != ed25519.SeedSize {
-		return nil, errors.New(generic.StrCnct([]string{"seed size must be ", strconv.Itoa(ed25519.SeedSize), " bytes long"}...))
+		return errors.New(generic.StrCnct([]string{"seed size must be ", strconv.Itoa(ed25519.SeedSize), " bytes long"}...))
 	}
-	return ed25519.NewKeyFromSeed(seed), nil
+	var err error
+	e.SecretKey = ed25519.NewKeyFromSeed(seed)
+	e.PublicKey, err = Ed25519ToPublicKey(e.SecretKey.Public())
+	return err
 }
 
-func SignEd25519(sk ed25519.PrivateKey, msg []byte) string {
-	return hex.EncodeToString(ed25519.Sign(sk, msg))
+func (e *Ed25519) Sign(msg []byte) string {
+	return hex.EncodeToString(ed25519.Sign(e.SecretKey, msg))
 }
 
-func VerifyEd25519(pk ed25519.PublicKey, msg []byte, sig string) (bool, error) {
+func (e *Ed25519) Verify(msg []byte, sig string) (bool, error) {
 	sig_raw, err := hex.DecodeString(sig)
 	if err != nil {
 		return false, err
 	}
-	return ed25519.Verify(pk, msg, sig_raw), nil
+	return ed25519.Verify(e.PublicKey, msg, sig_raw), nil
 }
 
 ///
 /// ED448 Suite
 ///
 
-func GenerateEd448Keypair() (ed448.PublicKey, ed448.PrivateKey, error) {
-	pk, sk, err := ed448.GenerateKey(generic.Rand())
+func (e *Ed448) Generate() error {
+	var err error
+	e.PublicKey, e.SecretKey, err = ed448.GenerateKey(generic.Rand())
 	if err != nil {
-		return ed448.PublicKey{}, ed448.PrivateKey{}, err
+		return err
 	}
-	return pk, sk, err
+	return nil
 }
 
-func GenerateEd448KeypairFromSeed(seed []byte) (ed448.PrivateKey, error) {
+func (e *Ed448) GenerateFromSeed(seed []byte) error {
 	if l := len(seed); l != ed448.SeedSize {
-		return nil, errors.New(generic.StrCnct([]string{"seed size must be ", strconv.Itoa(ed448.SeedSize), " bytes long"}...))
+		return errors.New(generic.StrCnct([]string{"seed size must be ", strconv.Itoa(ed448.SeedSize), " bytes long"}...))
 	}
-	return ed448.NewKeyFromSeed(seed), nil
+	var err error
+	e.SecretKey = ed448.NewKeyFromSeed(seed)
+	e.PublicKey, err = Ed448ToPublicKey(e.SecretKey.Public())
+	return err
 }
 
-func SignEd448(sk ed25519.PrivateKey, msg []byte) string {
-	return hex.EncodeToString(ed25519.Sign(sk, msg))
+func (e *Ed448) Sign(msg []byte) string {
+	return hex.EncodeToString(ed448.Sign(e.SecretKey, msg, e.Context))
 }
 
-func VerifyEd448(pk ed448.PublicKey, msg []byte, sig string) (bool, error) {
+func (e *Ed448) Verify(msg []byte, sig string) (bool, error) {
 	sig_raw, err := hex.DecodeString(sig)
 	if err != nil {
 		return false, err
 	}
-	return ed448.Verify(pk, msg, sig_raw, ""), nil
+	return ed448.Verify(e.PublicKey, msg, sig_raw, e.Context), nil
+}
+
+func Ed25519ToPublicKey(pub crypto.PublicKey) (ed25519.PublicKey, error) {
+	switch pub := pub.(type) {
+	case ed25519.PublicKey:
+		return pub, nil
+	default:
+		return nil, errors.New("unexpected public key type")
+	}
+}
+
+func Ed448ToPublicKey(pub crypto.PublicKey) (ed448.PublicKey, error) {
+	switch pub := pub.(type) {
+	case ed448.PublicKey:
+		return pub, nil
+	default:
+		return nil, errors.New("unexpected public key type")
+	}
 }
diff --git a/asymmetric/asymmetric_test.go b/asymmetric/asymmetric_test.go
index f9f8a84..426a7d1 100644
--- a/asymmetric/asymmetric_test.go
+++ b/asymmetric/asymmetric_test.go
@@ -1,25 +1,27 @@
-package crypt
+package asymmetric_test
 
 import (
 	"encoding/hex"
 	"testing"
 
+	"github.com/D3vl0per/crypt/asymmetric"
 	"github.com/D3vl0per/crypt/generic"
 	a "github.com/stretchr/testify/assert"
 	r "github.com/stretchr/testify/require"
 )
 
 func TestGenerateEd25519Keypair(t *testing.T) {
-	pk, sk, err := GenerateEd25519Keypair()
+	asym := asymmetric.Ed25519{}
+	err := asym.Generate()
 	r.NoError(t, err)
-	a.Len(t, pk, 32, "generated key size not match")
-	a.Len(t, sk, 64, "generated key size not match")
-	r.NotEqual(t, pk, sk, "public and secret key are equal")
+	a.Len(t, asym.PublicKey, 32, "generated key size not match")
+	a.Len(t, asym.SecretKey, 64, "generated key size not match")
+	r.NotEqual(t, asym.PublicKey, asym.SecretKey, "public and secret key are equal")
 
-	t.Log("Ed25519 Secret Key:", sk)
-	t.Log("Ed25519 Secret Key Hex:", hex.EncodeToString(sk))
-	t.Log("Ed25519 Public Key:", pk)
-	t.Log("Ed25519 Public Key Hex:", hex.EncodeToString(pk))
+	t.Log("Ed25519 Secret Key:", asym.SecretKey)
+	t.Log("Ed25519 Secret Key Hex:", hex.EncodeToString(asym.SecretKey))
+	t.Log("Ed25519 Public Key:", asym.PublicKey)
+	t.Log("Ed25519 Public Key Hex:", hex.EncodeToString(asym.PublicKey))
 }
 
 // Deterministic key generation check.
@@ -27,25 +29,32 @@ func TestGenerateEd25519KeypairFromSeed(t *testing.T) {
 	rng, err := generic.CSPRNG(32)
 	r.NoError(t, err)
 
-	sk, err := GenerateEd25519KeypairFromSeed(rng)
+	asym := asymmetric.Ed25519{}
+
+	err = asym.GenerateFromSeed(rng)
 	r.NoError(t, err)
 
-	expectedSk, err := GenerateEd25519KeypairFromSeed(rng)
+	asym2 := asymmetric.Ed25519{}
+
+	err = asym2.GenerateFromSeed(rng)
 	r.NoError(t, err)
 
-	r.Equal(t, expectedSk, sk)
+	r.Equal(t, asym2.SecretKey, asym.SecretKey)
+	r.Equal(t, asym2.PublicKey, asym.PublicKey)
 }
 
 func TestE2EEEd25519SignVerify(t *testing.T) {
 	msg := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit.")
-	pk, sk, err := GenerateEd25519Keypair()
+	asym := asymmetric.Ed25519{}
+
+	err := asym.Generate()
 	r.NoError(t, err)
 
-	signature := SignEd25519(sk, msg)
+	signature := asym.Sign(msg)
 	r.NotEmpty(t, signature)
 
-	isValid, err := VerifyEd25519(pk, msg, signature)
+	isValid, err := asym.Verify(msg, signature)
 	r.NoError(t, err)
 	r.True(t, isValid)
 }
@@ -78,16 +87,17 @@ func TestKeyWrapping(t *testing.T) {
 */
 
 func TestGenerateEd448Keypair(t *testing.T) {
-	pk, sk, err := GenerateEd448Keypair()
+	asym := asymmetric.Ed448{}
+	err := asym.Generate()
 	r.NoError(t, err)
-	a.Len(t, pk, 57, "generated key size not match")
-	a.Len(t, sk, 114, "generated key size not match")
-	a.NotEqual(t, pk, sk, "public and secret key are equal")
+	a.Len(t, asym.PublicKey, 57, "generated key size not match")
+	a.Len(t, asym.SecretKey, 114, "generated key size not match")
+	a.NotEqual(t, asym.PublicKey, asym.SecretKey, "public and secret key are equal")
 
-	t.Log("Ed448 Secret Key:", sk)
-	t.Log("Ed448 Secret Key Hex:", hex.EncodeToString(sk))
-	t.Log("Ed448 Public Key:", pk)
-	t.Log("Ed448 Public Key Hex:", hex.EncodeToString(pk))
+	t.Log("Ed448 Secret Key:", asym.SecretKey)
+	t.Log("Ed448 Secret Key Hex:", hex.EncodeToString(asym.SecretKey))
+	t.Log("Ed448 Public Key:", asym.PublicKey)
+	t.Log("Ed448 Public Key Hex:", hex.EncodeToString(asym.PublicKey))
 }
 
 // Deterministic generation check.
@@ -95,25 +105,34 @@ func TestGenerateEd448KeypairFromSeed(t *testing.T) {
 	rng, err := generic.CSPRNG(57)
 	r.NoError(t, err)
 
-	sk, err := GenerateEd448KeypairFromSeed(rng)
+	asym := asymmetric.Ed448{}
+
+	err = asym.GenerateFromSeed(rng)
 	r.NoError(t, err)
 
-	expectedSk, err := GenerateEd448KeypairFromSeed(rng)
+	asym2 := asymmetric.Ed448{}
+
+	err = asym2.GenerateFromSeed(rng)
 	r.NoError(t, err)
 
-	r.Equal(t, expectedSk, sk)
+	r.Equal(t, asym2.SecretKey, asym.SecretKey)
+	r.Equal(t, asym2.PublicKey, asym.PublicKey)
 }
 
 func TestGenerateEd448KeypairFromSeedWithWrongSeedSize(t *testing.T) {
 	rng, err := generic.CSPRNG(32)
 	r.NoError(t, err)
 
-	_, err = GenerateEd448KeypairFromSeed(rng)
+	asym := asymmetric.Ed448{}
+
+	err = asym.GenerateFromSeed(rng)
 	r.EqualError(t, err, "seed size must be 57 bytes long")
 
 	rng, err = generic.CSPRNG(64)
 	r.NoError(t, err)
 
-	_, err = GenerateEd448KeypairFromSeed(rng)
+	asym2 := asymmetric.Ed448{}
+
+	err = asym2.GenerateFromSeed(rng)
 	r.EqualError(t, err, "seed size must be 57 bytes long")
 }
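With the `Verify` signature aligned to the implementations (see the `Signing` interface above, corrected to `Verify([]byte, string) (bool, error)`), both curves can sit behind one interface value. A sketch:

```go
// Ed25519 and Ed448 are interchangeable behind asymmetric.Signing.
var signer asymmetric.Signing = &asymmetric.Ed25519{}
if err := signer.Generate(); err != nil {
	panic(err)
}

sig := signer.Sign([]byte("message")) // hex-encoded signature

ok, err := signer.Verify([]byte("message"), sig)
if err != nil || !ok {
	panic("verification failed")
}
```

Swapping in `&asymmetric.Ed448{Context: "my-protocol"}` changes only the struct literal; the calling code stays identical.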
diff --git a/compression/compression.go b/compression/compression.go
index c688c86..52906eb 100644
--- a/compression/compression.go
+++ b/compression/compression.go
@@ -2,47 +2,89 @@ package compression
 
 import (
 	"bytes"
-	"compress/gzip"
 	"io"
 
+	"github.com/klauspost/compress/flate"
+	"github.com/klauspost/compress/gzip"
+	"github.com/klauspost/compress/zlib"
 	"github.com/klauspost/compress/zstd"
 )
 
-func GzipCompress(src []byte, level int) ([]byte, error) {
-	var buff bytes.Buffer
-	gzip, err := gzip.NewWriterLevel(&buff, level)
+type Compressor interface {
+	Compress([]byte) ([]byte, error)
+	Decompress([]byte) ([]byte, error)
+	CompressStream(io.Reader, io.Writer) error
+	DecompressStream(io.Reader, io.Writer) error
+	GetLevel() int
+}
+
+type Gzip struct {
+	Level int
+}
+
+func (g *Gzip) Compress(in []byte) ([]byte, error) {
+	reader := bytes.NewReader(in)
+	var compressedBuff bytes.Buffer
+
+	err := g.CompressStream(reader, &compressedBuff)
 	if err != nil {
-		return []byte{}, err
-	}
-	if _, err := gzip.Write(src); err != nil {
-		return []byte{}, err
+		return []byte{}, err
 	}
-	if err := gzip.Close(); err != nil {
-		return []byte{}, err
+
+	return compressedBuff.Bytes(), nil
+}
+
+func (g *Gzip) CompressStream(in io.Reader, out io.Writer) error {
+	enc, err := gzip.NewWriterLevel(out, g.Level)
+	if err != nil {
+		return err
 	}
-	return buff.Bytes(), nil
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		_ = enc.Close()
+		return err
+	}
+	return enc.Close()
 }
 
-func GzipDecompress(src []byte) ([]byte, error) {
-	rdata := bytes.NewReader(src)
-	gzip, err := gzip.NewReader(rdata)
+func (g *Gzip) Decompress(in []byte) ([]byte, error) {
+	reader := bytes.NewReader(in)
+	var deCompressedBuff bytes.Buffer
+
+	err := g.DecompressStream(reader, &deCompressedBuff)
 	if err != nil {
-		return []byte{}, err
+		return []byte{}, err
 	}
-	raw, err := io.ReadAll(gzip)
+
+	return deCompressedBuff.Bytes(), nil
+}
+
+func (g *Gzip) DecompressStream(in io.Reader, out io.Writer) error {
+	d, err := gzip.NewReader(in)
 	if err != nil {
-		return []byte{}, err
+		return err
 	}
+	defer d.Close()
+	_, err = io.Copy(out, d)
+	return err
+}
 
-	return raw, nil
+func (g *Gzip) GetLevel() int {
+	return g.Level
 }
 
-func ZstdCompress(raw []byte, options ...zstd.EOption) ([]byte, error) {
-	reader := bytes.NewReader(raw)
+type Zstd struct {
+	Level int
+}
+
+func (z *Zstd) Compress(in []byte) ([]byte, error) {
+	reader := bytes.NewReader(in)
 	var compressedBuff bytes.Buffer
 
-	err := ZstdCompressStream(reader, &compressedBuff, options...)
+	err := z.CompressStream(reader, &compressedBuff)
 	if err != nil {
-		return []byte{}, nil
+		return []byte{}, err
 	}
@@ -50,8 +92,9 @@ func ZstdCompress(raw []byte, options ...zstd.EOption) ([]byte, error) {
 	return compressedBuff.Bytes(), nil
 }
 
-func ZstdCompressStream(in io.Reader, out io.Writer, options ...zstd.EOption) error {
-	enc, err := zstd.NewWriter(out, options...)
+func (z *Zstd) CompressStream(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(z.Level)))
 	if err != nil {
 		return err
 	}
@@ -63,11 +106,11 @@ func ZstdCompressStream(in io.Reader, out io.Writer, options ...zstd.EOption) er
 	return enc.Close()
 }
 
-func ZstdDecompress(compressed []byte) ([]byte, error) {
-	reader := bytes.NewReader(compressed)
+func (z *Zstd) Decompress(in []byte) ([]byte, error) {
+	reader := bytes.NewReader(in)
 	var deCompressedBuff bytes.Buffer
 
-	err := ZstdDecompressStream(reader, &deCompressedBuff)
+	err := z.DecompressStream(reader, &deCompressedBuff)
 	if err != nil {
-		return []byte{}, nil
+		return []byte{}, err
 	}
@@ -75,7 +118,7 @@ func ZstdDecompress(compressed []byte) ([]byte, error) {
 	return deCompressedBuff.Bytes(), nil
 }
 
-func ZstdDecompressStream(in io.Reader, out io.Writer) error {
+func (z *Zstd) DecompressStream(in io.Reader, out io.Writer) error {
 	d, err := zstd.NewReader(in)
 	if err != nil {
 		d.Close()
@@ -85,3 +128,113 @@ func ZstdDecompressStream(in io.Reader, out io.Writer) error {
 	_, err = io.Copy(out, d)
 	return err
 }
+
+func (z *Zstd) GetLevel() int {
+	return z.Level
+}
+
+type Flate struct {
+	Level int
+}
+
+func (f *Flate) Compress(in []byte) ([]byte, error) {
+	reader := bytes.NewReader(in)
+	var compressedBuff bytes.Buffer
+
+	err := f.CompressStream(reader, &compressedBuff)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return compressedBuff.Bytes(), nil
+}
+
+func (f *Flate) CompressStream(in io.Reader, out io.Writer) error {
+	enc, err := flate.NewWriter(out, f.Level)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		_ = enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+
+func (f *Flate) Decompress(in []byte) ([]byte, error) {
+	reader := bytes.NewReader(in)
+	var deCompressedBuff bytes.Buffer
+
+	err := f.DecompressStream(reader, &deCompressedBuff)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return deCompressedBuff.Bytes(), nil
+}
+
+func (f *Flate) DecompressStream(in io.Reader, out io.Writer) error {
+	d := flate.NewReader(in)
+	defer d.Close()
+	_, err := io.Copy(out, d)
+	return err
+}
+
+func (f *Flate) GetLevel() int {
+	return f.Level
+}
+
+type Zlib struct {
+	Level int
+}
+
+func (zl *Zlib) Compress(in []byte) ([]byte, error) {
+	reader := bytes.NewReader(in)
+	var compressedBuff bytes.Buffer
+
+	err := zl.CompressStream(reader, &compressedBuff)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return compressedBuff.Bytes(), nil
+}
+
+func (zl *Zlib) CompressStream(in io.Reader, out io.Writer) error {
+	enc, err := zlib.NewWriterLevel(out, zl.Level)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		_ = enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+
+func (zl *Zlib) Decompress(in []byte) ([]byte, error) {
+	reader := bytes.NewReader(in)
+	var deCompressedBuff bytes.Buffer
+
+	err := zl.DecompressStream(reader, &deCompressedBuff)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return deCompressedBuff.Bytes(), nil
+}
+
+func (zl *Zlib) DecompressStream(in io.Reader, out io.Writer) error {
+	d, err := zlib.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+	_, err = io.Copy(out, d)
+	return err
+}
+
+func (zl *Zlib) GetLevel() int {
+	return zl.Level
+}
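All four codecs implement `compression.Compressor`, so call sites can treat them interchangeably for both the one-shot (`[]byte`) and streaming (`io.Reader`/`io.Writer`) APIs. A sketch of the one-shot path (level values follow each codec's own scale, e.g. 1–11 for zstd, -2–9 for gzip/flate/zlib):

```go
var c compression.Compressor = &compression.Zstd{Level: 3}

blob, err := c.Compress([]byte("hello hello hello"))
if err != nil {
	panic(err)
}

round, err := c.Decompress(blob)
if err != nil {
	panic(err)
}
_ = round // equals the original input
```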
ZstdCompress function + compressed, err := compressor.Compress(data) + r.NoError(t, err) - // Decompress with ZstdStream function - err = compression.ZstdDecompressStream(compressedReader, &decompressedBuff) - r.NoError(t, err) + var compressedBuff bytes.Buffer + var decompressedBuff bytes.Buffer + reader := bytes.NewReader(data) - // Decompression cross-check (ZstdCompress and ZstdCompressStream) - r.Equal(t, decompressed, decompressedBuff.Bytes()) + // Compression with ZstdCompressStream function + err = compressor.CompressStream(reader, &compressedBuff) + r.NoError(t, err) - r.Len(t, decompressedBuff.Bytes(), len(data)) - r.Equal(t, decompressedBuff.Bytes(), data) - } - t.Log("=============") - } -} + // Compression cross-check (ZstdCompress and ZstdCompressStream) + r.Equal(t, compressed, compressedBuff.Bytes()) -func TestZstdWrongLevel(t *testing.T) { - data := []byte("PSGIeAYZuvDa2QScJkAI1S824E0fA8M2aAYH3SdMd9mWlETmDIgfbexxT5nwygIDIHFp5A92V6Ke4Sl7FwsOU5ox7IIhReltbLONZutz0EbnN3TiquWz3QJjNlo0HJ1t") + t.Log("Data sample: ", data[:16]) + t.Log("Orignal size: ", len(data)) + t.Log("Compressed size: ", compressedBuff.Len()) + t.Log("Compression mode: ", compressor.GetLevel()) + t.Log("---") + compressedReader := bytes.NewReader(compressedBuff.Bytes()) - reader := bytes.NewReader(data) - var compressedBuff bytes.Buffer + // Decompress with ZstdDecompress function + decompressed, err := compressor.Decompress(compressed) + r.NoError(t, err) - err := compression.ZstdCompressStream(reader, &compressedBuff, zstd.WithEncoderLevel(12)) - r.EqualError(t, err, "unknown encoder level") -} + // Decompress with ZstdStream function + err = compressor.DecompressStream(compressedReader, &decompressedBuff) + r.NoError(t, err) -func TestZstdWrongConcurrency(t *testing.T) { - data := []byte("PSGIeAYZuvDa2QScJkAI1S824E0fA8M2aAYH3SdMd9mWlETmDIgfbexxT5nwygIDIHFp5A92V6Ke4Sl7FwsOU5ox7IIhReltbLONZutz0EbnN3TiquWz3QJjNlo0HJ1t") + // Decompression cross-check (ZstdCompress and ZstdCompressStream) + r.Equal(t, decompressed, decompressedBuff.Bytes()) - reader := bytes.NewReader(data) - var compressedBuff bytes.Buffer + r.Len(t, decompressedBuff.Bytes(), len(data)) + r.Equal(t, decompressedBuff.Bytes(), data) - err := compression.ZstdCompressStream(reader, &compressedBuff, zstd.WithEncoderConcurrency(-1)) - r.EqualError(t, err, "concurrency must be at least 1") + t.Log("=============") } func TestZstdWrongDecompressData(t *testing.T) { @@ -106,6 +99,10 @@ func TestZstdWrongDecompressData(t *testing.T) { reader := bytes.NewReader(data) var compressedBuff bytes.Buffer - err = compression.ZstdDecompressStream(reader, &compressedBuff) + compressor := compression.Zstd{ + Level: 11, + } + + err = compressor.DecompressStream(reader, &compressedBuff) r.Error(t, err) } diff --git a/generic/csprng.go b/generic/csprng.go index 1b53206..4f3cc46 100644 --- a/generic/csprng.go +++ b/generic/csprng.go @@ -4,6 +4,7 @@ import ( "crypto/rand" "encoding/hex" "io" + "os" ) func CSPRNG(n int64) ([]byte, error) { @@ -19,6 +20,20 @@ func CSPRNGHex(n int64) (string, error) { return hex.EncodeToString(rnd), err } +func HWRng(n int64) ([]byte, error) { + file, err := os.Open("/dev/hwrng") + if err != nil { + return nil, err + } + defer file.Close() + + random := make([]byte, n) + if _, err = io.ReadFull(file, random); err != nil { + return nil, err + } + return random, nil +} + func Rand() io.Reader { return rand.Reader } diff --git a/generic/csprng_test.go b/generic/csprng_test.go index 609887d..9b1c0e1 100644 --- a/generic/csprng_test.go +++ 
diff --git a/generic/csprng_test.go b/generic/csprng_test.go
index 609887d..9b1c0e1 100644
--- a/generic/csprng_test.go
+++ b/generic/csprng_test.go
@@ -31,6 +31,14 @@ func TestCSPRNGHex(t *testing.T) {
 	}
 }

+func TestHWRng(t *testing.T) {
+	length := 32
+	rnd, err := generic.HWRng(int64(length))
+	r.NoError(t, err)
+	r.Len(t, rnd, length)
+	t.Log(hex.EncodeToString(rnd))
+}
+
 func TestRand(t *testing.T) {
 	reader := generic.Rand()
 	r.Equal(t, reflect.TypeOf(reader), reflect.TypeOf(rand.Reader))
diff --git a/generic/fs.go b/generic/fs.go
index 1f2224f..a293c77 100644
--- a/generic/fs.go
+++ b/generic/fs.go
@@ -7,7 +7,7 @@ import (
 	"path/filepath"
 )

-func SecureDelete(targetPath string, cycle int) error {
+func Delete(targetPath string, cycle int) error {
 	if cycle == 0 {
 		cycle = 3
 	}
@@ -65,7 +65,7 @@ func SecureDelete(targetPath string, cycle int) error {
 	return nil
 }

-func SecureOverwrite(targetPath string, data []byte, cycle int) error {
+func Overwrite(targetPath string, data []byte, cycle int) error {
 	if cycle == 0 {
 		cycle = 3
 	}
diff --git a/generic/utils.go b/generic/utils.go
index 08025fc..b5070b5 100644
--- a/generic/utils.go
+++ b/generic/utils.go
@@ -6,10 +6,35 @@ import (
 	"crypto/ed25519"
 	"crypto/subtle"
 	"crypto/x509"
+	"encoding/base64"
 	"encoding/hex"
 	"encoding/pem"
 )

+type Encoder interface {
+	Encode([]byte) string
+	Decode(string) ([]byte, error)
+}
+
+type Base64 struct{}
+type Hex struct{}
+
+func (b *Base64) Encode(data []byte) string {
+	return base64.RawStdEncoding.EncodeToString(data)
+}
+
+func (b *Base64) Decode(data string) ([]byte, error) {
+	return base64.RawStdEncoding.DecodeString(data)
+}
+
+func (h *Hex) Encode(data []byte) string {
+	return hex.EncodeToString(data)
+}
+
+func (h *Hex) Decode(data string) ([]byte, error) {
+	return hex.DecodeString(data)
+}
+
 func Compare(x, y []byte) bool {
 	return subtle.ConstantTimeCompare(x, y) == 1
 }
diff --git a/go.mod b/go.mod
index 39a1bea..66cf823 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 	github.com/cloudflare/circl v1.3.6
 	github.com/klauspost/compress v1.17.2
 	github.com/stretchr/testify v1.8.4
-	golang.org/x/crypto v0.14.0
+	golang.org/x/crypto v0.15.0
 )

 require (
diff --git a/go.sum b/go.sum
index 0c4ba0a..6576747 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
+golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
 golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
 golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
diff --git a/hash/fs.go b/hash/fs.go
index 247a791..b5f72d2 100644
--- a/hash/fs.go
+++ b/hash/fs.go
@@ -2,12 +2,12 @@ package hash

 import "github.com/D3vl0per/crypt/generic"

-func ReadFileContentAndHash(path string) ([]byte, error) {
+func ReadFileContentAndHash(algo Algorithms, path string) ([]byte, error) {
 	content, err := generic.ReadFileContent(path)
 	if err != nil {
 		return []byte{}, err
 	}
-	hash, err := Blake256(content)
+	hash, err := algo.Hash(content)
 	if err != nil {
 		return []byte{}, err
 	}
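Caller-side sketch of the reworked ReadFileContentAndHash, which now takes the hash algorithm explicitly; the file path here is hypothetical:

	package main

	import (
		"fmt"
		"log"

		hasher "github.com/D3vl0per/crypt/hash"
	)

	func main() {
		// Any Algorithms implementation can be injected; BLAKE2b-256
		// mirrors the behaviour that was hard-wired before this refactor.
		digest, err := hasher.ReadFileContentAndHash(&hasher.Blake2b256{}, "go.mod")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%x\n", digest)
	}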
diff --git a/hash/hash.go b/hash/hash.go
index 7ccddd6..942b401 100644
--- a/hash/hash.go
+++ b/hash/hash.go
@@ -1,65 +1,298 @@
 package hash

 import (
-	"encoding/hex"
-	"errors"

 	"github.com/D3vl0per/crypt/generic"
 	"golang.org/x/crypto/blake2b"
+	"golang.org/x/crypto/sha3"
 )

-func Blake256(data []byte) ([]byte, error) {
-	h, err := blake2b.New256(nil)
+// Algorithms is the interface implemented by every hash in this package:
+// one-shot hashing, hash validation, keyed (MAC) hashing, and MAC validation.
+type Algorithms interface {
+	Hash([]byte) ([]byte, error)
+	ValidateHash([]byte, []byte) (bool, error)
+	Hmac([]byte, []byte) ([]byte, error)
+	ValidateHmac([]byte, []byte, []byte) (bool, error)
+}
+
+type Blake2b256 struct{}
+type Blake2b384 struct{}
+type Blake2b512 struct{}
+
+type Sha3256 struct{}
+type Sha3384 struct{}
+type Sha3512 struct{}
+
+type Shake128 struct{}
+type Shake256 struct{}
+
+///
+/// Blake2b-256
+///
+
+func (b *Blake2b256) Hash(data []byte) ([]byte, error) {
+	return hashBlake2b(blake2b.Size256, nil, data)
+}
+
+func (b *Blake2b256) ValidateHash(plaintext, expectedHash []byte) (bool, error) {
+	hashed, err := hashBlake2b(blake2b.Size256, nil, plaintext)
+	if err != nil {
+		return false, err
+	}
+
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func (b *Blake2b256) Hmac(key, data []byte) ([]byte, error) {
+	return hashBlake2b(blake2b.Size256, key, data)
+}
+
+func (b *Blake2b256) ValidateHmac(key, data, expectedHash []byte) (bool, error) {
+	hashed, err := hashBlake2b(blake2b.Size256, key, data)
 	if err != nil {
-		return []byte{}, err
+		return false, err
 	}
-	h.Write(data)
-	return h.Sum(nil), nil
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+///
+/// Blake2b-384
+///
+
+func (b *Blake2b384) Hash(data []byte) ([]byte, error) {
+	return hashBlake2b(blake2b.Size384, nil, data)
 }

-func Blake512(data []byte) ([]byte, error) {
-	h, err := blake2b.New512(nil)
+func (b *Blake2b384) ValidateHash(plaintext, expectedHash []byte) (bool, error) {
+	hashed, err := hashBlake2b(blake2b.Size384, nil, plaintext)
 	if err != nil {
-		return []byte{}, err
+		return false, err
 	}
-	h.Write(data)
-	return h.Sum(nil), nil
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func (b *Blake2b384) Hmac(key, data []byte) ([]byte, error) {
+	return hashBlake2b(blake2b.Size384, key, data)
 }

-func HMACBase(key, data []byte) ([]byte, error) {
-	if len(key) <= 16 {
-		return []byte{}, errors.New("key length is unsecurely short")
+func (b *Blake2b384) ValidateHmac(key, data, expectedHash []byte) (bool, error) {
+	hashed, err := hashBlake2b(blake2b.Size384, key, data)
+	if err != nil {
+		return false, err
 	}
-	if generic.AllZero(key) {
-		return []byte{}, errors.New("key is all zero")
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+///
+/// Blake2b-512
+///
+
+func (b *Blake2b512) Hash(data []byte) ([]byte, error) {
+	return hashBlake2b(blake2b.Size, nil, data)
+}
+
+func (b *Blake2b512) ValidateHash(plaintext, expectedHash []byte) (bool, error) {
+	hashed, err := hashBlake2b(blake2b.Size, nil, plaintext)
+	if err != nil {
+		return false, err
 	}
-	h, err := blake2b.New(64, key)
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func (b *Blake2b512) Hmac(key, data []byte) ([]byte, error) {
+	return hashBlake2b(blake2b.Size, key, data)
+}
+
+func (b *Blake2b512) ValidateHmac(key, data, expectedHash []byte) (bool, error) {
+	hashed, err := hashBlake2b(blake2b.Size, key, data)
 	if err != nil {
-		return []byte{}, err
+		return false, err
+	}
+
+	return generic.Compare(hashed, expectedHash), nil
+}
+
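Keyed-hash (MAC) sketch against the Algorithms interface above; the key literal is illustrative (BLAKE2b accepts keys up to 64 bytes):

	package main

	import (
		"fmt"
		"log"

		hasher "github.com/D3vl0per/crypt/hash"
	)

	func main() {
		key := []byte("fa430a028a6cf6678b1d52d4959af4b7")
		data := []byte("message")

		var algo hasher.Algorithms = &hasher.Blake2b512{}
		mac, err := algo.Hmac(key, data)
		if err != nil {
			log.Fatal(err)
		}

		// Validation recomputes the keyed hash and compares in constant time.
		ok, err := algo.ValidateHmac(key, data, mac)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(ok) // true
	}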
+func hashBlake2b(size int, key, data []byte) ([]byte, error) {
+	// blake2b.New treats a nil key as the unkeyed variant, so a single
+	// constructor call serves both the Hash and the Hmac paths.
+	h, err := blake2b.New(size, key)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := h.Write(data); err != nil {
+		return nil, err
+	}
+	return h.Sum(nil), nil
 }

-func HmacGen(key, data []byte) ([]byte, error) {
-	return HMACBase(key, data)
+///
+/// SHA3-256
+///
+
+func (s *Sha3256) Hash(data []byte) ([]byte, error) {
+	return hashSha3256(nil, data)
 }

-func HmacVerify(key, data []byte, expected string) (bool, error) {
-	expextedHash, err := hex.DecodeString(expected)
+func (s *Sha3256) ValidateHash(plaintext, expectedHash []byte) (bool, error) {
+	hashed, err := hashSha3256(nil, plaintext)
 	if err != nil {
 		return false, err
 	}

-	hash, err := HMACBase(key, data)
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func (s *Sha3256) Hmac(key, data []byte) ([]byte, error) {
+	return hashSha3256(key, data)
+}
+
+func (s *Sha3256) ValidateHmac(key, data, expectedHash []byte) (bool, error) {
+	hashed, err := hashSha3256(key, data)
 	if err != nil {
 		return false, err
 	}

-	return generic.Compare(hash, expextedHash), nil
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func hashSha3256(key, data []byte) ([]byte, error) {
+	h := sha3.New256()
+
+	// An optional key is absorbed ahead of the data (prefix-MAC construction).
+	if key != nil {
+		if _, err := h.Write(key); err != nil {
+			return nil, err
+		}
+	}
+
+	if _, err := h.Write(data); err != nil {
+		return nil, err
+	}
+
+	return h.Sum(nil), nil
+}
+
+///
+/// SHA3-384
+///
+
+func (s *Sha3384) Hash(data []byte) ([]byte, error) {
+	return hashSha3384(nil, data)
+}
+
+func (s *Sha3384) ValidateHash(plaintext, expectedHash []byte) (bool, error) {
+	hashed, err := hashSha3384(nil, plaintext)
+	if err != nil {
+		return false, err
+	}
+
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func (s *Sha3384) Hmac(key, data []byte) ([]byte, error) {
+	return hashSha3384(key, data)
+}
+
+func (s *Sha3384) ValidateHmac(key, data, expectedHash []byte) (bool, error) {
+	hashed, err := hashSha3384(key, data)
+	if err != nil {
+		return false, err
+	}
+
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func hashSha3384(key, data []byte) ([]byte, error) {
+	h := sha3.New384()
+
+	// An optional key is absorbed ahead of the data (prefix-MAC construction).
+	if key != nil {
+		if _, err := h.Write(key); err != nil {
+			return nil, err
+		}
+	}
+
+	if _, err := h.Write(data); err != nil {
+		return nil, err
+	}
+
+	return h.Sum(nil), nil
+}
+
+///
+/// SHA3-512
+///
+
+func (s *Sha3512) Hash(data []byte) ([]byte, error) {
+	return hashSha3512(nil, data)
+}
+
+func (s *Sha3512) ValidateHash(plaintext, expectedHash []byte) (bool, error) {
+	hashed, err := hashSha3512(nil, plaintext)
+	if err != nil {
+		return false, err
+	}
+
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func (s *Sha3512) Hmac(key, data []byte) ([]byte, error) {
+	return hashSha3512(key, data)
+}
+
+func (s *Sha3512) ValidateHmac(key, data, expectedHash []byte) (bool, error) {
+	hashed, err := hashSha3512(key, data)
+	if err != nil {
+		return false, err
+	}
+
+	return generic.Compare(hashed, expectedHash), nil
+}
+
+func hashSha3512(key, data []byte) ([]byte, error) {
+	h := sha3.New512()
+
+	// An optional key is absorbed ahead of the data (prefix-MAC construction).
+	if key != nil {
+		if _, err := h.Write(key); err != nil {
+			return nil, err
+		}
+	}
+
+	if _, err := h.Write(data); err != nil {
+		return nil, err
+	}
+
+	return h.Sum(nil), nil
 }
diff --git a/hash/hash_test.go b/hash/hash_test.go
index 01da96d..e598e7a 100644
--- a/hash/hash_test.go
+++ b/hash/hash_test.go
@@ -1,75 +1,198 @@
 package hash_test

 import (
 	"encoding/hex"
 	"testing"

-	"github.com/D3vl0per/crypt/hash"
-	a "github.com/stretchr/testify/assert"
+	hasher "github.com/D3vl0per/crypt/hash"
 	r "github.com/stretchr/testify/require"
 )

-func TestBlake256(t *testing.T) {
-	data := []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh")
-	// nolint:lll
-	expected := []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237}
-	hash, err := hash.Blake256(data)
-	t.Log(hash)
-	r.NoError(t, err)
-	r.Equal(t, expected, hash)
+type testBlakes struct {
+	Algo     hasher.Algorithms
+	Data     []byte
+	Expected []byte
+	Key      []byte
 }

-func TestBlake512(t *testing.T) {
-	data := []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh")
-	// nolint:lll
-	expected := []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}
-	hash, err := hash.Blake512(data)
-	t.Log(hash)
-	r.NoError(t, err)
-	r.Equal(t, expected, hash)
-}
+func TestBlakes(t *testing.T) {
+	tests := []testBlakes{
+		{
+			Algo: &hasher.Blake2b256{},
+			Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"),
+			Key:  nil,
+			//nolint:lll
+			Expected: []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237},
+		},
+		{
+			Algo: &hasher.Blake2b256{},
+			Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"),
+			Key:  []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"),
+			//nolint:lll
+			Expected: []byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169},
+		},
+		{
+			Algo: &hasher.Blake2b384{},
+			Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"),
+			Key:  nil,
+			//nolint:lll
+			Expected: []byte{50, 67, 235, 235, 146, 164, 58, 187, 5, 182, 182, 179, 132, 31, 200, 27, 68, 50, 83, 71, 221, 131, 86, 164, 203, 194, 251, 64, 172, 45, 105, 200, 90, 118, 50, 47, 37, 237, 28, 153, 88, 166, 95, 221, 138, 249, 176, 116},
+		},
+		{
+			Algo: &hasher.Blake2b384{},
+			Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"),
+			Key:  []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"),
+			//nolint:lll
+			Expected: []byte{2, 58, 228, 49, 225, 253, 51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169},
+		},
+		{
+			Algo: &hasher.Blake2b512{},
+			Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"),
+			Key:  nil,
+			//nolint:lll
+			Expected: []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 
182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}, + }, + { + Algo: &hasher.Blake2b512{}, + Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + //nolint:lll + Expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, + }, + { + Algo: &hasher.Sha3256{}, + Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + Key: nil, + //nolint:lll + Expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, + }, + { + Algo: &hasher.Sha3384{}, + Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + Key: nil, + //nolint:lll + Expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, + }, + { + Algo: &hasher.Sha3512{}, + Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + Key: nil, + //nolint:lll + Expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 100, 38, 202}, + }, + } -func TestHMACVerify(t *testing.T) { - key_1 := []byte("SuperMegaSecretKey") - data := []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh") - // nolint:lll - expected := "185d9e682b053bbc996325266de43541c198df70e81bc2a9a60793832ad0e9c246b11994ea768af413b97f339ae501c220188a194c734f937e816760780381cf" + for _, test := range tests { + testHash(t, test) + } - result, err := hash.HmacVerify(key_1, data, expected) - r.NoError(t, err) - a.True(t, result) } -func TestHMACGen(t *testing.T) { - key_1 := []byte("SuperMegaSecretKey") - data := []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh") - // nolint:lll - expected, err := hex.DecodeString("185d9e682b053bbc996325266de43541c198df70e81bc2a9a60793832ad0e9c246b11994ea768af413b97f339ae501c220188a194c734f937e816760780381cf") - r.NoError(t, err) +func testHash(t *testing.T, test testBlakes) { + if test.Key == nil { + hash, err := test.Algo.Hash(test.Data) + r.NoError(t, err) + t.Log("Hash: ", hex.EncodeToString(hash)) + r.Equal(t, test.Expected, hash) + + validate, err := test.Algo.ValidateHash(test.Data, hash) + r.NoError(t, err) + r.True(t, validate) + } else { + hash, err := test.Algo.Hmac(test.Key, test.Data) + r.NoError(t, err) + t.Log("Hash: ", hex.EncodeToString(hash)) + r.Equal(t, test.Expected, hash) - result, err := hash.HmacGen(key_1, data) - r.NoError(t, err) - r.Equal(t, expected, result) + validate, err := test.Algo.ValidateHmac(test.Key, test.Data, hash) + r.NoError(t, err) + r.True(t, validate) + } } -func TestHMACCheckSmallKeyError(t *testing.T) { - data := []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh") - // nolint:lll - expected := 
"185d9e682b053bbc996325266de43541c198df70e81bc2a9a60793832ad0e9c246b11994ea768af413b97f339ae501c220188a194c734f937e816760780381cf" - key_1 := []byte("Super") +func TestFaultBlakes(t *testing.T) { + tests := []testBlakes{ + { + Algo: &hasher.Blake2b256{}, + Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: nil, + //nolint:lll + Expected: []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237}, + }, + { + Algo: &hasher.Blake2b256{}, + Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + //nolint:lll + Expected: []byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169}, + }, + { + Algo: &hasher.Blake2b384{}, + Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: nil, + //nolint:lll + Expected: []byte{50, 67, 235, 235, 146, 164, 58, 187, 5, 182, 182, 179, 132, 31, 200, 27, 68, 50, 83, 71, 221, 131, 86, 164, 203, 194, 251, 64, 172, 45, 105, 200, 90, 118, 50, 47, 37, 237, 28, 153, 88, 166, 95, 221, 138, 249, 176, 116}, + }, + { + Algo: &hasher.Blake2b384{}, + Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + //nolint:lll + Expected: []byte{2, 58, 228, 49, 225, 253, 51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169}, + }, + { + Algo: &hasher.Blake2b512{}, + Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: nil, + //nolint:lll + Expected: []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}, + }, + { + Algo: &hasher.Blake2b512{}, + Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + //nolint:lll + Expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, + }, + { + Algo: &hasher.Sha3256{}, + Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: nil, + //nolint:lll + Expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, + }, + { + Algo: &hasher.Sha3384{}, + Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: nil, + //nolint:lll + Expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, + }, + { + Algo: &hasher.Sha3512{}, + Data: 
[]byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + Key: nil, + //nolint:lll + Expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 100, 38, 202}, + }, + } - result, err := hash.HmacVerify(key_1, data, expected) - r.EqualError(t, err, "key length is unsecurely short") - a.False(t, result) -} + for _, test := range tests { + testHashFault(t, test) + } -func TestHMACCheckNullKeyError(t *testing.T) { - data := []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh") - // nolint:lll - expected := "185d9e682b053bbc996325266de43541c198df70e81bc2a9a60793832ad0e9c246b11994ea768af413b97f339ae501c220188a194c734f937e816760780381cf" - key_1 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} +} - result, err := hash.HmacVerify(key_1, data, expected) - r.EqualError(t, err, "key is all zero") - a.False(t, result) +func testHashFault(t *testing.T, test testBlakes) { + if test.Key == nil { + validate, err := test.Algo.ValidateHash(test.Data, test.Expected) + r.NoError(t, err) + r.False(t, validate) + } else { + validate, err := test.Algo.ValidateHmac(test.Key, test.Data, test.Expected) + r.NoError(t, err) + r.False(t, validate) + } } diff --git a/hash/kdf.go b/hash/kdf.go index dad3fd4..9050c94 100644 --- a/hash/kdf.go +++ b/hash/kdf.go @@ -1,125 +1,173 @@ package hash import ( - "crypto/sha256" - "encoding/hex" + "encoding/base64" "errors" - "io" + "regexp" + "strconv" "github.com/D3vl0per/crypt/generic" "golang.org/x/crypto/argon2" - "golang.org/x/crypto/hkdf" ) const ( - aTime uint32 = 2 - aMemory uint32 = 1 * 64 * 1024 - aThreads uint8 = 4 - aKeyLen uint32 = 32 - HKDFKeysize int = 32 + AIterations uint32 = 2 + AMemory uint32 = 1 * 64 * 1024 + AParallelism uint8 = 4 + AKeyLen uint32 = 32 + HKDFKeysize int = 32 ) -type keys struct { - Salt string - Hash string +type Kdf interface { + Hash([]byte) (string, error) + Validate([]byte, string) (bool, error) } -// Easy to user argon2ID toolset. 
-func Argon2IDBase(pass, salt []byte) (keys, error) {
-	hash := argon2.IDKey(pass, salt, aTime, aMemory, aThreads, aKeyLen)
+type Argon2ID struct {
+	Salt        []byte
+	Memory      uint32
+	Iterations  uint32
+	Parallelism uint8
+	KeyLen      uint32
+}

-	return keys{
-		Salt: hex.EncodeToString(salt),
-		Hash: hex.EncodeToString(hash),
-	}, nil
+type argonOutput struct {
+	ArgonString string
+	Hash        []byte
+	HashBase64  string
+	Salt        []byte
+	SaltBase64  string
 }

-func Argon2IDRecreate(pass []byte, salt string) ([]byte, error) {
-	salt_raw, err := hex.DecodeString(salt)
-	if err != nil {
-		return []byte{}, err
+func (a *Argon2ID) argon2ID(data []byte) argonOutput {
+	hash := argon2.IDKey(data, a.Salt, a.Iterations, a.Memory, a.Parallelism, a.KeyLen)
+
+	hashB64 := base64.RawStdEncoding.EncodeToString(hash)
+	saltB64 := base64.RawStdEncoding.EncodeToString(a.Salt)
+
+	argonString := generic.StrCnct([]string{
+		"$argon2id$v=", strconv.FormatInt(int64(argon2.Version), 10),
+		"$m=", strconv.FormatUint(uint64(a.Memory), 10),
+		",t=", strconv.FormatUint(uint64(a.Iterations), 10),
+		",p=", strconv.FormatInt(int64(a.Parallelism), 10),
+		"$", saltB64,
+		"$", hashB64}...,
+	)
+
+	return argonOutput{
+		ArgonString: argonString,
+		Hash:        hash,
+		HashBase64:  hashB64,
+		Salt:        a.Salt,
+		SaltBase64:  saltB64,
 	}
-
-	hash_to_validate := argon2.IDKey(pass, salt_raw, aTime, aMemory, aThreads, aKeyLen)
-	return hash_to_validate, nil
 }

-func Argon2ID(pass []byte) (keys, error) {
-	salt, err := generic.CSPRNG(16)
-	if err != nil {
-		return keys{}, err
+func (a *Argon2ID) Hash(data []byte) (string, error) {
+	if a.Salt != nil {
+		if len(a.Salt) != 16 {
+			return "", errors.New("salt must be 16 bytes long")
+		}
+	} else {
+		var err error
+		a.Salt, err = generic.CSPRNG(16)
+		if err != nil {
+			return "", err
+		}
 	}

-	return Argon2IDBase(pass, salt)
-}
+	// Zero-value fields fall back to the package defaults.
+	if a.Iterations == 0 {
+		a.Iterations = AIterations
+	}
+	if a.Memory == 0 {
+		a.Memory = AMemory
+	}
+	if a.Parallelism == 0 {
+		a.Parallelism = AParallelism
+	}
+	if a.KeyLen == 0 {
+		a.KeyLen = AKeyLen
+	}

-func Argon2IDCustomSalt(pass, salt []byte) (keys, error) {
-	if len(salt) != 16 {
-		return keys{}, errors.New("salt length is incorrect")
-	}
-	return Argon2IDBase(pass, salt)
+	output := a.argon2ID(data)
+	return output.ArgonString, nil
 }

-func Argon2IDVerify(pass []byte, salt, hash string) (bool, error) {
-	hash_to_validate, err := Argon2IDRecreate(pass, salt)
+func (a *Argon2ID) Validate(data []byte, argonString string) (bool, error) {
+	parameters, err := a.ExtractParameters(argonString)
 	if err != nil {
 		return false, err
 	}

-	hash_raw, err := hex.DecodeString(hash)
+	providedHash, err := base64.RawStdEncoding.DecodeString(parameters["hash"])
 	if err != nil {
-		return false, err
+		return false, errors.New(generic.StrCnct([]string{"hash base64 decode error: ", err.Error()}...))
 	}

-	return generic.Compare(hash_raw, hash_to_validate), nil
-}
+	a.Salt, err = base64.RawStdEncoding.DecodeString(parameters["salt"])
+	if err != nil {
+		return false, errors.New(generic.StrCnct([]string{"salt base64 decode error: ", err.Error()}...))
+	}

-// Easy to user HKDF toolset.
-func HKDFBase(secret, salt, msg []byte) ([]byte, error) {
-	hash := sha256.New
-	kdf := hkdf.New(hash, secret, salt, msg)
+	if a.Iterations == 0 {
+		parsed, err := strconv.ParseUint(parameters["iterations"], 10, 32)
+		if err != nil {
+			return false, errors.New(generic.StrCnct([]string{"iteration parameter parsing error: ", err.Error()}...))
+		}
+		a.Iterations = uint32(parsed)
+	}

-	key := make([]byte, HKDFKeysize)
+	if a.Memory == 0 {
+		parsed, err := strconv.ParseUint(parameters["memory"], 10, 32)
+		if err != nil {
+			return false, errors.New(generic.StrCnct([]string{"memory parameter parsing error: ", err.Error()}...))
+		}
+		a.Memory = uint32(parsed)
+	}

-	if _, err := io.ReadFull(kdf, key); err != nil {
-		return []byte{}, err
+	if a.Parallelism == 0 {
+		// Parallelism is a uint8, so parse with bitSize 8 to reject overflow.
+		parsed, err := strconv.ParseUint(parameters["parallelism"], 10, 8)
+		if err != nil {
+			return false, errors.New(generic.StrCnct([]string{"parallelism parameter parsing error: ", err.Error()}...))
+		}
+		a.Parallelism = uint8(parsed)
 	}

-	return key, nil
-}
-
-func HKDFRecreate(secret, msg []byte, salt string) ([]byte, error) {
-	salt_raw, err := hex.DecodeString(salt)
-	if err != nil {
-		return []byte{}, err
+	if a.KeyLen == 0 {
+		a.KeyLen = AKeyLen
 	}

-	return HKDFBase(secret, salt_raw, msg)
+	hashed := a.argon2ID(data)
+
+	return generic.Compare(hashed.Hash, providedHash), nil
 }

-func HKDF(secret, msg []byte) (keys, error) {
-	hash := sha256.New
-	salt, err := generic.CSPRNG(int64(hash().Size()))
-	if err != nil {
-		return keys{}, err
+/*
+	type Hkdf struct {
+		Salt     []byte
+		Secret   []byte
+		HashMode func() hash.Hash
 	}

+// Easy to use HKDF toolset.
+
+	func (h *Hkdf) Hash(data []byte) ([]byte, error) {
+		kdf := hkdf.New(h.HashMode, h.Secret, h.Salt, data)
+
+		key := make([]byte, HKDFKeysize)
+
+		if _, err := io.ReadFull(kdf, key); err != nil {
+			return []byte{}, err
+		}
+		return key, nil
 	}

-	key, err := HKDFBase(secret, salt, msg)
-	if err != nil {
-		return keys{}, err
+	func HKDFRecreate(secret, msg []byte, salt string) ([]byte, error) {
+		salt_raw, err := hex.DecodeString(salt)
+		if err != nil {
+			return []byte{}, err
+		}

-	return keys{
-		Salt: hex.EncodeToString(salt),
-		Hash: hex.EncodeToString(key),
-	}, nil
-}
+		return HKDFBase(secret, salt_raw, msg)
+	}

 /*
-func HKDFCustomSalt(secret, salt, msg []byte) (keys, error) {
+	func HKDF(secret, msg []byte) (keys, error) {
 	hash := sha256.New
-	if len(salt) != hash().Size(){
-		return keys{}, errors.New("salt length is incorrect")
+	salt, err := generic.CSPRNG(int64(hash().Size()))
+	if err != nil {
+		return keys{}, err
 	}

 	key, err := HKDFBase(secret, salt, msg)
@@ -132,17 +180,58 @@ func HKDFCustomSalt(secret, salt, msg []byte) (keys, error) {
 		Hash: hex.EncodeToString(key),
 	}, nil
 }
+
+	func HKDFVerify(secret, msg []byte, salt, hash string) (bool, error) {
+		hash_to_validate, err := HKDFRecreate(secret, msg, salt)
+		if err != nil {
+			return false, err
+		}
+
+		hash_raw, err := hex.DecodeString(hash)
+		if err != nil {
+			return false, err
+		}
+
+		return generic.Compare(hash_raw, hash_to_validate), nil
+	}
 */

-func HKDFVerify(secret, msg []byte, salt, hash string) (bool, error) {
-	hash_to_validate, err := HKDFRecreate(secret, msg, salt)
-	if err != nil {
-		return false, err
+
+// ExtractParameters splits a $argon2id$v=..$m=..,t=..,p=..$salt$hash string
+// into its named components and sanity-checks the algorithm and version.
+func (a *Argon2ID) ExtractParameters(input string) (map[string]string, error) {
+	pattern := `\$(argon2id)\$v=(\d+)\$m=(\d+),t=(\d+),p=(\d+)\$([^$]+)\$([^$]+)$`
+
+	re := regexp.MustCompile(pattern)
+
+	matches := re.FindStringSubmatch(input)
+
+	if len(matches) != 8 {
+		return nil, errors.New("invalid input format")
+	}

-	hash_raw, err := hex.DecodeString(hash)
-	if err != nil {
-		return false, err
+	parameters := map[string]string{
+		"algorithm":   matches[1],
+		"version":     matches[2],
+		"memory":      matches[3],
+		"iterations":  matches[4],
+		"parallelism": matches[5],
+		"salt":        matches[6],
+		"hash":        matches[7],
+	}
+
+	if parameters["algorithm"] != "argon2id" {
+		return map[string]string{}, errors.New(generic.StrCnct([]string{"invalid algorithm: ", parameters["algorithm"]}...))
+	}
+
+	if parameters["version"] != strconv.FormatInt(int64(argon2.Version), 10) {
+		return map[string]string{}, errors.New(generic.StrCnct([]string{"invalid version: ", parameters["version"]}...))
+	}
+
+	if len(parameters["hash"]) == 0 {
+		return map[string]string{}, errors.New("missing hash")
+	}
+
+	if len(parameters["salt"]) == 0 {
+		return map[string]string{}, errors.New("missing salt")
 	}

-	return generic.Compare(hash_raw, hash_to_validate), nil
+	return parameters, nil
 }
diff --git a/hash/kdf_test.go b/hash/kdf_test.go
index 5eb265e..92159d1 100644
--- a/hash/kdf_test.go
+++ b/hash/kdf_test.go
@@ -1,7 +1,6 @@ package hash_test

 import (
-	"encoding/hex"
 	"testing"

 	"github.com/D3vl0per/crypt/generic"
@@ -11,16 +10,52 @@ import (
 )

 func TestArgon2ID(t *testing.T) {
-	pass := []byte("Correct Horse Battery Staple")
-
-	blob, err := hash.Argon2ID(pass)
+	data := []byte("Correct Horse Battery Staple")
+	salt, err := generic.CSPRNG(16)
 	r.NoError(t, err)

+	argon := []hash.Argon2ID{
+		{},
+		{
+			Memory: 2 * 64 * 1024,
+		},
+		{
+			Iterations: 4,
+		},
+		{
+			Parallelism: 8,
+		},
+		{
+			KeyLen: 64,
+		},
+		{
+			Salt: salt,
+		},
+		{
+			Memory:      2 * 64 * 1024,
+			Iterations:  2,
+			Parallelism: 8,
+			KeyLen:      64,
+			Salt:        salt,
+		},
+	}
+
+	for _, e := range argon {
+		argonString, err := e.Hash(data)
+		r.NoError(t, err)
+
+		t.Log("Argon string: ", argonString)
+		parameters, err := e.ExtractParameters(argonString)
+		r.NoError(t, err)
+		t.Log("Argon parameters: ", parameters)
+
+		isValid, err := e.Validate(data, argonString)
+		r.NoError(t, err)
+		a.True(t, isValid)
+	}

-	isValid, err := hash.Argon2IDVerify(pass, blob.Salt, blob.Hash)
-	r.NoError(t, err)
-	a.True(t, isValid)
 }

+/*
 func TestArgon2IDCustomSalt(t *testing.T) {
 	pass := []byte("Correct Horse Battery Staple")
 	salt, err := generic.CSPRNG(16)
@@ -37,7 +72,8 @@ func TestArgon2IDCustomSalt(t *testing.T) {
 	r.NoError(t, err)
 	a.True(t, isValid)
 }
-
+*/
+/*
 func TestHKDF(t *testing.T) {
 	secret := []byte("Correct Horse Battery Staple")
 	msg := []byte("https://xkcd.com/936/")
@@ -49,7 +85,7 @@ func TestHKDF(t *testing.T) {
 	r.NoError(t, err)
 	a.True(t, isValid)
 }
-
+*/
 /*
 func TestHKDFCustomSalt(t *testing.T) {
 	secret := []byte("Correct Horse Battery Staple")
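Minimal sketch of the reworked Argon2ID API as exercised by the test above (the output placeholders are illustrative):

	package main

	import (
		"fmt"
		"log"

		"github.com/D3vl0per/crypt/hash"
	)

	func main() {
		secret := []byte("Correct Horse Battery Staple")

		// Zero-value fields fall back to the package defaults
		// (AIterations, AMemory, AParallelism, AKeyLen); a nil Salt
		// is drawn from the CSPRNG.
		argon := hash.Argon2ID{}
		encoded, err := argon.Hash(secret)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(encoded) // $argon2id$v=19$m=65536,t=2,p=4$<salt>$<hash>

		ok, err := argon.Validate(secret, encoded)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(ok) // true
	}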
diff --git a/symmetric/symmetric.go b/symmetric/symmetric.go
index 677dafb..523a541 100644
--- a/symmetric/symmetric.go
+++ b/symmetric/symmetric.go
@@ -2,19 +2,43 @@ package symmetric

 import (
 	"errors"
+	"hash"
 	"io"

 	"github.com/D3vl0per/crypt/aged"
 	"github.com/D3vl0per/crypt/generic"
 	"golang.org/x/crypto/chacha20poly1305"
+	"golang.org/x/crypto/hkdf"
+	"golang.org/x/crypto/sha3"
 )

-func EncryptXChaCha20(secret, plaintext []byte) ([]byte, error) {
-	if len(secret) != chacha20poly1305.KeySize {
-		return []byte{}, errors.New("wrong secret size")
+// Symmetric is the interface for one-shot symmetric ciphers; Encrypt and
+// Decrypt take (key, data) and return the transformed bytes.
+type Symmetric interface {
+	Encrypt([]byte, []byte) ([]byte, error)
+	Decrypt([]byte, []byte) ([]byte, error)
+}
+
+// SymmetricStream is the streaming counterpart, reading from an io.Reader
+// and writing to an io.Writer.
+type SymmetricStream interface {
+	Encrypt(io.Reader, io.Writer) error
+	Decrypt(io.Reader, io.Writer) error
+}
+
+type XChaCha20 struct{}
+type Xor struct{}
+
+type XChaCha20Stream struct {
+	Key  []byte
+	Hash func() hash.Hash
+}
+
+///
+/// XChaCha20-Poly1305
+///
+func (x *XChaCha20) Encrypt(key, plaintext []byte) ([]byte, error) {
+	if len(key) != chacha20poly1305.KeySize {
+		return []byte{}, errors.New("wrong key size")
 	}

-	aead, err := chacha20poly1305.NewX(secret)
+	aead, err := chacha20poly1305.NewX(key)
 	if err != nil {
 		return []byte{}, err
 	}
@@ -27,12 +51,12 @@ func EncryptXChaCha20(secret, plaintext []byte) ([]byte, error) {
 	return aead.Seal(nonce, nonce, plaintext, nil), nil
 }

-func DecryptXChacha20(secret, ciphertext []byte) ([]byte, error) {
-	if len(secret) != chacha20poly1305.KeySize {
+func (x *XChaCha20) Decrypt(key, ciphertext []byte) ([]byte, error) {
+	if len(key) != chacha20poly1305.KeySize {
 		return []byte{}, errors.New("wrong secret size")
 	}

-	aead, err := chacha20poly1305.NewX(secret)
+	aead, err := chacha20poly1305.NewX(key)
 	if err != nil {
 		return []byte{}, err
 	}
@@ -50,7 +74,10 @@ func DecryptXChacha20(secret, ciphertext []byte) ([]byte, error) {
 	return plaintext, nil
 }

-func XOR(payload, key []byte) ([]byte, error) {
+///
+/// XOR
+///
+func (x *Xor) Encrypt(key, payload []byte) ([]byte, error) {
 	if len(payload) != len(key) {
 		return []byte{}, errors.New("insecure xor operation, key and payload length need to equal")
 	}
@@ -67,24 +94,24 @@ func XOR(payload, key []byte) ([]byte, error) {
 	return xored, nil
 }

-func EncryptStreamXChacha20(in io.Reader, out io.Writer) (key []byte, err error) {
-	key = make([]byte, chacha20poly1305.KeySize)
-	if _, err = generic.Rand().Read(key); err != nil {
-		return []byte{}, err
-	}
-
-	if err = encryptXChaCha20Stream(in, out, key); err != nil {
-		return []byte{}, err
-	}
-	return key, nil
+func (x *Xor) Decrypt(key, payload []byte) ([]byte, error) {
+	return x.Encrypt(key, payload)
 }

-func EncryptStreamXChacha20CustomKey(in io.Reader, out io.Writer, key []byte) (err error) {
-	return encryptXChaCha20Stream(in, out, key)
-}
+func (x *XChaCha20Stream) Encrypt(in io.Reader, out io.Writer) error {
+	if len(x.Key) != chacha20poly1305.KeySize {
+		return errors.New("wrong key size")
+	}

-func encryptXChaCha20Stream(in io.Reader, out io.Writer, key []byte) error {
-	w, err := streamWriter(out, key)
+	var str stream
+	if x.Hash == nil {
+		str = stream{
+			Hash: sha3.New384,
+		}
+	} else {
+		str = stream{Hash: x.Hash}
+	}
+	w, err := str.writer(out, x.Key)
 	if err != nil {
 		return err
 	}
@@ -98,25 +125,21 @@

-func streamWriter(dst io.Writer, key []byte) (io.WriteCloser, error) {
-	nonce := make([]byte, chacha20poly1305.NonceSizeX)
-	if _, err := generic.Rand().Read(nonce); err != nil {
-		return nil, err
+func (x *XChaCha20Stream) Decrypt(in io.Reader, out io.Writer) error {
+	if len(x.Key) != chacha20poly1305.KeySize {
+		return errors.New("wrong key size")
 	}

-	if _, err := dst.Write(nonce); err != nil {
-		return nil, err
+	var str stream
+	if x.Hash == nil {
+		str = stream{
+			Hash: sha3.New384,
+		}
+	} else {
+		str = stream{Hash: x.Hash}
 	}

-	return aged.NewWriter(aged.StreamKey(key, nonce), dst)
-}
-
-func DecryptStreamXChacha20Custom(in io.Reader, out io.Writer, key []byte) (err error) {
-	return DecryptStreamXChacha20(in, out, key)
-}
-
-func DecryptStreamXChacha20(in io.Reader, out io.Writer, key []byte) error {
-	r, err := streamReader(in, key)
+	r, err := str.reader(in, x.Key)
 	if err != nil {
 		return err
 	}
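One-shot XChaCha20-Poly1305 round trip against the refactored interface (the key sourcing here is illustrative):

	package main

	import (
		"fmt"
		"log"

		"github.com/D3vl0per/crypt/generic"
		"github.com/D3vl0per/crypt/symmetric"
		"golang.org/x/crypto/chacha20poly1305"
	)

	func main() {
		key, err := generic.CSPRNG(chacha20poly1305.KeySize)
		if err != nil {
			log.Fatal(err)
		}

		var cipher symmetric.Symmetric = &symmetric.XChaCha20{}
		ciphertext, err := cipher.Encrypt(key, []byte("https://xkcd.com/936/"))
		if err != nil {
			log.Fatal(err)
		}

		plaintext, err := cipher.Decrypt(key, ciphertext)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\n", plaintext)
	}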
@@ -127,11 +150,50 @@ func DecryptStreamXChacha20(in io.Reader, out io.Writer, key []byte) error {
 	return nil
 }

-func streamReader(src io.Reader, key []byte) (io.Reader, error) {
+type stream struct {
+	Hash func() hash.Hash
+}
+
+func (s *stream) reader(src io.Reader, key []byte) (io.Reader, error) {
 	nonce := make([]byte, chacha20poly1305.NonceSizeX)
 	if _, err := io.ReadFull(src, nonce); err != nil {
 		return nil, errors.New("failed to read nonce")
 	}

-	return aged.NewReader(aged.StreamKey(key, nonce), src)
+	streamerKey, err := s.key(key, nonce)
+	if err != nil {
+		return nil, err
+	}
+
+	return aged.NewReader(streamerKey, src)
+}
+
+func (s *stream) writer(dst io.Writer, key []byte) (io.WriteCloser, error) {
+	nonce := make([]byte, chacha20poly1305.NonceSizeX)
+	if _, err := generic.Rand().Read(nonce); err != nil {
+		return nil, err
+	}
+
+	if _, err := dst.Write(nonce); err != nil {
+		return nil, err
+	}
+
+	streamerKey, err := s.key(key, nonce)
+	if err != nil {
+		return nil, err
+	}
+
+	return aged.NewWriter(streamerKey, dst)
+}
+
+func (s *stream) key(fileKey, nonce []byte) ([]byte, error) {
+	h := hkdf.New(s.Hash, fileKey, nonce, []byte("payload"))
+	streamKey := make([]byte, chacha20poly1305.KeySize)
+	if _, err := io.ReadFull(h, streamKey); err != nil {
+		return nil, err
+	}
+	if generic.AllZero(streamKey) {
+		return nil, errors.New("streamer key is all zero")
+	}
+	return streamKey, nil
 }
diff --git a/symmetric/symmetric_test.go b/symmetric/symmetric_test.go
index cd1ee6d..e08c7be 100644
--- a/symmetric/symmetric_test.go
+++ b/symmetric/symmetric_test.go
@@ -6,16 +6,25 @@ import (

 	"testing"

+	"github.com/D3vl0per/crypt/generic"
 	"github.com/D3vl0per/crypt/symmetric"
 	r "github.com/stretchr/testify/require"
+	"golang.org/x/crypto/chacha20poly1305"
 )

 func TestStreamXChaCha20(t *testing.T) {
-	plainText := []byte("Black lives matter.")
+	plainText := []byte("https://xkcd.com/936/")
 	out := &bytes.Buffer{}
 	in := bytes.NewReader(plainText)

-	key, err := symmetric.EncryptStreamXChacha20(in, out)
+	key, err := generic.CSPRNG(chacha20poly1305.KeySize)
+	r.NoError(t, err)
+
+	sym := symmetric.XChaCha20Stream{
+		Key: key,
+	}
+
+	err = sym.Encrypt(in, out)
 	r.NoError(t, err)

 	t.Logf("Key: %s", hex.EncodeToString(key))
@@ -24,7 +33,12 @@ func TestStreamXChaCha20(t *testing.T) {

 	rr := bytes.NewReader(out.Bytes())
 	out2 := &bytes.Buffer{}
-	r.NoError(t, symmetric.DecryptStreamXChacha20(rr, out2, key))
+
+	sym2 := symmetric.XChaCha20Stream{
+		Key: key,
+	}
+
+	r.NoError(t, sym2.Decrypt(rr, out2))

 	t.Logf("Decrypted file size: %d\n", out2.Len())
 	t.Logf("Decrypted value: %s", out2.String())
@@ -36,10 +50,14 @@ func TestXChaCha20(t *testing.T) {
 	r.NoError(t, err)
 	payload := []byte("https://xkcd.com/936/")

-	ciphertext, err := symmetric.EncryptXChaCha20(secret, payload)
+	sym := symmetric.XChaCha20{}
+
+	ciphertext, err := sym.Encrypt(secret, payload)
 	r.NoError(t, err)

-	plaintext, err := symmetric.DecryptXChacha20(secret, ciphertext)
+	sym2 := symmetric.XChaCha20{}
+
+	plaintext, err := sym2.Decrypt(secret, ciphertext)
 	r.NoError(t, err)

 	r.Equal(t, payload, plaintext)
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 0000000..de912e1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,1017 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package flate + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. + + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we will encode at the time. + // Smaller sizes usually creates less optimal blocks. + // Bigger can make context switching slow. + // We use this for levels 7-9, so we make it big. + maxFlateBlockTokens = 1 << 15 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 28 + + skipNever = math.MaxInt32 + + debugDeflate = false +) + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-6 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + {0, 0, 0, 0, 0, 5}, + {0, 0, 0, 0, 0, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, +} + +// advancedState contains state for the advanced levels, with bigger hash tables, etc. +type advancedState struct { + // deflate state + length int + offset int + maxInsertIndex int + chainHead int + hashOffset int + + ii uint16 // position of last match, intended to overflow to reset. + + // input window: unprocessed data is window[index:windowEnd] + index int + hashMatch [maxMatchLength + minMatchLength]uint32 + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. 
+ hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 +} + +type compressor struct { + compressionLevel + + h *huffmanEncoder + w *huffmanBitWriter + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + + window []byte + windowEnd int + blockStart int // window index where current tokens start + err error + + // queued output tokens + tokens tokens + fast fastEnc + state *advancedState + + sync bool // requesting flush + byteAvailable bool // if true, still need to process window[index-1]. +} + +func (d *compressor) fillDeflate(b []byte) int { + s := d.state + if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + //copy(d.window[:], d.window[windowSize:2*windowSize]) + *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) + s.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + s.hashOffset += windowSize + if s.hashOffset > maxHashOffset { + delta := s.hashOffset - 1 + s.hashOffset -= delta + s.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). + for i, v := range s.hashPrev[:] { + if int(v) > delta { + s.hashPrev[i] = uint32(int(v) - delta) + } else { + s.hashPrev[i] = 0 + } + } + for i, v := range s.hashHead[:] { + if int(v) > delta { + s.hashHead[i] = uint32(int(v) - delta) + } else { + s.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + //d.w.writeBlock(tok, eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window, d.sync) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok, eof, window, d.sync) + } + } else { + d.w.writeBlock(tok, eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only or huffman mode. + if d.level <= 0 { + return + } + if d.fast != nil { + // encode the last data, but discard the result + if len(b) > maxMatchOffset { + b = b[len(b)-maxMatchOffset:] + } + d.fast.Encode(&d.tokens, b) + d.tokens.Reset() + return + } + s := d.state + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. 
+ n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. + s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + // Update window information. + d.windowEnd += n + s.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = minMatchLength - 1 + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + if minIndex < 0 { + minIndex = 0 + } + offset = 0 + + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + + // Minimum gain to accept a match. + cGain := 4 + + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 3 + // Base is 4 bytes at with an additional cost. + // Matches must be better than this. + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + // Calculate gain. Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) + if newGain > cGain { + length = n + offset = pos - i + cGain = newGain + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. 
+ break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return hash4u(binary.LittleEndian.Uint32(b), hashBits) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> (32 - h) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < 4 { + return + } + hb := binary.LittleEndian.Uint32(b) + + dst[0] = hash4u(hb, hashBits) + end := len(b) - 4 + 1 + for i := 1; i < end; i++ { + hb = (hb >> 8) | uint32(b[i+3])<<24 + dst[i] = hash4u(hb, hashBits) + } +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.byteAvailable = false + d.err = nil + if d.state == nil { + return + } + s := d.state + s.index = 0 + s.hashOffset = 1 + s.length = minMatchLength - 1 + s.offset = 0 + s.chainHead = -1 +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + s := d.state + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = debugDeflate + + if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { + return + } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. + if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } + + s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + + for { + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - s.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + return + } + } + if s.index < s.maxInsertIndex { + // Update the hash + hash := hash4(d.window[s.index:]) + ch := s.hashHead[hash] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[hash] = uint32(s.index + s.hashOffset) + } + prevLength := s.length + prevOffset := s.offset + s.length = minMatchLength - 1 + s.offset = 0 + minIndex := s.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { + s.length = newLength + s.offset = newOffset + } + } + + if prevLength >= minMatchLength && s.length <= prevLength { + // No better match, but check for better match at end... 
+ // + // Skip forward a number of bytes. + // Offset of 2 seems to yield best results. 3 is sometimes better. + const checkOff = 2 + + // Check all, except full length + if prevLength < maxMatchLength-checkOff { + prevIndex := s.index - 1 + if prevIndex+prevLength < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength+checkOff { + end = maxMatchLength + checkOff + } + end += prevIndex + + // Hash at match end. + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + + // Extend back... + for i := checkOff - 1; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } else if false { + // Check one further ahead. + // Only rarely better, disabled for now. + prevIndex++ + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength+checkOff { + prevLength = length + prevOffset = prevIndex - ch2 + prevIndex-- + + // Extend back... + for i := checkOff; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } + } + } + } + } + } + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+			newIndex := s.index + prevLength - 1
+			// Calculate missing hashes
+			end := newIndex
+			if end > s.maxInsertIndex {
+				end = s.maxInsertIndex
+			}
+			end += minMatchLength - 1
+			startindex := s.index + 1
+			if startindex > s.maxInsertIndex {
+				startindex = s.maxInsertIndex
+			}
+			tocheck := d.window[startindex:end]
+			dstSize := len(tocheck) - minMatchLength + 1
+			if dstSize > 0 {
+				dst := s.hashMatch[:dstSize]
+				bulkHash4(tocheck, dst)
+				var newH uint32
+				for i, val := range dst {
+					di := i + startindex
+					newH = val & hashMask
+					// Get previous value with the same hash.
+					// Our chain should point to the previous value.
+					s.hashPrev[di&windowMask] = s.hashHead[newH]
+					// Set the head of the hash chain to us.
+					s.hashHead[newH] = uint32(di + s.hashOffset)
+				}
+			}
+
+			s.index = newIndex
+			d.byteAvailable = false
+			s.length = minMatchLength - 1
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+					return
+				}
+				d.tokens.Reset()
+			}
+			s.ii = 0
+		} else {
+			// Reset, if we got a match this run.
+			if s.length >= minMatchLength {
+				s.ii = 0
+			}
+			// We have a byte waiting. Emit it.
+			if d.byteAvailable {
+				s.ii++
+				d.tokens.AddLiteral(d.window[s.index-1])
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+						return
+					}
+					d.tokens.Reset()
+				}
+				s.index++
+
+				// If we have a long run of no matches, skip additional bytes
+				// Resets when s.ii overflows after 64KB.
+				if n := int(s.ii) - d.chain; n > 0 {
+					n = 1 + int(n>>6)
+					for j := 0; j < n; j++ {
+						if s.index >= d.windowEnd-1 {
+							break
+						}
+						d.tokens.AddLiteral(d.window[s.index-1])
+						if d.tokens.n == maxFlateBlockTokens {
+							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+								return
+							}
+							d.tokens.Reset()
+						}
+						// Index...
+						if s.index < s.maxInsertIndex {
+							h := hash4(d.window[s.index:])
+							ch := s.hashHead[h]
+							s.chainHead = int(ch)
+							s.hashPrev[s.index&windowMask] = ch
+							s.hashHead[h] = uint32(s.index + s.hashOffset)
+						}
+						s.index++
+					}
+					// Flush last byte
+					d.tokens.AddLiteral(d.window[s.index-1])
+					d.byteAvailable = false
+					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
+					if d.tokens.n == maxFlateBlockTokens {
+						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+							return
+						}
+						d.tokens.Reset()
+					}
+				}
+			} else {
+				s.index++
+				d.byteAvailable = true
+			}
+		}
+	}
+}
+
+func (d *compressor) store() {
+	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		d.windowEnd = 0
+	}
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+		return
+	}
+	d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+	d.err = d.w.err
+	d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err +func (d *compressor) storeFast() { + // We only compress if we have maxStoreBlockSize. + if d.windowEnd < len(d.window) { + if !d.sync { + return + } + // Handle extremely small sizes. + if d.windowEnd < 128 { + if d.windowEnd == 0 { + return + } + if d.windowEnd <= 32 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + } else { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 + d.fast.Reset() + return + } + } + + d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) + // If we made zero matches, store the block as is. + if d.tokens.n == 0 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + // If we removed less than 1/16th, huffman compress the block. + } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. +func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).store + case level == ConstantCompression: + d.w.logNewTablePenalty = 10 + d.window = make([]byte, 32<<10) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeHuff + case level == DefaultCompression: + level = 5 + fallthrough + case level >= 1 && level <= 6: + d.w.logNewTablePenalty = 7 + d.fast = newFastEnc(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + case 7 <= level && level <= 9: + d.w.logNewTablePenalty = 8 + d.state = &advancedState{} + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + d.step = (*compressor).deflateLazy + case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize: + d.w.logNewTablePenalty = 7 + d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize} + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + d.level = level + return nil +} + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + // We only need to reset a few things for Snappy. + if d.fast != nil { + d.fast.Reset() + d.windowEnd = 0 + d.tokens.Reset() + return + } + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. 
+ d.windowEnd = 0 + default: + s := d.state + s.chainHead = -1 + for i := range s.hashHead { + s.hashHead[i] = 0 + } + for i := range s.hashPrev { + s.hashPrev[i] = 0 + } + s.hashOffset = 1 + s.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.Reset() + s.length = minMatchLength - 1 + s.offset = 0 + s.ii = 0 + s.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + d.w.reset(nil) + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + zw, err := NewWriter(w, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. +const MinCustomWindowSize = 32 + +// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. +const MaxCustomWindowSize = windowSize + +// NewWriterWindow returns a new Writer compressing data with a custom window size. +// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. +func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { + if windowSize < MinCustomWindowSize { + return nil, errors.New("flate: requested window size less than MinWindowSize") + } + if windowSize > MaxCustomWindowSize { + return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize") + } + var dw Writer + if err := dw.d.init(w, -windowSize); err != nil { + return nil, err + } + return &dw, nil +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. 
+// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. +func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if len(w.dict) > 0 { + // w was created with NewWriterDict + w.d.reset(dst) + if dst != nil { + w.d.fillWindow(w.dict) + } + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 0000000..bb36351 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. 
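+//
+// As a rough sketch of how a decompressor drives this type:
+//
+//	var dd dictDecoder
+//	dd.init(32<<10, nil) // 32 KiB window, no preset dictionary
+//	dd.writeByte('a')    // literal insertion
+//	dd.writeCopy(1, 3)   // overlapping backward copy, RLE-style "aaa"
+//	_ = dd.readFlush()   // yields "aaaa", ready to hand to the caller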
+func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. +func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. 
+// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. +func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go new file mode 100644 index 0000000..c8124b5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -0,0 +1,193 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" +) + +type fastEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newFastEnc(level int) fastEnc { + switch level { + case 1: + return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} + case 2: + return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} + case 3: + return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} + case 4: + return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} + case 5: + return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} + case 6: + return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset + + bTableBits = 17 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. + bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. +) + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type tableEntry struct { + offset int32 +} + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. 
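+//
+// Offsets stored in the match tables are, roughly, hist-index plus cur, so
+// they stay meaningful when addBlock slides the history buffer down; the
+// level encoders recover the buffer index of a stored offset o as o-cur and
+// reject it when it falls outside the match window.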
+type fastGen struct { + hist []byte + cur int32 +} + +func (e *fastGen) addBlock(src []byte) int32 { + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < maxMatchOffset*2 { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) +} + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastGen) Reset() { + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. 
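+	// For example, with maxMatchOffset of 32K, advancing e.cur by
+	// maxMatchOffset+len(e.hist) guarantees that every stale table entry
+	// now fails the maximum-offset distance check in the encoders, so the
+	// tables never have to be cleared here.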
+ if e.cur <= bufferReset { + e.cur += maxMatchOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 0000000..f70594c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,1182 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. + codegenCodeCount = 19 + badCode = 255 + + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 246 +) + +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = [32]uint8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = [32]uint8{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + +// offset code word extra bits. +var offsetExtraBits = [32]int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, +} + +var offsetCombined = [32]uint32{} + +func init() { + var offsetBase = [32]uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, + } + + for i := range offsetCombined[:] { + // Don't use extended window values... + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { + continue + } + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) + } +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. 
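+	// Bits are appended LSB-first, as DEFLATE requires: a new code c of
+	// length n is ORed in as c<<nbits, and whole bytes are drained into
+	// bytes once at least 48 bits have accumulated (see writeOutBits).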
+ bits uint64 + nbits uint8 + nbytes uint8 + lastHuffMan bool + literalEncoding *huffmanEncoder + tmpLitEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error + lastHeader int + // Set between 0 (reused block can be up to 2x the size) + logNewTablePenalty uint + bytes [256 + 8]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 + + // codegen must have an extra space for the final symbol. + codegen [literalCount + offsetCodeCount + 1]uint8 +} + +// Huffman reuse. +// +// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. +// +// This is controlled by several variables: +// +// If lastHeader is non-zero the Huffman table can be reused. +// This also indicates that a Huffman table has been generated that can output all +// possible symbols. +// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated +// an EOB with the previous table must be written. +// +// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. +// +// An incoming block estimates the output size of a new table using a 'fresh' by calculating the +// optimal size and adding a penalty in 'logNewTablePenalty'. +// A Huffman table is not optimal, which is why we add a penalty, and generating a new table +// is slower both for compression and decompression. + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalEncoding: newHuffmanEncoder(literalCount), + tmpLitEncoding: newHuffmanEncoder(literalCount), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.lastHeader = 0 + w.lastHuffMan = false +} + +func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { + a := t.offHist[:offsetCodeCount] + b := w.offsetEncoding.codes + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.extraHist[:literalCount-256] + b = w.literalEncoding.codes[256:literalCount] + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.litHist[:256] + b = w.literalEncoding.codes[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + return true +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { + w.bits |= uint64(b) << (w.nbits & 63) + w.nbits += nb + if w.nbits >= 48 { + w.writeOutBits() + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } 
+ w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. + codegen := w.codegen[:] // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = litEnc.codes[i].len() + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = offEnc.codes[i].len() + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +func (w *huffmanBitWriter) codegens() int { + numCodegens := len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return numCodegens +} + +func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7, numCodegens +} + +// dynamicSize returns the size of dynamically encoded data in bits. 
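+// When writeBlockDynamic weighs reusing the previous tables, this is the
+// relevant cost: the header was already paid for by an earlier block, so
+// only the Huffman-coded symbol bits are counted.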
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+	size = litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:])
+	return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+	header, numCodegens := w.headerSize()
+	size = header +
+		litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:]) +
+		extraBits
+	return size, numCodegens
+}
+
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+	total := 0
+	for i, n := range w.literalFreq[257:literalCount] {
+		total += int(n) * int(lengthExtraBits[i&31])
+	}
+	for i, n := range w.offsetFreq[:offsetCodeCount] {
+		total += int(n) * int(offsetExtraBits[i&31])
+	}
+	return total
+}
+
+// fixedSize returns the size of data encoded with the fixed Huffman tables in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+	return 3 +
+		fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+		fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
+		extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the block
+// fits inside a single block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+	if in == nil {
+		return 0, false
+	}
+	if len(in) <= maxStoreBlockSize {
+		return (len(in) + 5) * 8, true
+	}
+	return 0, false
+}
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+	// The function does not get inlined if we "& 63" the shift.
+	w.bits |= c.code64() << (w.nbits & 63)
+	w.nbits += c.len()
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+}
+
+// writeOutBits will write bits to the buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+	bits := w.bits
+	w.bits >>= 48
+	w.nbits -= 48
+	n := w.nbytes
+
+	// We over-write, but faster...
+	binary.LittleEndian.PutUint64(w.bytes[n:], bits)
+	n += 6
+
+	if n >= bufferFlushSize {
+		if w.err != nil {
+			n = 0
+			return
+		}
+		w.write(w.bytes[:n])
+		n = 0
+	}
+
+	w.nbytes = n
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+//	numLiterals  The number of literals specified in codegen
+//	numOffsets   The number of offsets specified in codegen
+//	numCodegens  The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+	if w.err != nil {
+		return
+	}
+	var firstBits int32 = 4
+	if isEof {
+		firstBits = 5
+	}
+	w.writeBits(firstBits, 3)
+	w.writeBits(int32(numLiterals-257), 5)
+	w.writeBits(int32(numOffsets-1), 5)
+	w.writeBits(int32(numCodegens-4), 4)
+
+	for i := 0; i < numCodegens; i++ {
+		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
+		w.writeBits(int32(value), 3)
+	}
+
+	i := 0
+	for {
+		var codeWord = uint32(w.codegen[i])
+		i++
+		if codeWord == badCode {
+			break
+		}
+		w.writeCode(w.codegenEncoding.codes[codeWord])
+
+		switch codeWord {
+		case 16:
+			w.writeBits(int32(w.codegen[i]), 2)
+			i++
+		case 17:
+			w.writeBits(int32(w.codegen[i]), 3)
+			i++
+		case 18:
+			w.writeBits(int32(w.codegen[i]), 7)
+			i++
+		}
+	}
+}
+
+// writeStoredHeader will write a stored header.
+// If the stored block is only used for EOF,
+// it is replaced with a fixed huffman block.
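+//
+// A stored block costs 3 header bits, padding out to a byte boundary, and
+// 4 bytes of LEN/NLEN framing before the raw data (RFC 1951, section
+// 3.2.4); an EOF-only fixed block is just 3 header bits plus a 7-bit EOB
+// code, which is why the substitution is worthwhile.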
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. + if length == 0 && isEof { + w.writeFixedHeader(isEof) + // EOB: 7 bits, value: 0 + w.writeBits(0, 7) + w.flush() + return + } + + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. +// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens.AddEOB() + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + numLiterals, numOffsets := w.indexTokens(tokens, false) + w.generate() + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + extraBits = w.extraBitSize() + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + sync = sync || eof + if sync { + tokens.AddEOB() + } + + // We cannot reuse pure huffman table, and must mark as EOF. + if (w.lastHuffMan || eof) && w.lastHeader > 0 { + // We will not try to reuse. 
+ w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... + if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } + + var size int + + // Check if we should reuse. + if w.lastHeader > 0 { + // Estimate size for using a new table. + // Use the previous header size as the best estimate. + newSize := w.lastHeader + tokens.EstimatedBits() + newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty + + // The estimated size is calculated as an optimal table. + // We add a penalty to make it more realistic and re-use a bit more. + reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits + + // Check if a new table is better. + if newSize < reuseSize { + // Write the EOB we owe. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + size = newSize + w.lastHeader = 0 + } else { + size = reuseSize + } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + + // We want a new block/table + if w.lastHeader == 0 { + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + + var numCodegens int + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. 
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + if !sync { + w.lastHeader, _ = w.headerSize() + } + w.lastHuffMan = false + } + + if sync { + w.lastHeader = 0 + } + // Write the tokens. + w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) +} + +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. +func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { + //copy(w.literalFreq[:], t.litHist[:]) + *(*[256]uint16)(w.literalFreq[:]) = t.litHist + //copy(w.literalFreq[256:], t.extraHist[:]) + *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist + w.offsetFreq = t.offHist + + if t.n == 0 { + return + } + if filled { + return maxNumLit, maxNumDist + } + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + return +} + +func (w *huffmanBitWriter) generate() { + w.literalEncoding.generate(w.literalFreq[:literalCount], 15) + w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + if len(tokens) == 0 { + return + } + + // Only last token should be endBlockMarker. + var deferEOB bool + if tokens[len(tokens)-1] == endBlockMarker { + tokens = tokens[:len(tokens)-1] + deferEOB = true + } + + // Create slices up to the next power of two to avoid bounds checks. + lits := leCodes[:256] + offs := oeCodes[:32] + lengths := leCodes[lengthCodesStart:] + lengths = lengths[:32] + + // Go 1.16 LOVES having these on stack. 
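+	// Working on local copies lets the compiler keep the hot bit-buffer
+	// state in registers through the token loop; the fields are written
+	// back once after the loop.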
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + for _, t := range tokens { + if t < 256 { + //w.writeCode(lits[t.literal()]) + c := lits[t] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + continue + } + + // Write the length + length := t.length() + lengthCode := lengthCode(length) & 31 + if false { + w.writeCode(lengths[lengthCode]) + } else { + // inlined + c := lengths[lengthCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] + //w.writeBits(extraLength, extraLengthBits) + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) + nbits += extraLengthBits + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + // Write the offset + offset := t.offset() + offsetCode := (offset >> 16) & 31 + if false { + w.writeCode(offs[offsetCode]) + } else { + // inlined + c := offs[offsetCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] + //w.writeBits(extraOffset, extraOffsetBits) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if deferEOB { + w.writeCode(leCodes[endBlockMarker]) + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. 
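+// Only the 256 literal codes and the end-of-block marker are used in this
+// mode, which is why the shared static huffOffset table above can stand in
+// for the offset encoding.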
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq[:] { + w.literalFreq[i] = 0 + } + if !w.lastHuffMan { + for i := range w.offsetFreq[:] { + w.offsetFreq[i] = 0 + } + } + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + // Add everything as literals + // We have to estimate the header size. + // Assume header is around 70 bytes: + // https://stackoverflow.com/a/25454430 + const guessHeaderSizeBits = 70 * 8 + histogram(input, w.literalFreq[:numLiterals]) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) + estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) + if estBits < math.MaxInt32 { + estBits += w.lastHeader + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty + } + + // Store bytes, if we don't get a reasonable improvement. + if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + if w.lastHeader > 0 { + reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) + + if estBits < reuseSize { + if debugDeflate { + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") + } + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } else if debugDeflate { + fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + } + } + + count := 0 + if w.lastHeader == 0 { + // Use the temp encoding, so swap. + w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + numCodegens := w.codegens() + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHuffMan = true + w.lastHeader, _ = w.headerSize() + if debugDeflate { + count += w.lastHeader + fmt.Println("header:", count/8) + } + } + + encoding := w.literalEncoding.codes[:256] + // Go 1.16 LOVES having these on stack. At least 1.5x the speed. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. 
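+		// Codes are at most 15 bits, so the three codes written per
+		// iteration add at most 45 bits; draining whole bytes first keeps
+		// the 64-bit accumulator from overflowing.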
+ if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + a, b := encoding[input[0]], encoding[input[1]] + bits |= a.code64() << (nbits & 63) + bits |= b.code64() << ((nbits + a.len()) & 63) + c := encoding[input[2]] + nbits += b.len() + a.len() + bits |= c.code64() << (nbits & 63) + nbits += c.len() + input = input[3:] + } + + // Remaining... + for _, t := range input { + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= c.code64() << (nbits & 63) + + nbits += c.len() + if debugDeflate { + count += int(c.len()) + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if debugDeflate { + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() + } + + if eof || sync { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go new file mode 100644 index 0000000..be7b58b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -0,0 +1,417 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "math" + "math/bits" +) + +const ( + maxBitsLimit = 16 + // number of valid literals + literalCount = 286 +) + +// hcode is a huffman code with a bit code and bit length. +type hcode uint32 + +func (h hcode) len() uint8 { + return uint8(h) +} + +func (h hcode) code64() uint64 { + return uint64(h >> 8) +} + +func (h hcode) zero() bool { + return h == 0 +} + +type huffmanEncoder struct { + codes []hcode + bitCount [17]int32 + + // Allocate a reusable buffer with the longest possible frequency table. + // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. + // The largest of these is literalCount, so we allocate for that case. + freqcache [literalCount + 1]literalNode +} + +type literalNode struct { + literal uint16 + freq uint16 +} + +// A levelInfo describes the state of the constructed tree for a given depth. +type levelInfo struct { + // Our level. for better printing + level int32 + + // The frequency of the last node at this level + lastFreq int32 + + // The frequency of the next character to add to this level + nextCharFreq int32 + + // The frequency of the next pair (from level below) to add to this level. + // Only valid if the "needed" value of the next lower level is 0. + nextPairFreq int32 + + // The number of chains remaining to generate for this level before moving + // up to the next level + needed int32 +} + +// set sets the code and length of an hcode. 
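+// The packing matches len and code64 above: the low 8 bits hold the code
+// length and the remaining bits hold the (bit-reversed) code itself.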
+func (h *hcode) set(code uint16, length uint8) {
+	*h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+	return hcode(length) | (hcode(code) << 8)
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	// Make capacity to next power of two.
+	c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(literalCount)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < literalCount; ch++ {
+		var bits uint16
+		var size uint8
+		switch {
+		case ch < 144:
+			// size 8, 000110000  .. 10111111
+			bits = ch + 48
+			size = 8
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch - 144 + 400
+			size = 9
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch - 280 + 192
+			size = 8
+		}
+		codes[ch] = newhcode(reverseBits(bits, size), size)
+	}
+	return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
+	}
+	return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			total += int(f) * int(h.codes[i].len())
+		}
+	}
+	return total
+}
+
+func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
+	var total int
+	for _, f := range b {
+		total += int(h.codes[f].len())
+	}
+	return total
+}
+
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			code := h.codes[i]
+			if code.zero() {
+				return math.MaxInt32
+			}
+			total += int(f) * int(code.len())
+		}
+	}
+	return total
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+//
+//	and their associated frequencies. The array is in order of increasing
+//	frequency, and has as its last element a special element with frequency
+//	MaxInt32
+//
+// maxBits The maximum number of bits that should be used to encode any literal.
+//
+//	Must be less than 16.
+//
+// return An integer array in which array[i] indicates the number of literals
+//
+//	that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+	if maxBits >= maxBitsLimit {
+		panic("flate: maxBits too large")
+	}
+	n := int32(len(list))
+	list = list[0 : n+1]
+	list[n] = maxNode()
+
+	// The tree can't have greater depth than n - 1, no matter what. This
+	// saves a little bit of work in some small cases
+	if maxBits > n-1 {
+		maxBits = n - 1
+	}
+
+	// Create information about each of the levels.
+	// A bogus "Level 0" whose sole purpose is so that
+	// level1.prev.needed==0. This makes level1.nextPairFreq
+	// be a legitimate value that never gets chosen.
+	var levels [maxBitsLimit]levelInfo
+	// leafCounts[i] counts the number of literals at the left
+	// of ancestors of the rightmost node at level i.
+	// leafCounts[i][j] is the number of literals at the left
+	// of the level j ancestor.
+	var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+	// Descending to only have 1 bounds check.
+	l2f := int32(list[2].freq)
+	l1f := int32(list[1].freq)
+	l0f := int32(list[0].freq) + int32(list[1].freq)
+
+	for level := int32(1); level <= maxBits; level++ {
+		// For every level, the first two items are the first two characters.
+		// We initialize the levels as if we had already figured this out.
+		levels[level] = levelInfo{
+			level:        level,
+			lastFreq:     l1f,
+			nextCharFreq: l2f,
+			nextPairFreq: l0f,
+		}
+		leafCounts[level][level] = 2
+		if level == 1 {
+			levels[level].nextPairFreq = math.MaxInt32
+		}
+	}
+
+	// We need a total of 2*n - 2 items at top level and have already generated 2.
+	levels[maxBits].needed = 2*n - 4
+
+	level := uint32(maxBits)
+	for level < 16 {
+		l := &levels[level]
+		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+			// We've run out of both leaves and pairs.
+			// End all calculations for this level.
+			// To make sure we never come back to this level or any lower level,
+			// set nextPairFreq impossibly large.
+			l.needed = 0
+			levels[level+1].nextPairFreq = math.MaxInt32
+			level++
+			continue
+		}
+
+		prevFreq := l.lastFreq
+		if l.nextCharFreq < l.nextPairFreq {
+			// The next item on this row is a leaf node.
+			n := leafCounts[level][level] + 1
+			l.lastFreq = l.nextCharFreq
+			// Lower leafCounts are the same as the previous node.
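+			// Only the count at this level changes; the ancestor counts
+			// inherited from the previous node remain valid.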
+ leafCounts[level][level] = n + e := list[n] + if e.literal < math.MaxUint16 { + l.nextCharFreq = int32(e.freq) + } else { + l.nextCharFreq = math.MaxInt32 + } + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. + bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + sortByLiteral(chunk) + for _, node := range chunk { + h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { + list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + codes[i] = 0 + } + } + list[count] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. 
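+			// With one or two symbols, each gets a 1-bit code (0, then 1),
+			// assigned in order of increasing literal value.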
+ h.codes[node.literal].set(uint16(i), 1) + } + return + } + sortByFreq(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +// atLeastOne clamps the result between 1 and 15. +func atLeastOne(v float32) float32 { + if v < 1 { + return 1 + } + if v > 15 { + return 15 + } + return v +} + +func histogram(b []byte, h []uint16) { + if true && len(b) >= 8<<10 { + // Split for bigger inputs + histogramSplit(b, h) + } else { + h = h[:256] + for _, t := range b { + h[t]++ + } + } +} + +func histogramSplit(b []byte, h []uint16) { + // Tested, and slightly faster than 2-way. + // Writing to separate arrays and combining is also slightly slower. + h = h[:256] + for len(b)&3 != 0 { + h[b[0]]++ + b = b[1:] + } + n := len(b) / 4 + x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] + y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] + for i, t := range x { + v0 := &h[t] + v1 := &h[y[i]] + v3 := &h[w[i]] + v2 := &h[z[i]] + *v0++ + *v1++ + *v2++ + *v3++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go new file mode 100644 index 0000000..6c05ba8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -0,0 +1,159 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. +func sortByFreq(data []literalNode) { + n := len(data) + quickSortByFreq(data, 0, n, maxDepth(n)) +} + +func quickSortByFreq(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotByFreq(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSortByFreq(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortByFreq(data, mhi, b) + } else { + quickSortByFreq(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortByFreq(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortByFreq(data, a, b) + } +} + +func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. 
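+		// Sample nine elements at fixed offsets and take the median of their
+		// three medians as the pivot; this resists already-sorted and
+		// adversarial inputs.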
+ s := (hi - lo) / 8 + medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) + medianOfThreeSortByFreq(data, m, m-s, m+s) + medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeSortByFreq(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { + } + b := a + for { + for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. + protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot + } + for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSortByFreq(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// quickSortByFreq, loosely following Bentley and McIlroy, +// ``Engineering a Sort Function,'' SP&E November 1993. + +// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. 
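+// Ties on freq are broken by literal value, matching the ordering used by
+// quickSortByFreq above.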
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go new file mode 100644 index 0000000..93f1aea --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. +func sortByLiteral(data []literalNode) { + n := len(data) + quickSort(data, 0, n, maxDepth(n)) +} + +func quickSort(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivot(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSort(data, a, mlo, maxDepth) + a = mhi // i.e., quickSort(data, mhi, b) + } else { + quickSort(data, mhi, b, maxDepth) + b = mlo // i.e., quickSort(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].literal < data[i-6].literal { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSort(data, a, b) + } +} +func heapSort(data []literalNode, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDown(data, lo, i, first) + } +} + +// siftDown implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDown(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && data[first+child].literal < data[first+child+1].literal { + child++ + } + if data[first+root].literal > data[first+child].literal { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. 
+ s := (hi - lo) / 8 + medianOfThree(data, lo, lo+s, lo+2*s) + medianOfThree(data, m, m-s, m+s) + medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThree(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && data[a].literal < data[pivot].literal; a++ { + } + b := a + for { + for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot + } + for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. + protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].literal > data[pivot].literal { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot + } + for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSort(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && data[j].literal < data[j-1].literal; j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThree(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].literal < data[m1].literal { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 0000000..2f410d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,829 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+	"bufio"
+	"compress/flate"
+	"fmt"
+	"io"
+	"math/bits"
+	"sync"
+)
+
+const (
+	maxCodeLen     = 16 // max length of Huffman code
+	maxCodeLenMask = 15 // mask for max length of Huffman code
+	// The next three numbers come from the RFC section 3.2.7, with the
+	// additional proviso in section 3.2.5 which implies that distance codes
+	// 30 and 31 should never occur in compressed data.
+	maxNumLit  = 286
+	maxNumDist = 30
+	numCodes   = 19 // number of codes in Huffman meta-code
+
+	debugDecode = false
+)
+
+// Value of length - 3 and extra bits.
+type lengthExtra struct {
+	length, extra uint8
+}
+
+var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
+
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError = flate.CorruptInputError
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError = flate.ReadError
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError = flate.WriteError
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+	// Reset discards any buffered data and resets the Resetter as if it was
+	// newly initialized with the given reader.
+	Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value).
For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. +// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + maxRead int // the maximum number of bits we can read and not overread + chunks *[huffmanNumChunks]uint16 // chunks as described above + links [][]uint16 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(lengths []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.chunks == nil { + h.chunks = new([huffmanNumChunks]uint16) + } + + if h.maxRead != 0 { + *h = huffmanDecoder{chunks: h.chunks, links: h.links} + } + + // Count number of codes of each length, + // compute maxRead and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range lengths { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n&maxCodeLenMask]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i&maxCodeLenMask] = code + code += count[i&maxCodeLenMask] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
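+	// A complete canonical code accounts for exactly 1<<max bit patterns of
+	// length max, which is what the check below verifies.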
+	if code != 1<<uint(max) {
+		return false
+	}
+
+	h.maxRead = min
+
+	chunks := h.chunks[:]
+	for i := range chunks {
+		chunks[i] = 0
+	}
+
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		if cap(h.links) < huffmanNumChunks-link {
+			h.links = make([][]uint16, huffmanNumChunks-link)
+		} else {
+			h.links = h.links[:huffmanNumChunks-link]
+		}
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(bits.Reverse16(uint16(j)))
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			if cap(h.links[off]) < numLinks {
+				h.links[off] = make([]uint16, numLinks)
+			} else {
+				h.links[off] = h.links[off][:numLinks]
+			}
+		}
+	} else {
+		h.links = h.links[:0]
+	}
+
+	for i, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint16(i<<huffmanValueShift | n)
+		reverse := int(bits.Reverse16(uint16(code)))
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+// Reader is the actual read interface needed by NewReader.
+// If the passed in io.Reader does not also have ReadByte,
+// the NewReader will introduce its own buffering.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+type step uint8
+
+const (
+	copyData step = iota + 1
+	nextBlock
+	huffmanBytesBuffer
+	huffmanBytesReader
+	huffmanBufioReader
+	huffmanStringsReader
+	huffmanGenericReader
+)
+
+// Decompress state.
+type decompressor struct {
+	// Input source.
+	r       Reader
+	roffset int64
+
+	// Huffman decoders for literal/length, distance.
+	h1, h2 huffmanDecoder
+
+	// Length arrays used to define Huffman codes.
+	bits     *[maxNumLit + maxNumDist]int
+	codebits *[numCodes]int
+
+	// Output history, buffer.
+	dict dictDecoder
+
+	// Next step in the decompression,
+	// and decompression state.
+	step      step
+	stepState int
+	err       error
+	toRead    []byte
+	hl, hd    *huffmanDecoder
+	copyLen   int
+	copyDist  int
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Input bits, in top of b.
+ b uint32 + + nb uint + final bool +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlockDecoder() + if debugDecode { + fmt.Println("predefinied huffman block") + } + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlockDecoder() + if debugDecode { + fmt.Println("dynamic huffman block") + } + default: + // 3 is reserved. + if debugDecode { + fmt.Println("reserved data block encountered") + } + f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + + f.doStep() + + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// WriteTo implements the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.doStep() + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + if debugDecode { + fmt.Println("nlit > maxNumLit", nlit) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + if debugDecode { + fmt.Println("ndist > maxNumDist", ndist) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. + for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + if debugDecode { + fmt.Println("init codebits failed") + } + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. 
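+	// Symbols 0-15 are literal code lengths; 16 repeats the previous length
+	// 3-6 times, 17 repeats zero 3-10 times, and 18 repeats zero 11-138
+	// times (RFC 1951 section 3.2.7), as decoded below.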
+	for i, n := 0, nlit+ndist; i < n; {
+		x, err := f.huffSym(&f.h1)
+		if err != nil {
+			return err
+		}
+		if x < 16 {
+			// Actual length.
+			f.bits[i] = x
+			i++
+			continue
+		}
+		// Repeat previous length or zero.
+		var rep int
+		var nb uint
+		var b int
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				if debugDecode {
+					fmt.Println("i==0")
+				}
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				if debugDecode {
+					fmt.Println("morebits:", err)
+				}
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+		f.b >>= nb & regSizeMaskUint32
+		f.nb -= nb
+		if i+rep > n {
+			if debugDecode {
+				fmt.Println("i+rep > n", i, rep, n)
+			}
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		if debugDecode {
+			fmt.Println("init2 failed")
+		}
+		return CorruptInputError(f.roffset)
+	}
+
+	// As an optimization, we can initialize the maxRead bits to read at a time
+	// for the HLIT tree to the length of the EOB marker since we know that
+	// every block must terminate with one. This preserves the property that
+	// we never read any extra bytes after the end of the DEFLATE stream.
+	if f.h1.maxRead < f.bits[endBlockMarker] {
+		f.h1.maxRead = f.bits[endBlockMarker]
+	}
+	if !f.final {
+		// If not the final block, the smallest block possible is
+		// a predefined table, BTYPE=01, with a single EOB marker.
+		// This will take up 3 + 7 bits.
+		f.h1.maxRead += 10
+	}
+
+	return nil
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+	// Uncompressed.
+	// Discard current half-byte.
+	left := (f.nb) & 7
+	f.nb -= left
+	f.b >>= left
+
+	offBytes := f.nb >> 3
+	// Unfilled values will be overwritten.
+	f.buf[0] = uint8(f.b)
+	f.buf[1] = uint8(f.b >> 8)
+	f.buf[2] = uint8(f.b >> 16)
+	f.buf[3] = uint8(f.b >> 24)
+
+	f.roffset += int64(offBytes)
+	f.nb, f.b = 0, 0
+
+	// Length then ones-complement of length.
+	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+	f.roffset += int64(nr)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+	if nn != ^n {
+		if debugDecode {
+			ncomp := ^n
+			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+		}
+		f.err = CorruptInputError(f.roffset)
+		return
+	}
+
+	if n == 0 {
+		f.toRead = f.dict.readFlush()
+		f.finishBlock()
+		return
+	}
+
+	f.copyLen = int(n)
+	f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
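+// When the window fills, the pending bytes are handed to the caller via
+// toRead and f.step is set to copyData so the copy resumes on the next Read.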
+func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + f.err = noEOF(err) + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = nextBlock +} + +func (f *decompressor) doStep() { + switch f.step { + case copyData: + f.copyData() + case nextBlock: + f.nextBlock() + case huffmanBytesBuffer: + f.huffmanBytesBuffer() + case huffmanBytesReader: + f.huffmanBytesReader() + case huffmanBufioReader: + f.huffmanBufioReader() + case huffmanStringsReader: + f.huffmanStringsReader() + case huffmanGenericReader: + f.huffmanGenericReader() + default: + panic("BUG: unexpected step state") + } +} + +// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. +func noEOF(e error) error { + if e == io.EOF { + return io.ErrUnexpectedEOF + } + return e +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << (f.nb & regSizeMaskUint32) + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + return 0, noEOF(err) + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := h.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. 
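+		// Fixed literal/length code lengths: symbols 0-143 use 8 bits,
+		// 144-255 use 9 bits, 256-279 use 7 bits, and 280-287 use 8 bits.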
+ var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go new file mode 100644 index 0000000..2b2f993 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -0,0 +1,1283 @@ +// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( + "bufio" + "bytes" + "fmt" + "math/bits" + "strings" +) + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesBuffer() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Buffer) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
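+			// The first huffmanChunkBits bits index the primary chunks
+			// table; longer codes chase one link into the overflow table
+			// selected by the chunk's value field.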
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesBuffer + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
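+			// RFC 1951 section 3.2.5: the low bit of the code plus nb extra
+			// bits select the offset above the base distance 1<<(nb+1) + 1.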
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesBuffer // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBytesReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bytes.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBufioReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bufio.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBufioReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBufioReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanStringsReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*strings.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanStringsReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanGenericReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanGenericReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() {
+	switch f.r.(type) {
+	case *bytes.Buffer:
+		f.huffmanBytesBuffer()
+	case *bytes.Reader:
+		f.huffmanBytesReader()
+	case *bufio.Reader:
+		f.huffmanBufioReader()
+	case *strings.Reader:
+		f.huffmanStringsReader()
+	case Reader:
+		f.huffmanGenericReader()
+	default:
+		f.huffmanGenericReader()
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 0000000..703b9a8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,241 @@
+package flate
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math/bits"
+)
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL1 struct {
+	fastGen
+	table [tableSize]tableEntry
+}
+
+// EncodeL1 uses a similar algorithm to level 1
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashBytes              = 5
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
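+	// sLimit keeps the search at least inputMargin bytes away from the end
+	// of src, so the unconditional 8-byte loads (load6432) below can never
+	// read past the buffer; the remaining tail is flushed as literals in
+	// emitRemainder.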
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+
+	for {
+		const skipLog = 5
+		const doEvery = 2
+
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashLen(cv, tableBits, hashBytes)
+			candidate = e.table[nextHash]
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+
+			now := load6432(src, nextS)
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+			nextHash = hashLen(now, tableBits, hashBytes)
+
+			offset := s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+
+			// Do one right away...
+			cv = now
+			s = nextS
+			nextS++
+			candidate = e.table[nextHash]
+			now >>= 8
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+			offset = s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+			cv = now
+			s = nextS
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			t := candidate.offset - e.cur
+			var l = int32(4)
+			if false {
+				l = e.matchlenLong(s+4, t+4, src) + 4
+			} else {
+				// inlined:
+				a := src[s+4:]
+				b := src[t+4:]
+				for len(a) >= 8 {
+					if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+						l += int32(bits.TrailingZeros64(diff) >> 3)
+						break
+					}
+					l += 8
+					a = a[8:]
+					b = b[8:]
+				}
+				if len(a) < 8 {
+					b = b[:len(a)]
+					for i := range a {
+						if a[i] != b[i] {
+							break
+						}
+						l++
+					}
+				}
+			}
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			// Save the match found
+			if false {
+				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			} else {
+				// Inlined...
+				xoffset := uint32(s - t - baseMatchOffset)
+				xlength := l
+				oc := offsetCode(xoffset)
+				xoffset |= oc << 16
+				for xlength > 0 {
+					xl := xlength
+					if xl > 258 {
+						if xl > 258+baseMatchLength {
+							xl = 258
+						} else {
+							xl = 258 - baseMatchLength
+						}
+					}
+					xlength -= xl
+					xl -= baseMatchLength
+					dst.extraHist[lengthCodes1[uint8(xl)]]++
+					dst.offHist[oc]++
+					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+					dst.n++
+				}
+			}
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+			if s >= sLimit {
+				// Index first pair after match end.
+				if int(s+l+8) < len(src) {
+					cv := load6432(src, s)
+					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+				}
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
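+			// A single 8-byte load at s-2 covers both updates: the low bytes
+			// hash position s-2, and x>>16 re-centers the same window on s.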
+ x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, tableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashLen(x, tableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { + cv = x >> 8 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go new file mode 100644 index 0000000..876dfbe --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -0,0 +1,214 @@ +package flate + +import "fmt" + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL2 struct { + fastGen + table [bTableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *fastEncL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + // When should we start skipping if we haven't found matches in a long while. + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, bTableBits, hashBytes) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash] + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur} + nextHash = hashLen(now, bTableBits, hashBytes) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + + // Do one right away... 
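+			// Second probe in the same iteration: step forward to nextS and
+			// test the candidate already fetched for it before looping.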
+ cv = now + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every second hash in-between, but offset by 1. + for i := s - l + 2; i < s-5; i += 7 { + x := load6432(src, i) + nextHash := hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 2} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 4} + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, bTableBits, hashBytes) + prevHash2 := hashLen(x>>8, bTableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + e.table[prevHash2] = tableEntry{offset: o + 1} + currHash := hashLen(x>>16, bTableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { + cv = x >> 24 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go new file mode 100644 index 0000000..7aa2b72 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -0,0 +1,241 @@ +package flate + +import "fmt" + +// fastEncL3 +type fastEncL3 struct { + fastGen + table [1 << 16]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *fastEncL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + } + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + e.table[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // Skip if too small. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 7 + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, tableBits, hashBytes) + s = nextS + nextS = s + 1 + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash] + now := load6432(src, nextS) + + // Safe offset distance until s + 4... + minOffset := e.cur + s - (maxMatchOffset - 4) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} + + // Check both candidates + candidate = candidates.Cur + if candidate.offset < minOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { + break + } + // Both match and are valid, pick longest. + offset := s - (candidate.offset - e.cur) + o2 := s - (candidates.Prev.offset - e.cur) + l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) + if l2 > l1 { + candidate = candidates.Prev + } + break + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + } + cv = now + } + + // Call emitCopy, and then see if another emitCopy could be our next + // move. 
+ // move.
Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+8) < len(src) && t > 0 { + cv = load6432(src, t) + nextHash := hashLen(cv, tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + t}, + } + } + goto emitRemainder + } + + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 6 { + nextHash := hashLen(load6432(src, i), tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} + } + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 2}, + } + x >>= 8 + prevHash = hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 1}, + } + x >>= 8 + currHash := hashLen(x, tableBits, hashBytes) + candidates := e.table[currHash] + cv = x + e.table[currHash] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur}, + } + + // Check both candidates + candidate = candidates.Cur + minOffset := e.cur + s - (maxMatchOffset - 4) + + if candidate.offset > minOffset { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Match at prev... + continue + } + } + cv = x >> 8 + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
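+		// With no tokens at all, returning early leaves the block for the
+		// caller, which can emit it in stored (uncompressed) form instead of
+		// a literal-only Huffman block.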
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go new file mode 100644 index 0000000..23c08b3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -0,0 +1,221 @@ +package flate + +import "fmt" + +type fastEncL4 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntry +} + +func (e *fastEncL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.bTable[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + e.bTable[nextHashL] = entry + + t = lCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + // We got a long match. Use that. + break + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + lCandidate = e.bTable[hash7(next, tableBits)] + + // If the next long is a candidate, check if we should use that instead... + lOff := nextS - (lCandidate.offset - e.cur) + if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) + if l2 > l1 { + s = nextS + t = lCandidate.offset - e.cur + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. 
Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic("s-t") + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every 3rd hash in-between + if true { + i := nextS + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + + i += 3 + for ; i < s-1; i += 3 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + e.bTable[prevHashL] = tableEntry{offset: o} + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go new file mode 100644 index 0000000..1f61ec1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -0,0 +1,708 @@ +package flate + +import "fmt" + +type fastEncL5 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
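+	// Offsets in the tables are stored relative to e.cur. Rebasing by
+	// e.cur-maxMatchOffset keeps entries still inside the match window
+	// reachable, while anything at or below minOff collapses to 0, which
+	// reads as "no candidate".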
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... 
+ t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. 
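+		// One 8-byte load at s-1 feeds both tables: hashLen takes the short
+		// (4-byte) hash and hash7 the long (7-byte) hash of the same window.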
+ x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// fastEncL5Window is a level 5 encoder, +// but with a custom window size. +type fastEncL5Window struct { + hist []byte + cur int32 + maxOffset int32 + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + maxMatchOffset := e.maxOffset + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
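+	// cv caches the 8 bytes at s; the search loop derives both the short
+	// and the long hash from it instead of re-reading src.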
+ cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... + t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
+ const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// Reset the encoding table. +func (e *fastEncL5Window) Reset() { + // We keep the same allocs, since we are compressing the same block sizes. + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. 
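+	// Advancing e.cur past every stored offset is cheaper than zeroing the
+	// tables: stale entries simply fail the offset-window check and are
+	// treated as "no candidate".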
+ if e.cur <= int32(bufferReset) { + e.cur += e.maxOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} + +func (e *fastEncL5Window) addBlock(src []byte) int32 { + // check if we have space already + maxMatchOffset := e.maxOffset + + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < int(maxMatchOffset*2) { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go new file mode 100644 index 0000000..f1e9d98 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -0,0 +1,325 @@ +package flate + +import "fmt" + +type fastEncL6 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL6) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + // Repeat MUST be > 1 and within range + repeat := int32(1) + for { + const skipLog = 7 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + // Calculate hashes of 'next' + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Long candidate matches at least 4 bytes. + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check the previous long candidate as well. + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + // Current value did not match, but check if previous long value does. + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... 
+ l = e.matchlen(s+4, t+4, src) + 4 + + // Look up next long candidate (at nextS) + lCandidate = e.bTable[nextHashL] + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check repeat at s + repOff + const repOff = 1 + t2 := s - repeat + repOff + if load3232(src, t2) == uint32(cv>>(8*repOff)) { + ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + if ml > l { + t = t2 + l = ml + s += repOff + // Not worth checking more. + break + } + } + + // If the next long is a candidate, use that... + t2 = lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + // This is ok, but check previous as well. + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end-of-match... + if sAt := s + l; sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] + // Test current + t2 := eLong.Cur.offset - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if off < maxMatchOffset { + if off > 0 && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + // Test next: + t2 = eLong.Prev.offset - e.cur - l + skipBeginning + off := s2 - t2 + if off > 0 && off < maxMatchOffset && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + repeat = s - t + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index after match end. + for i := nextS + 1; i < int32(len(src))-8; i += 2 { + cv := load6432(src, i) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur + } + goto emitRemainder + } + + // Store every long hash in-between and every second short. 
+ if true { + for i := nextS + 1; i < s-1; i += 2 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong2 := &e.bTable[hash7(cv>>8, tableBits)] + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong.Cur, eLong.Prev = t, eLong.Cur + eLong2.Cur, eLong2.Prev = t2, eLong2.Cur + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + cv = load6432(src, s) + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go new file mode 100644 index 0000000..4bd3885 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package flate + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s new file mode 100644 index 0000000..9a7655c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go new file mode 100644 index 0000000..ad5cd81 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
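+
+// As a worked example of the word-at-a-time loop below (little endian;
+// strings chosen purely for illustration): comparing "gopherXY" with
+// "gopherZW", the XOR of the two 8-byte loads is zero in its low six bytes,
+// so
+//
+//	diff := le64(a) ^ le64(b)       // low 48 bits are zero, bit 49 is set
+//	bits.TrailingZeros64(diff) >> 3 // 49 >> 3 = 6 matching bytes
+//
+// which is exactly what the byte-by-byte fallback loop would have produced.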
+ +package flate + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go new file mode 100644 index 0000000..6ed2806 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go @@ -0,0 +1,37 @@ +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. + + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 7 + reg8SizeMask16 = 15 + reg8SizeMask32 = 31 + reg8SizeMask64 = 63 + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = reg8SizeMask8 + reg16SizeMask16 = reg8SizeMask16 + reg16SizeMask32 = reg8SizeMask32 + reg16SizeMask64 = reg8SizeMask64 + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = reg8SizeMask8 + reg32SizeMask16 = reg8SizeMask16 + reg32SizeMask32 = reg8SizeMask32 + reg32SizeMask64 = reg8SizeMask64 + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = reg8SizeMask8 + reg64SizeMask16 = reg8SizeMask16 + reg64SizeMask32 = reg8SizeMask32 + reg64SizeMask64 = reg8SizeMask64 + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = reg8SizeMask8 + regSizeMaskUint16 = reg8SizeMask16 + regSizeMaskUint32 = reg8SizeMask32 + regSizeMaskUint64 = reg8SizeMask64 +) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go new file mode 100644 index 0000000..1b7a2cb --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -0,0 +1,40 @@ +//go:build !amd64 +// +build !amd64 + +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. 
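+	// For example (a sketch): with these definitions a shift written as
+	//
+	//	v >> (n & regSizeMaskUint64)
+	//
+	// is an ordinary unmasked shift in this file, because the masks below
+	// are all ones, while on amd64 the same source compiles to a bare SHRQ,
+	// since the 63-valued mask there matches what the hardware does anyway.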
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 0xff + reg8SizeMask16 = 0xff + reg8SizeMask32 = 0xff + reg8SizeMask64 = 0xff + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = 0xffff + reg16SizeMask16 = 0xffff + reg16SizeMask32 = 0xffff + reg16SizeMask64 = 0xffff + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = 0xffffffff + reg32SizeMask16 = 0xffffffff + reg32SizeMask32 = 0xffffffff + reg32SizeMask64 = 0xffffffff + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = 0xffffffffffffffff + reg64SizeMask16 = 0xffffffffffffffff + reg64SizeMask32 = 0xffffffffffffffff + reg64SizeMask64 = 0xffffffffffffffff + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = ^uint(0) + regSizeMaskUint16 = ^uint(0) + regSizeMaskUint32 = ^uint(0) + regSizeMaskUint64 = ^uint(0) +) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go new file mode 100644 index 0000000..f3d4139 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -0,0 +1,318 @@ +package flate + +import ( + "io" + "math" + "sync" +) + +const ( + maxStatelessBlock = math.MaxInt16 + // dictionary will be taken from maxStatelessBlock, so limit it. + maxStatelessDict = 8 << 10 + + slTableBits = 13 + slTableSize = 1 << slTableBits + slTableShift = 32 - slTableBits +) + +type statelessWriter struct { + dst io.Writer + closed bool +} + +func (s *statelessWriter) Close() error { + if s.closed { + return nil + } + s.closed = true + // Emit EOF block + return StatelessDeflate(s.dst, nil, true, nil) +} + +func (s *statelessWriter) Write(p []byte) (n int, err error) { + err = StatelessDeflate(s.dst, p, false, nil) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (s *statelessWriter) Reset(w io.Writer) { + s.dst = w + s.closed = false +} + +// NewStatelessWriter will do compression but without maintaining any state +// between Write calls. +// There will be no memory kept between Write calls, +// but compression and speed will be suboptimal. +// Because of this, the size of actual Write calls will affect output size. +func NewStatelessWriter(dst io.Writer) io.WriteCloser { + return &statelessWriter{dst: dst} +} + +// bitWriterPool contains bit writers that can be reused. +var bitWriterPool = sync.Pool{ + New: func() interface{} { + return newHuffmanBitWriter(nil) + }, +} + +// StatelessDeflate allows compressing directly to a Writer without retaining state. +// When returning everything will be flushed. +// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. +// Longer dictionaries will be truncated and will still produce valid output. +// Sending nil dictionary is perfectly fine. +func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { + var dst tokens + bw := bitWriterPool.Get().(*huffmanBitWriter) + bw.reset(out) + defer func() { + // don't keep a reference to our output + bw.reset(nil) + bitWriterPool.Put(bw) + }() + if eof && len(in) == 0 { + // Just write an EOF block. + // Could be faster... + bw.writeStoredHeader(0, true) + bw.flush() + return bw.err + } + + // Truncate dict + if len(dict) > maxStatelessDict { + dict = dict[len(dict)-maxStatelessDict:] + } + + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
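+	// A minimal usage sketch of this entry point via the wrapper above
+	// (buf and payload are hypothetical):
+	//
+	//	w := NewStatelessWriter(&buf)
+	//	w.Write(payload) // each Write is compressed and flushed independently
+	//	w.Close()        // emits the final EOF block
+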
+ var inDict []byte + + for len(in) > 0 { + todo := in + if len(inDict) > 0 { + if len(todo) > maxStatelessBlock-maxStatelessDict { + todo = todo[:maxStatelessBlock-maxStatelessDict] + } + } else if len(todo) > maxStatelessBlock-len(dict) { + todo = todo[:maxStatelessBlock-len(dict)] + } + inOrg := in + in = in[len(todo):] + uncompressed := todo + if len(dict) > 0 { + // combine dict and source + bufLen := len(todo) + len(dict) + combined := make([]byte, bufLen) + copy(combined, dict) + copy(combined[len(dict):], todo) + todo = combined + } + // Compress + if len(inDict) == 0 { + statelessEnc(&dst, todo, int16(len(dict))) + } else { + statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + } + isEof := eof && len(in) == 0 + + if dst.n == 0 { + bw.writeStoredHeader(len(uncompressed), isEof) + if bw.err != nil { + return bw.err + } + bw.writeBytes(uncompressed) + } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { + // If we removed less than 1/16th, huffman compress the block. + bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) + } else { + bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + } + if len(in) > 0 { + // Retain a dict if we have more + inDict = inOrg[len(uncompressed)-maxStatelessDict:] + dict = nil + dst.Reset() + } + if bw.err != nil { + return bw.err + } + } + if !eof { + // Align, only a stored block can do that. + bw.writeStoredHeader(0, false) + } + bw.flush() + return bw.err +} + +func hashSL(u uint32) uint32 { + return (u * 0x1e35a7bd) >> slTableShift +} + +func load3216(b []byte, i int16) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6416(b []byte, i int16) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func statelessEnc(dst *tokens, src []byte, startAt int16) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + type tableEntry struct { + offset int16 + } + + var table [slTableSize]tableEntry + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src)-int(startAt) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = 0 + return + } + // Index until startAt + if startAt > 0 { + cv := load3232(src, 0) + for i := int16(0); i < startAt; i++ { + table[hashSL(cv)] = tableEntry{offset: i} + cv = (cv >> 8) | (uint32(src[i+4]) << 24) + } + } + + s := startAt + 1 + nextEmit := startAt + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int16(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
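+	// The probe spacing in the loop below widens as the distance since the
+	// last emitted literal grows (skipLog = 5, doEvery = 2):
+	//
+	//	nextS = s + doEvery + (s-nextEmit)>>skipLog
+	//
+	// e.g. after 64 unmatched bytes each probe advances by 4 instead of 2,
+	// keeping incompressible input cheap while staying dense where matches
+	// are likely. (Illustrative numbers, not a measurement.)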
+ cv := load3216(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashSL(cv) + candidate = table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit || nextS <= 0 { + goto emitRemainder + } + + now := load6416(src, nextS) + table[nextHash] = tableEntry{offset: s} + nextHash = hashSL(uint32(now)) + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = table[nextHash] + now >>= 8 + table[nextHash] = tableEntry{offset: s} + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset + l := int16(matchLen(src[s+4:], src[t+4:]) + 4) + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + // Save the match found + dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6416(src, s-2) + o := s - 2 + prevHash := hashSL(uint32(x)) + table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashSL(uint32(x)) + candidate = table[currHash] + table[currHash] = tableEntry{offset: o + 2} + + if uint32(x) != load3216(src, candidate.offset) { + cv = uint32(x >> 8) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go new file mode 100644 index 0000000..d818790 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -0,0 +1,379 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package flate
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	// bits 0-16   xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+	// bits 16-22  offsetcode - 5 bits
+	// bits 22-30  xlength = length - MIN_MATCH_LENGTH - 8 bits
+	// bits 30-32  type   0 = literal  1 = EOF  2 = Match  3 = Unused - 2 bits
+	lengthShift         = 22
+	offsetMask          = 1<<lengthShift - 1
+	typeMask            = 3 << 30
+	literalType         = 0 << 30
+	matchType           = 1 << 30
+	matchOffsetOnlyMask = 0xffff
+)
+
+// lengthCodes, lengthCodes1, offsetCodes and offsetCodes14 are 256-entry
+// lookup tables mapping lengths/offsets to their DEFLATE codes (tables
+// elided here).
+
+type token uint32
+
+type tokens struct {
+	extraHist [32]uint16  // codes 256->maxnumlit
+	offHist   [32]uint16  // offset codes
+	litHist   [256]uint16 // codes 0->255
+	nFilled   int
+	n         uint16 // Must be able to contain maxStoreBlockSize
+	tokens    [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+	if t.n == 0 {
+		return
+	}
+	t.n = 0
+	t.nFilled = 0
+	for i := range t.litHist[:] {
+		t.litHist[i] = 0
+	}
+	for i := range t.extraHist[:] {
+		t.extraHist[i] = 0
+	}
+	for i := range t.offHist[:] {
+		t.offHist[i] = 0
+	}
+}
+
+func (t *tokens) Fill() {
+	if t.n == 0 {
+		return
+	}
+	for i, v := range t.litHist[:] {
+		if v == 0 {
+			t.litHist[i] = 1
+			t.nFilled++
+		}
+	}
+	for i, v := range t.extraHist[:literalCount-256] {
+		if v == 0 {
+			t.nFilled++
+			t.extraHist[i] = 1
+		}
+	}
+	for i, v := range t.offHist[:offsetCodeCount] {
+		if v == 0 {
+			t.offHist[i] = 1
+		}
+	}
+}
+
+func indexTokens(in []token) tokens {
+	var t tokens
+	t.indexTokens(in)
+	return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+	t.Reset()
+	for _, tok := range in {
+		if tok < matchType {
+			t.AddLiteral(tok.literal())
+			continue
+		}
+		t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
+	}
+}
+
+// emitLiteral writes a literal chunk to dst.
+func emitLiteral(dst *tokens, lit []byte) {
+	for _, v := range lit {
+		dst.tokens[dst.n] = token(v)
+		dst.litHist[v]++
+		dst.n++
+	}
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+	t.tokens[t.n] = token(lit)
+	t.litHist[lit]++
+	t.n++
+}
+
+// from https://stackoverflow.com/a/28730362
+func mFastLog2(val float32) float32 {
+	ux := int32(math.Float32bits(val))
+	log2 := (float32)(((ux >> 23) & 255) - 128)
+	ux &= -0x7f800001
+	ux += 127 << 23
+	uval := math.Float32frombits(uint32(ux))
+	log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
+	return log2
+}
+
+// EstimatedBits returns a minimum size estimate, in bits, produced by an
+// *optimal* compression of the block.
+func (t *tokens) EstimatedBits() int {
+	shannon := float32(0)
+	bits := int(0)
+	nMatches := 0
+	total := int(t.n) + t.nFilled
+	if total > 0 {
+		invTotal := 1.0 / float32(total)
+		for _, v := range t.litHist[:] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+			}
+		}
+		// Just add 15 for EOB
+		shannon += 15
+		for i, v := range t.extraHist[1 : literalCount-256] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+				bits += int(lengthExtraBits[i&31]) * int(v)
+				nMatches += int(v)
+			}
+		}
+	}
+	if nMatches > 0 {
+		invTotal := 1.0 / float32(nMatches)
+		for i, v := range t.offHist[:offsetCodeCount] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+				bits += int(offsetExtraBits[i&31]) * int(v)
+			}
+		}
+	}
+	return int(shannon) + bits
+}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
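+// A sketch of the resulting packing (constants as defined above; callers
+// pass the length and offset with the DEFLATE bases already subtracted):
+//
+//	xlength := uint32(38 - baseMatchLength)  // a 38-byte match...
+//	xoffset := uint32(100 - baseMatchOffset) // ...at distance 100
+//	xoffset |= offsetCode(xoffset) << 16
+//	tok := token(matchType | xlength<<lengthShift | xoffset)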
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+	if debugDeflate {
+		if xlength >= maxMatchLength+baseMatchLength {
+			panic(fmt.Errorf("invalid length: %v", xlength))
+		}
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+			panic(fmt.Errorf("invalid offset: %v", xoffset))
+		}
+	}
+	oCode := offsetCode(xoffset)
+	xoffset |= oCode << 16
+
+	t.extraHist[lengthCodes1[uint8(xlength)]]++
+	t.offHist[oCode&31]++
+	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+	t.n++
+}
+
+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
+// Length should NOT have the base subtracted, only offset should.
+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
+	if debugDeflate {
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+			panic(fmt.Errorf("invalid offset: %v", xoffset))
+		}
+	}
+	oc := offsetCode(xoffset)
+	xoffset |= oc << 16
+	for xlength > 0 {
+		xl := xlength
+		if xl > 258 {
+			// We need to have at least baseMatchLength left over for next loop.
+			if xl > 258+baseMatchLength {
+				xl = 258
+			} else {
+				xl = 258 - baseMatchLength
+			}
+		}
+		xlength -= xl
+		xl -= baseMatchLength
+		t.extraHist[lengthCodes1[uint8(xl)]]++
+		t.offHist[oc&31]++
+		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+		t.n++
+	}
+}
+
+func (t *tokens) AddEOB() {
+	t.tokens[t.n] = token(endBlockMarker)
+	t.extraHist[0]++
+	t.n++
+}
+
+func (t *tokens) Slice() []token {
+	return t.tokens[:t.n]
+}
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint32 { return uint32(t - literalType) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint8 { return uint8(t >> lengthShift) }
+
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset.
+func offsetCode(off uint32) uint32 {
+	if false {
+		if off < uint32(len(offsetCodes)) {
+			return offsetCodes[off&255]
+		} else if off>>7 < uint32(len(offsetCodes)) {
+			return offsetCodes[(off>>7)&255] + 14
+		} else {
+			return offsetCodes[(off>>14)&255] + 28
+		}
+	}
+	if off < uint32(len(offsetCodes)) {
+		return offsetCodes[uint8(off)]
+	}
+	return offsetCodes14[uint8(off>>7)]
+}
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
new file mode 100644
index 0000000..dc2362a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -0,0 +1,375 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+package gzip
+
+import (
+	"bufio"
+	"compress/gzip"
+	"encoding/binary"
+	"hash/crc32"
+	"io"
+	"time"
+
+	"github.com/klauspost/compress/flate"
+)
+
+const (
+	gzipID1     = 0x1f
+	gzipID2     = 0x8b
+	gzipDeflate = 8
+	flagText    = 1 << 0
+	flagHdrCrc  = 1 << 1
+	flagExtra   = 1 << 2
+	flagName    = 1 << 3
+	flagComment = 1 << 4
+)
+
+var (
+	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+	ErrChecksum = gzip.ErrChecksum
+	// ErrHeader is returned when reading GZIP data that has an invalid header.
+	ErrHeader = gzip.ErrHeader
+)
+
+var le = binary.LittleEndian
+
+// noEOF converts io.EOF to io.ErrUnexpectedEOF.
+func noEOF(err error) error {
+	if err == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	return err
+}
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+//
+// Strings must be UTF-8 encoded and may only contain Unicode code points
+// U+0001 through U+00FF, due to limitations of the GZIP file format.
+type Header struct {
+	Comment string    // comment
+	Extra   []byte    // "extra data"
+	ModTime time.Time // modification time
+	Name    string    // file name
+	OS      byte      // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return an ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum. Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+	Header       // valid after NewReader or Reader.Reset
+	r            flate.Reader
+	br           *bufio.Reader
+	decompressor io.ReadCloser
+	digest       uint32 // CRC-32, IEEE polynomial (section 8)
+	size         uint32 // Uncompressed size (section 2.3.1)
+	buf          [512]byte
+	err          error
+	multistream  bool
+}
+
+// NewReader creates a new Reader reading the given reader.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+//
+// It is the caller's responsibility to call Close on the Reader when done.
+//
+// The Reader.Header fields will be valid in the Reader returned.
+func NewReader(r io.Reader) (*Reader, error) {
+	z := new(Reader)
+	if err := z.Reset(r); err != nil {
+		return nil, err
+	}
+	return z, nil
+}
+
+// Reset discards the Reader z's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) error {
+	*z = Reader{
+		decompressor: z.decompressor,
+		multistream:  true,
+		br:           z.br,
+	}
+	if rr, ok := r.(flate.Reader); ok {
+		z.r = rr
+	} else {
+		// Reuse if we can.
+		if z.br != nil {
+			z.br.Reset(r)
+		} else {
+			z.br = bufio.NewReader(r)
+		}
+		z.r = z.br
+	}
+	z.Header, z.err = z.readHeader()
+	return z.err
+}
+
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. If the underlying reader implements io.ByteReader,
+// it will be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+	z.multistream = ok
+}
+
+// readString reads a NUL-terminated string from z.r.
+// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
+// will output a string encoded using UTF-8.
+// This method always updates z.digest with the data read.
+func (z *Reader) readString() (string, error) {
+	var err error
+	needConv := false
+	for i := 0; ; i++ {
+		if i >= len(z.buf) {
+			return "", ErrHeader
+		}
+		z.buf[i], err = z.r.ReadByte()
+		if err != nil {
+			return "", err
+		}
+		if z.buf[i] > 0x7f {
+			needConv = true
+		}
+		if z.buf[i] == 0 {
+			// Digest covers the NUL terminator.
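+			// The low 16 bits of this running CRC are what an FHCRC-flagged
+			// header stores; readHeader later checks, in effect:
+			//
+			//	le.Uint16(buf) == uint16(z.digest)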
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) + + // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). + if needConv { + s := make([]rune, 0, i) + for _, v := range z.buf[:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[:i]), nil + } + } +} + +// readHeader reads the GZIP header according to section 2.3.1. +// This method does not set z.err. +func (z *Reader) readHeader() (hdr Header, err error) { + if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil { + // RFC 1952, section 2.2, says the following: + // A gzip file consists of a series of "members" (compressed data sets). + // + // Other than this, the specification does not clarify whether a + // "series" is defined as "one or more" or "zero or more". To err on the + // side of caution, Go interprets this to mean "zero or more". + // Thus, it is okay to return io.EOF here. + return hdr, err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return hdr, ErrHeader + } + flg := z.buf[3] + hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0) + // z.buf[8] is XFL and is currently ignored. + hdr.OS = z.buf[9] + z.digest = crc32.ChecksumIEEE(z.buf[:10]) + + if flg&flagExtra != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2]) + data := make([]byte, le.Uint16(z.buf[:2])) + if _, err = io.ReadFull(z.r, data); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, data) + hdr.Extra = data + } + + var s string + if flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Name = s + } + + if flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Comment = s + } + + if flg&flagHdrCrc != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + digest := le.Uint16(z.buf[:2]) + if digest != uint16(z.digest) { + return hdr, ErrHeader + } + } + + z.digest = 0 + if z.decompressor == nil { + z.decompressor = flate.NewReader(z.r) + } else { + z.decompressor.(flate.Resetter).Reset(z.r, nil) + } + return hdr, nil +} + +// Read implements io.Reader, reading uncompressed bytes from its underlying Reader. +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + + for n == 0 { + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. 
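+		// For reference, the 8-byte trailer consumed above is laid out as
+		// defined by RFC 1952:
+		//
+		//	z.buf[0:4] // CRC-32 (IEEE) of the uncompressed data, little endian
+		//	z.buf[4:8] // ISIZE: uncompressed length mod 2^32, little endian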
+ if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } + } + + return n, nil +} + +type crcer interface { + io.Writer + Sum32() uint32 + Reset() +} +type crcUpdater struct { + z *Reader +} + +func (c *crcUpdater) Write(p []byte) (int, error) { + c.z.digest = crc32.Update(c.z.digest, crc32.IEEETable, p) + return len(p), nil +} + +func (c *crcUpdater) Sum32() uint32 { + return c.z.digest +} + +func (c *crcUpdater) Reset() { + c.z.digest = 0 +} + +// WriteTo support the io.WriteTo interface for io.Copy and friends. +func (z *Reader) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + crcWriter := crcer(crc32.NewIEEE()) + if z.digest != 0 { + crcWriter = &crcUpdater{z: z} + } + for { + if z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + + // We write both to output and digest. + mw := io.MultiWriter(w, crcWriter) + n, err := z.decompressor.(io.WriterTo).WriteTo(mw) + total += n + z.size += uint32(n) + if err != nil { + z.err = err + return total, z.err + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return total, err + } + z.digest = crcWriter.Sum32() + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return total, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return total, nil + } + crcWriter.Reset() + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +// In order for the GZIP checksum to be verified, the reader must be +// fully consumed until the io.EOF. +func (z *Reader) Close() error { return z.decompressor.Close() } diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go new file mode 100644 index 0000000..5bc7205 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip.go @@ -0,0 +1,290 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "errors" + "fmt" + "hash/crc32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly + + // StatelessCompression will do compression but without maintaining any state + // between Write calls. + // There will be no memory kept between Write calls, + // but compression and speed will be suboptimal. + // Because of this, the size of actual Write calls will affect output size. + StatelessCompression = -3 +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. 
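+//
+// A minimal usage sketch (dst and data are hypothetical):
+//
+//	zw := gzip.NewWriter(dst)
+//	zw.Name = "data.txt" // set Header fields before the first Write
+//	zw.Write(data)
+//	zw.Close() // flushes and appends the CRC-32/length trailer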
+type Writer struct { + Header // written at first call to Write, Flush, or Close + w io.Writer + level int + err error + compressor *flate.Writer + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + wroteHeader bool + closed bool + buf [10]byte +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write, Flush, or Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, or any +// integer value between BestSpeed and BestCompression inclusive. The error +// returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < StatelessCompression || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.init(w, level) + return z, nil +} + +// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. +const MinCustomWindowSize = flate.MinCustomWindowSize + +// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. +const MaxCustomWindowSize = flate.MaxCustomWindowSize + +// NewWriterWindow returns a new Writer compressing data with a custom window size. +// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. +func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { + if windowSize < MinCustomWindowSize { + return nil, errors.New("gzip: requested window size less than MinWindowSize") + } + if windowSize > MaxCustomWindowSize { + return nil, errors.New("gzip: requested window size bigger than MaxCustomWindowSize") + } + + z := new(Writer) + z.init(w, -windowSize) + return z, nil +} + +func (z *Writer) init(w io.Writer, level int) { + compressor := z.compressor + if level != StatelessCompression { + if compressor != nil { + compressor.Reset(w) + } + } + + *z = Writer{ + Header: Header{ + OS: 255, // unknown + }, + w: w, + level: level, + compressor: compressor, + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + z.init(w, z.level) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + le.PutUint16(z.buf[:2], uint16(len(b))) + _, err := z.w.Write(z.buf[:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. +// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. 
+ needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[:1]) + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed. +func (z *Writer) Write(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + var n int + // Write the GZIP header lazily. + if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + n, z.err = z.w.Write(z.buf[:10]) + if z.err != nil { + return n, z.err + } + if z.Extra != nil { + z.err = z.writeBytes(z.Extra) + if z.err != nil { + return n, z.err + } + } + if z.Name != "" { + z.err = z.writeString(z.Name) + if z.err != nil { + return n, z.err + } + } + if z.Comment != "" { + z.err = z.writeString(z.Comment) + if z.err != nil { + return n, z.err + } + } + + if z.compressor == nil && z.level != StatelessCompression { + z.compressor, _ = flate.NewWriter(z.w, z.level) + } + } + z.size += uint32(len(p)) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p) + if z.level == StatelessCompression { + return len(p), flate.StatelessDeflate(z.w, p, false, nil) + } + n, z.err = z.compressor.Write(p) + return n, z.err +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if z.err != nil { + return z.err + } + if z.closed || z.level == StatelessCompression { + return nil + } + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. 
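+// Even if nothing was written, Close emits a header and an empty stream; the
+// final eight bytes it writes are (little endian):
+//
+//	le.PutUint32(z.buf[:4], z.digest)  // CRC-32 of all uncompressed input
+//	le.PutUint32(z.buf[4:8], z.size)   // total input length mod 2^32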
+func (z *Writer) Close() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + if z.level == StatelessCompression { + z.err = flate.StatelessDeflate(z.w, nil, true, nil) + } else { + z.err = z.compressor.Close() + } + if z.err != nil { + return z.err + } + le.PutUint32(z.buf[:4], z.digest) + le.PutUint32(z.buf[4:8], z.size) + _, z.err = z.w.Write(z.buf[:8]) + return z.err +} diff --git a/vendor/github.com/klauspost/compress/zlib/reader.go b/vendor/github.com/klauspost/compress/zlib/reader.go new file mode 100644 index 0000000..f127d47 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/reader.go @@ -0,0 +1,183 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package zlib implements reading and writing of zlib format compressed data, +as specified in RFC 1950. + +The implementation provides filters that uncompress during reading +and compress during writing. For example, to write compressed data +to a buffer: + + var b bytes.Buffer + w := zlib.NewWriter(&b) + w.Write([]byte("hello, world\n")) + w.Close() + +and to read that data back: + + r, err := zlib.NewReader(&b) + io.Copy(os.Stdout, r) + r.Close() +*/ +package zlib + +import ( + "bufio" + "compress/zlib" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +const zlibDeflate = 8 + +var ( + // ErrChecksum is returned when reading ZLIB data that has an invalid checksum. + ErrChecksum = zlib.ErrChecksum + // ErrDictionary is returned when reading ZLIB data that has an invalid dictionary. + ErrDictionary = zlib.ErrDictionary + // ErrHeader is returned when reading ZLIB data that has an invalid header. + ErrHeader = zlib.ErrHeader +) + +type reader struct { + r flate.Reader + decompressor io.ReadCloser + digest hash.Hash32 + err error + scratch [4]byte +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// NewReader creates a new ReadCloser. +// Reads from the returned ReadCloser read and decompress data from r. +// If r does not implement io.ByteReader, the decompressor may read more +// data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser when done. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) (io.ReadCloser, error) { + return NewReaderDict(r, nil) +} + +// NewReaderDict is like NewReader but uses a preset dictionary. +// NewReaderDict ignores the dictionary if the compressed data does not refer to it. +// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary. +// +// The ReadCloser returned by NewReaderDict also implements Resetter. 
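+//
+// A round-trip sketch with a preset dictionary (buf, dict and data are
+// hypothetical):
+//
+//	w, _ := zlib.NewWriterLevelDict(&buf, zlib.DefaultCompression, dict)
+//	w.Write(data)
+//	w.Close()
+//	r, err := zlib.NewReaderDict(&buf, dict) // ErrDictionary on a mismatch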
+func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { + z := new(reader) + err := z.Reset(r, dict) + if err != nil { + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + + var n int + n, z.err = z.decompressor.Read(p) + z.digest.Write(p[0:n]) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum. + if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return n, z.err + } + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != z.digest.Sum32() { + z.err = ErrChecksum + return n, z.err + } + return n, io.EOF +} + +// Calling Close does not close the wrapped io.Reader originally passed to NewReader. +// In order for the ZLIB checksum to be verified, the reader must be +// fully consumed until the io.EOF. +func (z *reader) Close() error { + if z.err != nil && z.err != io.EOF { + return z.err + } + z.err = z.decompressor.Close() + return z.err +} + +func (z *reader) Reset(r io.Reader, dict []byte) error { + *z = reader{decompressor: z.decompressor, digest: z.digest} + if fr, ok := r.(flate.Reader); ok { + z.r = fr + } else { + z.r = bufio.NewReader(r) + } + + // Read the header (RFC 1950 section 2.2.). + _, z.err = io.ReadFull(z.r, z.scratch[0:2]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + h := uint(z.scratch[0])<<8 | uint(z.scratch[1]) + if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) { + z.err = ErrHeader + return z.err + } + haveDict := z.scratch[1]&0x20 != 0 + if haveDict { + _, z.err = io.ReadFull(z.r, z.scratch[0:4]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != adler32.Checksum(dict) { + z.err = ErrDictionary + return z.err + } + } + + if z.decompressor == nil { + if haveDict { + z.decompressor = flate.NewReaderDict(z.r, dict) + } else { + z.decompressor = flate.NewReader(z.r) + } + } else { + z.decompressor.(flate.Resetter).Reset(z.r, dict) + } + + if z.digest != nil { + z.digest.Reset() + } else { + z.digest = adler32.New() + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zlib/writer.go b/vendor/github.com/klauspost/compress/zlib/writer.go new file mode 100644 index 0000000..605816b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/writer.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib + +import ( + "fmt" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/zlib" does not also have to import "compress/flate". 
+const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + w io.Writer + level int + dict []byte + compressor *flate.Writer + digest hash.Hash32 + err error + scratch [4]byte + wroteHeader bool +} + +// NewWriter creates a new Writer. +// Writes to the returned Writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevelDict(w, DefaultCompression, nil) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, HuffmanOnly +// or any integer value between BestSpeed and BestCompression inclusive. +// The error returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + return NewWriterLevelDict(w, level, nil) +} + +// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to +// compress with. +// +// The dictionary may be nil. If not, its contents should not be modified until +// the Writer is closed. +func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) { + if level < HuffmanOnly || level > BestCompression { + return nil, fmt.Errorf("zlib: invalid compression level: %d", level) + } + return &Writer{ + w: w, + level: level, + dict: dict, + }, nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing +// to w. +func (z *Writer) Reset(w io.Writer) { + z.w = w + // z.level and z.dict left unchanged. + if z.compressor != nil { + z.compressor.Reset(w) + } + if z.digest != nil { + z.digest.Reset() + } + z.err = nil + z.scratch = [4]byte{} + z.wroteHeader = false +} + +// writeHeader writes the ZLIB header. +func (z *Writer) writeHeader() (err error) { + z.wroteHeader = true + // ZLIB has a two-byte header (as documented in RFC 1950). + // The first four bits is the CINFO (compression info), which is 7 for the default deflate window size. + // The next four bits is the CM (compression method), which is 8 for deflate. + z.scratch[0] = 0x78 + // The next two bits is the FLEVEL (compression level). The four values are: + // 0=fastest, 1=fast, 2=default, 3=best. + // The next bit, FDICT, is set if a dictionary is given. + // The final five FCHECK bits form a mod-31 checksum. + switch z.level { + case -2, 0, 1: + z.scratch[1] = 0 << 6 + case 2, 3, 4, 5: + z.scratch[1] = 1 << 6 + case 6, -1: + z.scratch[1] = 2 << 6 + case 7, 8, 9: + z.scratch[1] = 3 << 6 + default: + panic("unreachable") + } + if z.dict != nil { + z.scratch[1] |= 1 << 5 + } + z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31) + if _, err = z.w.Write(z.scratch[0:2]); err != nil { + return err + } + if z.dict != nil { + // The next four bytes are the Adler-32 checksum of the dictionary. 
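+		// Worked FCHECK example for the no-dictionary, default-level header:
+		// the two bytes come out as 0x78 0x9c, and
+		//
+		//	(0x78<<8 | 0x9c) % 31 == 30876 % 31 == 0
+		//
+		// which is the divisibility the reader verifies on the other side.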
+ checksum := adler32.Checksum(z.dict) + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + if _, err = z.w.Write(z.scratch[0:4]); err != nil { + return err + } + } + if z.compressor == nil { + // Initialize deflater unless the Writer is being reused + // after a Reset call. + z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict) + if err != nil { + return err + } + z.digest = adler32.New() + } + return nil +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed or +// explicitly flushed. +func (z *Writer) Write(p []byte) (n int, err error) { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + n, err = z.compressor.Write(p) + if err != nil { + z.err = err + return + } + z.digest.Write(p) + return +} + +// Flush flushes the Writer to its underlying io.Writer. +func (z *Writer) Flush() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Close() + if z.err != nil { + return z.err + } + checksum := z.digest.Sum32() + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + _, z.err = z.w.Write(z.scratch[0:4]) + return z.err +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go index a014ac9..063e778 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && gc && !purego -// +build amd64,gc,!purego package argon2 diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index b2cc051..f3b653a 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && gc && !purego -// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go index 167c59d..16d58c6 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_ref.go +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc -// +build !amd64 purego !gc package argon2 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 56bfaaa..4f506f8 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 4b9daa1..353bb7c 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go index 5fa1b32..1d0770a 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.7 && amd64 && gc && !purego -// +build !go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index ae75eb9..adfac00 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && gc && !purego -// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go index b0137cd..6e28668 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc -// +build !amd64 purego !gc package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go index 9d86339..d9fcac3 100644 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.9 -// +build go1.9 package blake2b diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index 5dfacbb..661ea13 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index f1f6623..7dd2638 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index 02ff3d0..db42e66 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego -// +build !arm64,!s390x,!ppc64le !gc purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go index da420b2..3a4287f 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 5c0fed2..66aebae 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -20,7 +20,6 @@ // due to the calling conventions and initialization of constants. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index 4652247..683ccfd 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s index f3ef5a0..1eda91a 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "go_asm.h" #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go index 0c408c5..50695a1 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20poly1305 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s index 867c181..731d2ac 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -5,7 +5,6 @@ // This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" // General register allocation @@ -184,11 +183,31 @@ GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 #define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 #define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 #define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 + // Some macros + +// ROL rotates the uint32s in register R left by N bits, using temporary T. +#define ROL(N, R, T) \ + MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R + +// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. 
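+// On GOAMD64_v2 and newer baselines (which guarantee SSSE3), the rotate is a
+// single PSHUFB through the rol16 shuffle mask; otherwise the generic ROL
+// macro above is used, which needs the temporary register.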
+#ifdef GOAMD64_v2 +#define ROL16(R, T) PSHUFB ·rol16<>(SB), R +#else +#define ROL16(R, T) ROL(16, R, T) +#endif + +// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. +#ifdef GOAMD64_v2 +#define ROL8(R, T) PSHUFB ·rol8<>(SB), R +#else +#define ROL8(R, T) ROL(8, R, T) +#endif + #define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ + PADDD B, A; PXOR A, D; ROL16(D, T) \ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ + PADDD B, A; PXOR A, D; ROL8(D, T) \ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B #define chachaQR_AVX2(A, B, C, D, T) \ diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go index f832b33..34e6ab1 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || !gc || purego -// +build !amd64 !gc purego package chacha20poly1305 diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go index edcf163..70c5416 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go @@ -1,7 +1,6 @@ // Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. //go:build amd64 && gc && !purego -// +build amd64,gc,!purego package field diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s index 293f013..60817ac 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s @@ -1,7 +1,6 @@ // Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. //go:build amd64 && gc && !purego -// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go index ddb6c9b..9da280d 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || !gc || purego -// +build !amd64 !gc purego package field diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go index af459ef..075fe9b 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && gc && !purego -// +build arm64,gc,!purego package field diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s index 5c91e45..3126a43 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && gc && !purego -// +build arm64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go index 234a5b2..fc029ac 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !arm64 || !gc || purego -// +build !arm64 !gc purego package field diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go index dda3f14..f4ded5f 100644 --- a/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -56,7 +56,9 @@ func (f *hkdf) Read(p []byte) (int, error) { // Fill the rest of the buffer for len(p) > 0 { - f.expander.Reset() + if f.counter > 1 { + f.expander.Reset() + } f.expander.Write(f.prev) f.expander.Write(f.info) f.expander.Write([]byte{f.counter}) diff --git a/vendor/golang.org/x/crypto/internal/alias/alias.go b/vendor/golang.org/x/crypto/internal/alias/alias.go index 69c17f8..551ff0c 100644 --- a/vendor/golang.org/x/crypto/internal/alias/alias.go +++ b/vendor/golang.org/x/crypto/internal/alias/alias.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !purego -// +build !purego // Package alias implements memory aliasing tests. package alias diff --git a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go index 4775b0a..6fe61b5 100644 --- a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go +++ b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build purego -// +build purego // Package alias implements memory aliasing tests. package alias diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go index 45b5c96..d33c889 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.13 -// +build !go1.13 package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go index ed52b34..495c1fa 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.13 -// +build go1.13 package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index f184b67..333da28 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (!amd64 && !ppc64le && !s390x) || !gc || purego -// +build !amd64,!ppc64le,!s390x !gc purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go index 6d52233..164cd47 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index 1d74f0f..e0d3c64 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go index 4a06994..4aec487 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index 58422aa..d2ca5de 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go index ec95966..e1d033a 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s index aa9e049..0fe3a7c 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go index c400dfc..e76b44f 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && !purego && gc -// +build amd64,!purego,gc package salsa diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s index c089277..fcce023 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && !purego && gc -// +build amd64,!purego,gc // This code was translated into a form compatible with 6a from the public // domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go index 4392cc1..9448760 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || purego || !gc -// +build !amd64 purego !gc package salsa diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go new file mode 100644 index 0000000..decd8cf --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// # Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. +// +// # Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// # The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. 
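+//
+// For example, SHA3-256 absorbs input at a rate of 136 bytes (1088 bits) per
+// permutation call, leaving a capacity of 512 bits, which gives it the
+// 256-bit generic security strength noted above.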
+// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 // import "golang.org/x/crypto/sha3" diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 0000000..0d8043f --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +import ( + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() hash.Hash { + if h := new224Asm(); h != nil { + return h + } + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() hash.Hash { + if h := new256Asm(); h != nil { + return h + } + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() hash.Hash { + if h := new384Asm(); h != nil { + return h + } + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() hash.Hash { + if h := new512Asm(); h != nil { + return h + } + return &state{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } + +// NewLegacyKeccak512 creates a new Keccak-512 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } + +// Sum224 returns the SHA3-224 digest of the data. 
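+// It is a one-shot convenience wrapper equivalent to:
+//
+//	h := New224()
+//	h.Write(data)
+//	h.Sum(digest[:0])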
+func Sum224(data []byte) (digest [28]byte) { + h := New224() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. +func Sum384(data []byte) (digest [48]byte) { + h := New384() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go new file mode 100644 index 0000000..fe8c847 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +import ( + "hash" +) + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { return nil } + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { return nil } + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { return nil } + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go new file mode 100644 index 0000000..ce48b1d --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -0,0 +1,414 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package sha3 + +import "math/bits" + +// rc stores the round constants for use in the ι step. +var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. 
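+		// In each block below, the bc and d values implement θ (column
+		// parities and mixing), the RotateLeft64 calls are ρ, the scattered
+		// destination indices are π, the a &^ b non-linear terms are χ, and
+		// the XOR with rc[i] is ι.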
+ + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[12] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[18] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[24] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[16] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[22] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[3] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[1] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[7] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[19] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[11] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[23] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[4] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[2] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[8] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[14] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[7] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[23] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[14] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[11] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[2] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[18] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[6] 
^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[22] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[4] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[1] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[8] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[24] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[12] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[3] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[19] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[22] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[8] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[19] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[1] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[12] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[23] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[16] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[2] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[24] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[6] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[3] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[14] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[7] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[18] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[4] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 
= a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[2] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[3] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[4] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[6] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[7] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[8] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[11] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[12] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[14] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[16] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[18] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[19] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[22] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[23] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[24] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go new file mode 100644 index 0000000..b908696 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +package sha3 + +// This function is implemented in keccakf_amd64.s. + +//go:noescape + +func keccakF1600(a *[25]uint64) diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s new file mode 100644 index 0000000..8fb26ae --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -0,0 +1,390 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build amd64 && !purego && gc + +// This code was translated into a form compatible with 6a from the public +// domain sources at https://github.com/gvanas/KeccakCodePackage + +// Offsets in state +#define _ba (0*8) +#define _be (1*8) +#define _bi (2*8) +#define _bo (3*8) +#define _bu (4*8) +#define _ga (5*8) +#define _ge (6*8) +#define _gi (7*8) +#define _go (8*8) +#define _gu (9*8) +#define _ka (10*8) +#define _ke (11*8) +#define _ki (12*8) +#define _ko (13*8) +#define _ku (14*8) +#define _ma (15*8) +#define _me (16*8) +#define _mi (17*8) +#define _mo (18*8) +#define _mu (19*8) +#define _sa (20*8) +#define _se (21*8) +#define _si (22*8) +#define _so (23*8) +#define _su (24*8) + +// Temporary registers +#define rT1 AX + +// Round vars +#define rpState DI +#define rpStack SP + +#define rDa BX +#define rDe CX +#define rDi DX +#define rDo R8 +#define rDu R9 + +#define rBa R10 +#define rBe R11 +#define rBi R12 +#define rBo R13 +#define rBu R14 + +#define rCa SI +#define rCe BP +#define rCi rBi +#define rCo rBo +#define rCu R15 + +#define MOVQ_RBI_RCE MOVQ rBi, rCe +#define XORQ_RT1_RCA XORQ rT1, rCa +#define XORQ_RT1_RCE XORQ rT1, rCe +#define XORQ_RBA_RCU XORQ rBa, rCu +#define XORQ_RBE_RCU XORQ rBe, rCu +#define XORQ_RDU_RCU XORQ rDu, rCu +#define XORQ_RDA_RCA XORQ rDa, rCa +#define XORQ_RDE_RCE XORQ rDe, rCe + +#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ + /* Prepare round */ \ + MOVQ rCe, rDa; \ + ROLQ $1, rDa; \ + \ + MOVQ _bi(iState), rCi; \ + XORQ _gi(iState), rDi; \ + XORQ rCu, rDa; \ + XORQ _ki(iState), rCi; \ + XORQ _mi(iState), rDi; \ + XORQ rDi, rCi; \ + \ + MOVQ rCi, rDe; \ + ROLQ $1, rDe; \ + \ + MOVQ _bo(iState), rCo; \ + XORQ _go(iState), rDo; \ + XORQ rCa, rDe; \ + XORQ _ko(iState), rCo; \ + XORQ _mo(iState), rDo; \ + XORQ rDo, rCo; \ + \ + MOVQ rCo, rDi; \ + ROLQ $1, rDi; \ + \ + MOVQ rCu, rDo; \ + XORQ rCe, rDi; \ + ROLQ $1, rDo; \ + \ + MOVQ rCa, rDu; \ + XORQ rCi, rDo; \ + ROLQ $1, rDu; \ + \ + /* Result b */ \ + MOVQ _ba(iState), rBa; \ + MOVQ _ge(iState), rBe; \ + XORQ rCo, rDu; \ + MOVQ _ki(iState), rBi; \ + MOVQ _mo(iState), rBo; \ + MOVQ _su(iState), rBu; \ + XORQ rDe, rBe; \ + ROLQ $44, rBe; \ + XORQ rDi, rBi; \ + XORQ rDa, rBa; \ + ROLQ $43, rBi; \ + \ + MOVQ rBe, rCa; \ + MOVQ rc, rT1; \ + ORQ rBi, rCa; \ + XORQ rBa, rT1; \ + XORQ rT1, rCa; \ + MOVQ rCa, _ba(oState); \ + \ + XORQ rDu, rBu; \ + ROLQ $14, rBu; \ + MOVQ rBa, rCu; \ + ANDQ rBe, rCu; \ + XORQ rBu, rCu; \ + MOVQ rCu, _bu(oState); \ + \ + XORQ rDo, rBo; \ + ROLQ $21, rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _bi(oState); \ + \ + NOTQ rBi; \ + ORQ rBa, rBu; \ + ORQ rBo, rBi; \ + XORQ rBo, rBu; \ + XORQ rBe, rBi; \ + MOVQ rBu, _bo(oState); \ + MOVQ rBi, _be(oState); \ + B_RBI_RCE; \ + \ + /* Result g */ \ + MOVQ _gu(iState), rBe; \ + XORQ rDu, rBe; \ + MOVQ _ka(iState), rBi; \ + ROLQ $20, rBe; \ + XORQ rDa, rBi; \ + ROLQ $3, rBi; \ + MOVQ _bo(iState), rBa; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDo, rBa; \ + MOVQ _me(iState), rBo; \ + MOVQ _si(iState), rBu; \ + ROLQ $28, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ga(oState); \ + G_RT1_RCA; \ + \ + XORQ rDe, rBo; \ + ROLQ $45, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ge(oState); \ + G_RT1_RCE; \ + \ + XORQ rDi, rBu; \ + ROLQ $61, rBu; \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _go(oState); \ + \ + ANDQ rBe, rBa; 
\ + XORQ rBu, rBa; \ + MOVQ rBa, _gu(oState); \ + NOTQ rBu; \ + G_RBA_RCU; \ + \ + ORQ rBu, rBo; \ + XORQ rBi, rBo; \ + MOVQ rBo, _gi(oState); \ + \ + /* Result k */ \ + MOVQ _be(iState), rBa; \ + MOVQ _gi(iState), rBe; \ + MOVQ _ko(iState), rBi; \ + MOVQ _mu(iState), rBo; \ + MOVQ _sa(iState), rBu; \ + XORQ rDi, rBe; \ + ROLQ $6, rBe; \ + XORQ rDo, rBi; \ + ROLQ $25, rBi; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDe, rBa; \ + ROLQ $1, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ka(oState); \ + K_RT1_RCA; \ + \ + XORQ rDu, rBo; \ + ROLQ $8, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ke(oState); \ + K_RT1_RCE; \ + \ + XORQ rDa, rBu; \ + ROLQ $18, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _ki(oState); \ + \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _ko(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _ku(oState); \ + K_RBA_RCU; \ + \ + /* Result m */ \ + MOVQ _ga(iState), rBe; \ + XORQ rDa, rBe; \ + MOVQ _ke(iState), rBi; \ + ROLQ $36, rBe; \ + XORQ rDe, rBi; \ + MOVQ _bu(iState), rBa; \ + ROLQ $10, rBi; \ + MOVQ rBe, rT1; \ + MOVQ _mi(iState), rBo; \ + ANDQ rBi, rT1; \ + XORQ rDu, rBa; \ + MOVQ _so(iState), rBu; \ + ROLQ $27, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ma(oState); \ + M_RT1_RCA; \ + \ + XORQ rDi, rBo; \ + ROLQ $15, rBo; \ + MOVQ rBi, rT1; \ + ORQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _me(oState); \ + M_RT1_RCE; \ + \ + XORQ rDo, rBu; \ + ROLQ $56, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ORQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _mi(oState); \ + \ + ORQ rBa, rBe; \ + XORQ rBu, rBe; \ + MOVQ rBe, _mu(oState); \ + \ + ANDQ rBa, rBu; \ + XORQ rBo, rBu; \ + MOVQ rBu, _mo(oState); \ + M_RBE_RCU; \ + \ + /* Result s */ \ + MOVQ _bi(iState), rBa; \ + MOVQ _go(iState), rBe; \ + MOVQ _ku(iState), rBi; \ + XORQ rDi, rBa; \ + MOVQ _ma(iState), rBo; \ + ROLQ $62, rBa; \ + XORQ rDo, rBe; \ + MOVQ _se(iState), rBu; \ + ROLQ $55, rBe; \ + \ + XORQ rDu, rBi; \ + MOVQ rBa, rDu; \ + XORQ rDe, rBu; \ + ROLQ $2, rBu; \ + ANDQ rBe, rDu; \ + XORQ rBu, rDu; \ + MOVQ rDu, _su(oState); \ + \ + ROLQ $39, rBi; \ + S_RDU_RCU; \ + NOTQ rBe; \ + XORQ rDa, rBo; \ + MOVQ rBe, rDa; \ + ANDQ rBi, rDa; \ + XORQ rBa, rDa; \ + MOVQ rDa, _sa(oState); \ + S_RDA_RCA; \ + \ + ROLQ $41, rBo; \ + MOVQ rBi, rDe; \ + ORQ rBo, rDe; \ + XORQ rBe, rDe; \ + MOVQ rDe, _se(oState); \ + S_RDE_RCE; \ + \ + MOVQ rBo, rDi; \ + MOVQ rBu, rDo; \ + ANDQ rBu, rDi; \ + ORQ rBa, rDo; \ + XORQ rBi, rDi; \ + XORQ rBo, rDo; \ + MOVQ rDi, _si(oState); \ + MOVQ rDo, _so(oState) \ + +// func keccakF1600(state *[25]uint64) +TEXT ·keccakF1600(SB), 0, $200-8 + MOVQ state+0(FP), rpState + + // Convert the user state into an internal state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + // Execute the KeccakF permutation + MOVQ _ba(rpState), rCa + MOVQ _be(rpState), rCe + MOVQ _bu(rpState), rCu + + XORQ _ga(rpState), rCa + XORQ _ge(rpState), rCe + XORQ _gu(rpState), rCu + + XORQ _ka(rpState), rCa + XORQ _ke(rpState), rCe + XORQ _ku(rpState), rCu + + XORQ _ma(rpState), rCa + XORQ _me(rpState), rCe + XORQ _mu(rpState), rCu + + XORQ _sa(rpState), rCa + XORQ _se(rpState), rCe + MOVQ _si(rpState), rDi + MOVQ _so(rpState), rDo + XORQ _su(rpState), rCu + + mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, 
XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, 
XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + + // Revert the internal state to the user state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + RET diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go new file mode 100644 index 0000000..addfd50 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -0,0 +1,18 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.4 + +package sha3 + +import ( + "crypto" +) + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go new file mode 100644 index 0000000..4884d17 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -0,0 +1,197 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +type state struct { + // Generic sponge components. 
+ a [25]uint64 // main state of the hash + buf []byte // points into storage + rate int // the number of bytes of state to use + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.buf = d.storage.asBytes()[:0] +} + +func (d *state) clone() *state { + ret := *d + if ret.state == spongeAbsorbing { + ret.buf = ret.storage.asBytes()[:len(ret.buf)] + } else { + ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] + } + + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *state) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf) + d.buf = d.storage.asBytes()[:0] + keccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutation before + // copying more output. + keccakF1600(&d.a) + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute(dsbyte byte) { + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.buf = append(d.buf, dsbyte) + zerosStart := len(d.buf) + d.buf = d.storage.asBytes()[:d.rate] + for i := zerosStart; i < d.rate; i++ { + d.buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + d.buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.buf = d.storage.asBytes()[:d.rate] + copyOut(d, d.buf) +} + +// Write absorbs more data into the hash's state. 
It panics if any +// output has already been read. +func (d *state) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + if d.buf == nil { + d.buf = d.storage.asBytes()[:0] + } + written = len(p) + + for len(p) > 0 { + if len(d.buf) == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + keccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - len(d.buf) + if todo > len(p) { + todo = len(p) + } + d.buf = append(d.buf, p[:todo]...) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if len(d.buf) == d.rate { + d.permute() + } + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute(d.dsbyte) + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + n := copy(out, d.buf) + d.buf = d.buf[n:] + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if len(d.buf) == 0 { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. It panics if any output has already been read. +func (d *state) Sum(in []byte) []byte { + if d.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation + dup.Read(hash) + return append(in, hash...) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go new file mode 100644 index 0000000..d861bca --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -0,0 +1,288 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package sha3 + +// This file contains code for using the 'compute intermediate +// message digest' (KIMD) and 'compute last message digest' (KLMD) +// instructions to compute SHA-3 and SHAKE hashes on IBM Z. + +import ( + "hash" + + "golang.org/x/sys/cpu" +) + +// codes represent 7-bit KIMD/KLMD function codes as defined in +// the Principles of Operation. +type code uint64 + +const ( + // function codes for KIMD/KLMD + sha3_224 code = 32 + sha3_256 = 33 + sha3_384 = 34 + sha3_512 = 35 + shake_128 = 36 + shake_256 = 37 + nopad = 0x100 +) + +// kimd is a wrapper for the 'compute intermediate message digest' instruction. +// src must be a multiple of the rate for the given function code. +// +//go:noescape +func kimd(function code, chain *[200]byte, src []byte) + +// klmd is a wrapper for the 'compute last message digest' instruction. +// src padding is handled by the instruction. 
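+// This package also sets the nopad bit in the function code when squeezing
+// extra output, so that no further padding is applied (see Read below).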
+// +//go:noescape +func klmd(function code, chain *[200]byte, dst, src []byte) + +type asmState struct { + a [200]byte // 1600 bit state + buf []byte // care must be taken to ensure cap(buf) is a multiple of rate + rate int // equivalent to block size + storage [3072]byte // underlying storage for buf + outputLen int // output length for full security + function code // KIMD/KLMD function code + state spongeDirection // whether the sponge is absorbing or squeezing +} + +func newAsmState(function code) *asmState { + var s asmState + s.function = function + switch function { + case sha3_224: + s.rate = 144 + s.outputLen = 28 + case sha3_256: + s.rate = 136 + s.outputLen = 32 + case sha3_384: + s.rate = 104 + s.outputLen = 48 + case sha3_512: + s.rate = 72 + s.outputLen = 64 + case shake_128: + s.rate = 168 + s.outputLen = 32 + case shake_256: + s.rate = 136 + s.outputLen = 64 + default: + panic("sha3: unrecognized function code") + } + + // limit s.buf size to a multiple of s.rate + s.resetBuf() + return &s +} + +func (s *asmState) clone() *asmState { + c := *s + c.buf = c.storage[:len(s.buf):cap(s.buf)] + return &c +} + +// copyIntoBuf copies b into buf. It will panic if there is not enough space to +// store all of b. +func (s *asmState) copyIntoBuf(b []byte) { + bufLen := len(s.buf) + s.buf = s.buf[:len(s.buf)+len(b)] + copy(s.buf[bufLen:], b) +} + +// resetBuf points buf at storage, sets the length to 0 and sets cap to be a +// multiple of the rate. +func (s *asmState) resetBuf() { + max := (cap(s.storage) / s.rate) * s.rate + s.buf = s.storage[:0:max] +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (s *asmState) Write(b []byte) (int, error) { + if s.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + length := len(b) + for len(b) > 0 { + if len(s.buf) == 0 && len(b) >= cap(s.buf) { + // Hash the data directly and push any remaining bytes + // into the buffer. + remainder := len(b) % s.rate + kimd(s.function, &s.a, b[:len(b)-remainder]) + if remainder != 0 { + s.copyIntoBuf(b[len(b)-remainder:]) + } + return length, nil + } + + if len(s.buf) == cap(s.buf) { + // flush the buffer + kimd(s.function, &s.a, s.buf) + s.buf = s.buf[:0] + } + + // copy as much as we can into the buffer + n := len(b) + if len(b) > cap(s.buf)-len(s.buf) { + n = cap(s.buf) - len(s.buf) + } + s.copyIntoBuf(b[:n]) + b = b[n:] + } + return length, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. 
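+// The first call pads and flushes the absorbed input via klmd; subsequent
+// calls pass the nopad flag so squeezing resumes where it left off.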
+func (s *asmState) Read(out []byte) (n int, err error) { + n = len(out) + + // need to pad if we were absorbing + if s.state == spongeAbsorbing { + s.state = spongeSqueezing + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 + s.buf = s.buf[:0] + return + } + + // write hash into buffer + max := cap(s.buf) + if max > len(out) { + max = (len(out)/s.rate)*s.rate + s.rate + } + klmd(s.function, &s.a, s.buf[:max], s.buf) + s.buf = s.buf[:max] + } + + for len(out) > 0 { + // flush the buffer + if len(s.buf) != 0 { + c := copy(out, s.buf) + out = out[c:] + s.buf = s.buf[c:] + continue + } + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function|nopad, &s.a, out, nil) + return + } + + // write hash into buffer + s.resetBuf() + if cap(s.buf) > len(out) { + s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] + } + klmd(s.function|nopad, &s.a, s.buf, nil) + } + return +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (s *asmState) Sum(b []byte) []byte { + if s.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Copy the state to preserve the original. + a := s.a + + // Hash the buffer. Note that we don't clear it because we + // aren't updating the state. + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) +} + +// Reset resets the Hash to its initial state. +func (s *asmState) Reset() { + for i := range s.a { + s.a[i] = 0 + } + s.resetBuf() + s.state = spongeAbsorbing +} + +// Size returns the number of bytes Sum will return. +func (s *asmState) Size() int { + return s.outputLen +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (s *asmState) BlockSize() int { + return s.rate +} + +// Clone returns a copy of the ShakeHash in its current state. +func (s *asmState) Clone() ShakeHash { + return s.clone() +} + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_224) + } + return nil +} + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_256) + } + return nil +} + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_384) + } + return nil +} + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_512) + } + return nil +} + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_128) + } + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. 
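+// Callers such as NewShake256 fall back to the pure Go implementation when
+// nil is returned.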
+func newShake256Asm() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_256) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s new file mode 100644 index 0000000..826b862 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 0000000..bb69984 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,172 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "encoding/binary" + "hash" + "io" +) + +// ShakeHash defines the interface to hash functions that support +// arbitrary-length output. When used as a plain [hash.Hash], it +// produces minimum-length outputs that provide full-strength generic +// security. +type ShakeHash interface { + hash.Hash + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error, but subsequent calls to Write or Sum + // will panic. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash +} + +// cSHAKE specific context +type cshakeState struct { + *state // SHA-3 state context and Read/Write operations + + // initBlock is the cSHAKE specific initialization set of bytes. It is initialized + // by newCShake function and stores concatenation of N followed by S, encoded + // by the method specified in 3.3 of [1]. + // It is stored here in order for Reset() to be able to put context into + // initial state. + initBlock []byte +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + dsbyteCShake = 0x04 + rate128 = 168 + rate256 = 136 +) + +func bytepad(input []byte, w int) []byte { + // leftEncode always returns max 9 bytes + buf := make([]byte, 0, 9+len(input)+w) + buf = append(buf, leftEncode(uint64(w))...) 
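+	// Append the input, then zero-pad to the next multiple of w, matching
+	// the bytepad() primitive from NIST SP 800-185.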
+ buf = append(buf, input...) + padlen := w - (len(buf) % w) + return append(buf, make([]byte, padlen)...) +} + +func leftEncode(value uint64) []byte { + var b [9]byte + binary.BigEndian.PutUint64(b[1:], value) + // Trim all but last leading zero bytes + i := byte(1) + for i < 8 && b[i] == 0 { + i++ + } + // Prepend number of encoded bytes + b[i-1] = 9 - i + return b[i-1:] +} + +func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { + c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} + + // leftEncode returns max 9 bytes + c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, N...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, S...) + c.Write(bytepad(c.initBlock, c.rate)) + return &c +} + +// Reset resets the hash to initial state. +func (c *cshakeState) Reset() { + c.state.Reset() + c.Write(bytepad(c.initBlock, c.rate)) +} + +// Clone returns copy of a cSHAKE context within its current state. +func (c *cshakeState) Clone() ShakeHash { + b := make([]byte, len(c.initBlock)) + copy(b, c.initBlock) + return &cshakeState{state: c.clone(), initBlock: b} +} + +// Clone returns copy of SHAKE context within its current state. +func (c *state) Clone() ShakeHash { + return c.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() ShakeHash { + if h := newShake128Asm(); h != nil { + return h + } + return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { + if h := newShake256Asm(); h != nil { + return h + } + return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} +} + +// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, +// a customizable variant of SHAKE128. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake128. +func NewCShake128(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake128() + } + return newCShake(N, S, rate128, 32, dsbyteCShake) +} + +// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, +// a customizable variant of SHAKE256. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake256. +func NewCShake256(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake256() + } + return newCShake(N, S, rate256, 64, dsbyteCShake) +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. 
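+// The number of bytes squeezed is len(hash).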
+func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go new file mode 100644 index 0000000..8d31cf5 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. +func newShake256Asm() ShakeHash { + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go new file mode 100644 index 0000000..7337cca --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !386 && !ppc64le) || purego + +package sha3 + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} + +var ( + xorIn = xorInGeneric + copyOut = copyOutGeneric + xorInUnaligned = xorInGeneric + copyOutUnaligned = copyOutGeneric +) + +const xorImplementationUnaligned = "generic" diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go new file mode 100644 index 0000000..8d94771 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import "encoding/binary" + +// xorInGeneric xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. +func xorInGeneric(d *state, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOutGeneric copies uint64s to a byte buffer. +func copyOutGeneric(d *state, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go new file mode 100644 index 0000000..870e2d1 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -0,0 +1,66 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || 386 || ppc64le) && !purego + +package sha3 + +import "unsafe" + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate / 8]uint64 + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(unsafe.Pointer(b)) +} + +// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a +// XOR buf. 
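+// It is only compiled for amd64, 386 and ppc64le (see the build constraint
+// above), little-endian targets where these unaligned 64-bit loads are safe.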
+func xorInUnaligned(d *state, buf []byte) {
+	n := len(buf)
+	bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
+	if n >= 72 {
+		d.a[0] ^= bw[0]
+		d.a[1] ^= bw[1]
+		d.a[2] ^= bw[2]
+		d.a[3] ^= bw[3]
+		d.a[4] ^= bw[4]
+		d.a[5] ^= bw[5]
+		d.a[6] ^= bw[6]
+		d.a[7] ^= bw[7]
+		d.a[8] ^= bw[8]
+	}
+	if n >= 104 {
+		d.a[9] ^= bw[9]
+		d.a[10] ^= bw[10]
+		d.a[11] ^= bw[11]
+		d.a[12] ^= bw[12]
+	}
+	if n >= 136 {
+		d.a[13] ^= bw[13]
+		d.a[14] ^= bw[14]
+		d.a[15] ^= bw[15]
+		d.a[16] ^= bw[16]
+	}
+	if n >= 144 {
+		d.a[17] ^= bw[17]
+	}
+	if n >= 168 {
+		d.a[18] ^= bw[18]
+		d.a[19] ^= bw[19]
+		d.a[20] ^= bw[20]
+	}
+}
+
+func copyOutUnaligned(d *state, buf []byte) {
+	ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
+	copy(buf, ab[:])
+}
+
+var (
+	xorIn   = xorInUnaligned
+	copyOut = copyOutUnaligned
+)
+
+const xorImplementationUnaligned = "unaligned"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c51312f..b9786fb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -17,13 +17,18 @@ github.com/cloudflare/circl/sign/ed448
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
 github.com/davecgh/go-spew/spew
+# github.com/google/uuid v1.4.0
+## explicit
 # github.com/klauspost/compress v1.17.2
 ## explicit; go 1.18
 github.com/klauspost/compress
+github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
+github.com/klauspost/compress/gzip
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/internal/cpuinfo
 github.com/klauspost/compress/internal/snapref
+github.com/klauspost/compress/zlib
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/pmezard/go-difflib v1.0.0
@@ -33,8 +38,8 @@ github.com/pmezard/go-difflib/difflib
 ## explicit; go 1.20
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# golang.org/x/crypto v0.14.0
-## explicit; go 1.17
+# golang.org/x/crypto v0.15.0
+## explicit; go 1.18
 golang.org/x/crypto/argon2
 golang.org/x/crypto/blake2b
 golang.org/x/crypto/chacha20
@@ -50,6 +55,7 @@ golang.org/x/crypto/pbkdf2
 golang.org/x/crypto/poly1305
 golang.org/x/crypto/salsa20/salsa
 golang.org/x/crypto/scrypt
+golang.org/x/crypto/sha3
 # golang.org/x/sys v0.14.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu

From 18024d50c27741b7d9045ef2132963eacda83198 Mon Sep 17 00:00:00 2001
From: D3v
Date: Tue, 14 Nov 2023 22:24:50 +0100
Subject: [PATCH 02/12] Incorrect conversion between integer types

---
 hash/kdf.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/hash/kdf.go b/hash/kdf.go
index 9050c94..37d19f0 100644
--- a/hash/kdf.go
+++ b/hash/kdf.go
@@ -3,6 +3,7 @@ package hash
 import (
 	"encoding/base64"
 	"errors"
+	"math"
 	"regexp"
 	"strconv"
 
@@ -122,7 +123,11 @@ func (a *Argon2ID) Validate(data []byte, argonString string) (bool, error) {
 		if err != nil {
 			return false, errors.New(generic.StrCnct([]string{"parallelism parameter parsing error: ", err.Error()}...))
 		}
-		a.Parallelism = uint8(parsed)
+		if parsed > 0 && parsed <= math.MaxUint8 {
+			a.Parallelism = uint8(parsed)
+		} else {
+			return false, errors.New("parallelism parameter out of uint8 range")
+		}
 	}
 
 	if a.KeyLen == 0 {

From 9139189f9500a3a58f5805b8f34d1f2b5db37f4d Mon Sep 17 00:00:00 2001
From: D3v
Date: Wed, 15 Nov 2023 00:24:48 +0100
Subject: [PATCH 03/12] Aged fix

---
 aged/age_bind.go      |  20 ++++---
 aged/age_bind_test.go |  58 ++++++++++---------
 aged/obf_test.go      |   4 +-
 hash/kdf.go           | 130 ++++++++++++++++++++++--------------------
 hash/kdf_test.go      |   4 +-
 5 files changed, 115 insertions(+), 101
deletions(-) diff --git a/aged/age_bind.go b/aged/age_bind.go index 875bbab..9b3dfc8 100644 --- a/aged/age_bind.go +++ b/aged/age_bind.go @@ -32,6 +32,9 @@ func SetupKeychain(keychainSetup KeychainSetup) (Keychain, error) { keychain.secretKey = identity for _, e := range keychainSetup.PublicKeys { + if e == "" { + continue + } if identity.Recipient().String() != e { publicKey, err := age.ParseX25519Recipient(e) if err != nil { @@ -92,7 +95,6 @@ func (k Keychain) Decrypt(p Parameters) ([]byte, error) { if err != nil { return []byte{}, err } - r, err := age.Decrypt(bytes.NewReader(cipherData), k.secretKey) if err != nil { return []byte{}, err @@ -102,7 +104,7 @@ func (k Keychain) Decrypt(p Parameters) ([]byte, error) { return []byte{}, err } - return decompressor(p) + return decompressor(p, out.Bytes()) } func EncryptWithPwd(p Parameters, pwd string) ([]byte, error) { @@ -157,17 +159,17 @@ func DecryptWithPwd(p Parameters, pwd string) ([]byte, error) { return []byte{}, err } - return decompressor(p) + return decompressor(p, out.Bytes()) } func compressor(p Parameters) (*bytes.Reader, error) { var in *bytes.Reader if p.Compress { - var writer *bytes.Buffer + var writer bytes.Buffer compressorIn := bytes.NewReader(p.Data) - err := p.Compressor.CompressStream(compressorIn, writer) + err := p.Compressor.CompressStream(compressorIn, &writer) if err != nil { return nil, err } @@ -180,15 +182,15 @@ func compressor(p Parameters) (*bytes.Reader, error) { return in, nil } -func decompressor(p Parameters) ([]byte, error) { +func decompressor(p Parameters, data []byte) ([]byte, error) { if p.Compress { - raw, err := p.Compressor.Decompress(p.Data) + raw, err := p.Compressor.Decompress(data) if err != nil { return []byte{}, err } return raw, nil } - return p.Data, nil + return data, nil } func obfuscator(p Parameters, in []byte) ([]byte, error) { @@ -217,7 +219,7 @@ func deobfuscator(p Parameters) ([]byte, error) { } func (k Keychain) KeychainExport() []string { - keys := make([]string, len(k.recipients)) + var keys []string for _, key := range k.recipients { keys = append(keys, fmt.Sprint(key)) } diff --git a/aged/age_bind_test.go b/aged/age_bind_test.go index ddcb2d8..61823f7 100644 --- a/aged/age_bind_test.go +++ b/aged/age_bind_test.go @@ -69,6 +69,38 @@ func keychainInit(t *testing.T) chains { } } + +func TestGenKeypair(t *testing.T) { + _, err := aged.GenKeypair() + r.NoError(t, err) +} + +func TestKeychain(t *testing.T) { + identity, err := aged.GenKeypair() + r.NoError(t, err) + a.Len(t, identity.Recipient().String(), 62) + a.Len(t, identity.String(), 74) +} + +func TestKeychainImportExport(t *testing.T) { + keychain := keychainInit(t) + + s := aged.KeychainSetup{ + SecretKey: keychain.keychain.KeychainExportSecretKey(), + PublicKeys: keychain.keychain.KeychainExport(), + SelfRecipient: true, + } + + t.Log("Public Keys: ", s.PublicKeys) + t.Log("Secret Key: ", s.SecretKey) + + keychainExpected, err := aged.SetupKeychain(s) + r.NoError(t, err) + + r.Equal(t, keychain.keychain.KeychainExportSecretKey(), keychainExpected.KeychainExportSecretKey()) + r.Equal(t, keychain.keychain.KeychainExport(), keychainExpected.KeychainExport()) +} + func TestRoundTrips(t *testing.T) { config := keychainInit(t) @@ -279,33 +311,7 @@ func TestRoundTrips(t *testing.T) { r.Equal(t, plainData, decryptedData) } */ -func TestGenKeypair(t *testing.T) { - _, err := aged.GenKeypair() - r.NoError(t, err) -} -func TestKeychainImportExport(t *testing.T) { - keychain := keychainInit(t) - s := aged.KeychainSetup{ - SecretKey: 
keychain.keychain.KeychainExportSecretKey(), - PublicKeys: keychain.keychain.KeychainExport(), - SelfRecipient: true, - } - t.Log("Public Keys: ", s.PublicKeys) - t.Log("Secret Key: ", s.SecretKey) - - keychainExpected, err := aged.SetupKeychain(s) - r.NoError(t, err) - - r.Equal(t, keychain.keychain.KeychainExportSecretKey(), keychainExpected.KeychainExportSecretKey()) - r.Equal(t, keychain.keychain.KeychainExport(), keychainExpected.KeychainExport()) -} -func TestKeychain(t *testing.T) { - identity, err := aged.GenKeypair() - r.NoError(t, err) - a.Len(t, identity.Recipient().String(), 62) - a.Len(t, identity.String(), 74) -} diff --git a/aged/obf_test.go b/aged/obf_test.go index 46e25d0..c8bd4d3 100644 --- a/aged/obf_test.go +++ b/aged/obf_test.go @@ -31,8 +31,8 @@ func TestObf(t *testing.T) { obfEncrypted, err := obfKeychain.Encrypt(aged.Parameters{ Data: obfTestData, Compress: true, + Compressor: &compression.Zstd{Level: 11}, Obfuscation: false, - Obfuscator: &aged.AgeV1Obf{}, }) r.NoError(t, err) @@ -54,7 +54,7 @@ func TestObf(t *testing.T) { Level: 11, }, Compress: true, - Obfuscation: true, + Obfuscation: false, Obfuscator: &aged.AgeV1Obf{}, }) r.NoError(t, err) diff --git a/hash/kdf.go b/hash/kdf.go index 37d19f0..ba8fb8c 100644 --- a/hash/kdf.go +++ b/hash/kdf.go @@ -3,12 +3,16 @@ package hash import ( "encoding/base64" "errors" + "hash" + "io" "math" "regexp" "strconv" "github.com/D3vl0per/crypt/generic" "golang.org/x/crypto/argon2" + "golang.org/x/crypto/hkdf" + "golang.org/x/crypto/sha3" ) const ( @@ -24,6 +28,13 @@ type Kdf interface { Validate([]byte, string) (bool, error) } +type Hkdf struct { + Salt []byte + Key []byte + HashMode func() hash.Hash + Encoder generic.Hex +} + type Argon2ID struct { Salt []byte Memory uint32 @@ -139,68 +150,6 @@ func (a *Argon2ID) Validate(data []byte, argonString string) (bool, error) { return generic.Compare(hashed.Hash, providedHash), nil } -/* - type Hkdf struct { - Salt []byte - Secret []byte - HashMode func() hash.Hash - } - -// Easy to user HKDF toolset. 
-
-	func (h *Hkdf) Hash(data []byte) ([]byte, error) {
-		kdf := hkdf.New(h.HashMode, h.Secret, h.Salt, data)
-
-		key := make([]byte, HKDFKeysize)
-
-		if _, err := io.ReadFull(kdf, key); err != nil {
-			return []byte{}, err
-		}
-		return key, nil
-	}
-
-	func HKDFRecreate(secret, msg []byte, salt string) ([]byte, error) {
-		salt_raw, err := hex.DecodeString(salt)
-		if err != nil {
-			return []byte{}, err
-		}
-
-		return HKDFBase(secret, salt_raw, msg)
-	}
-
-	func HKDF(secret, msg []byte) (keys, error) {
-		hash := sha256.New
-		salt, err := generic.CSPRNG(int64(hash().Size()))
-		if err != nil {
-			return keys{}, err
-		}
-
-		key, err := HKDFBase(secret, salt, msg)
-		if err != nil {
-			return keys{}, err
-		}
-
-		return keys{
-			Salt: hex.EncodeToString(salt),
-			Hash: hex.EncodeToString(key),
-		}, nil
-	}
-
-	func HKDFVerify(secret, msg []byte, salt, hash string) (bool, error) {
-		hash_to_validate, err := HKDFRecreate(secret, msg, salt)
-		if err != nil {
-			return false, err
-		}
-
-		hash_raw, err := hex.DecodeString(hash)
-		if err != nil {
-			return false, err
-		}
-
-		return generic.Compare(hash_raw, hash_to_validate), nil
-	}
-*/
-
 func (a *Argon2ID) ExtractParameters(input string) (map[string]string, error) {
 	pattern := `\$(argon2id)\$v=(\d+)\$m=(\d+),t=(\d+),p=(\d+)\$([^$]+)\$([^$]+)$`
@@ -240,3 +189,60 @@ func (a *Argon2ID) ExtractParameters(input string) (map[string]string, error) {
 	return parameters, nil
 }
+
+func (h *Hkdf) Hash(data []byte) (string, error) {
+	if h.HashMode == nil {
+		h.HashMode = sha3.New512
+	}
+
+	if h.Salt != nil {
+		if len(h.Salt) != h.HashMode().Size() {
+			return "", errors.New(generic.StrCnct([]string{"salt must be ", strconv.Itoa(h.HashMode().Size()), " byte long"}...))
+		}
+	} else {
+		var err error
+		h.Salt, err = generic.CSPRNG(int64(h.HashMode().Size()))
+		if err != nil {
+			return "", err
+		}
+	}
+
+	kdf := hkdf.New(h.HashMode, h.Key, h.Salt, data)
+
+	key := make([]byte, HKDFKeysize)
+
+	if _, err := io.ReadFull(kdf, key); err != nil {
+		return "", err
+	}
+
+	return generic.StrCnct([]string{h.Encoder.Encode(key), "#", h.Encoder.Encode(h.Salt)}...), nil
+}
+
+func (h *Hkdf) Validate(data []byte, hash string) (bool, error) {
+	if h.HashMode == nil {
+		h.HashMode = sha3.New512
+	}
+
+	if len(h.Salt) == 0 || len(h.Salt) != h.HashMode().Size() {
+		return false, errors.New(generic.StrCnct([]string{"salt must be ", strconv.Itoa(h.HashMode().Size()), " byte long"}...))
+	}
+
+	kdf := hkdf.New(h.HashMode, h.Key, h.Salt, data)
+
+	key := make([]byte, HKDFKeysize)
+
+	if _, err := io.ReadFull(kdf, key); err != nil {
+		return false, err
+	}
+
+	// Recompute the "<key>#<salt>" string with the stored salt and compare
+	// it against the provided hash string.
+	expected := generic.StrCnct([]string{h.Encoder.Encode(key), "#", h.Encoder.Encode(h.Salt)}...)
+
+	return generic.Compare([]byte(expected), []byte(hash)), nil
+}
+
diff --git a/hash/kdf_test.go b/hash/kdf_test.go
index 92159d1..a4b87e9 100644
--- a/hash/kdf_test.go
+++ b/hash/kdf_test.go
@@ -4,7 +4,7 @@ import (
 	"testing"
 
 	"github.com/D3vl0per/crypt/generic"
-	"github.com/D3vl0per/crypt/hash"
+	hasher "github.com/D3vl0per/crypt/hash"
 	a "github.com/stretchr/testify/assert"
 	r "github.com/stretchr/testify/require"
 )
@@ -13,7 +13,7 @@ func TestArgon2ID(t *testing.T) {
 	data := []byte("Correct Horse Battery Staple")
 	salt, err := generic.CSPRNG(16)
 	r.NoError(t, err)
-	argon := []hash.Argon2ID{
+	argon := []hasher.Argon2ID{
 		{},
 		{
 			Memory: 2 * 64 * 1024,

From bcdeb94ed803661d5431437a9f1dc00434f86b73 Mon Sep 17 00:00:00 2001
From: D3v
Date: Wed, 15 Nov 2023 01:13:56 +0100
Subject: [PATCH 04/12] Tests and
GitHub Action CI added --- .github/workflows/test_test.yaml | 27 ++ aged/age_bind.go | 2 +- aged/age_bind_test.go | 52 ++- coverage.txt | 584 +++++++++++++++++++++++++++++++ symmetric/symmetric_test.go | 12 + 5 files changed, 666 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/test_test.yaml create mode 100644 coverage.txt diff --git a/.github/workflows/test_test.yaml b/.github/workflows/test_test.yaml new file mode 100644 index 0000000..9e5dce1 --- /dev/null +++ b/.github/workflows/test_test.yaml @@ -0,0 +1,27 @@ +name: Go Test + +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.21.3 + + - name: Install dependencies + run: go mod download + + - name: Run tests + run: go test -race -coverprofile=coverage.txt -covermode=atomic ./... + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + \ No newline at end of file diff --git a/aged/age_bind.go b/aged/age_bind.go index 9b3dfc8..7310be6 100644 --- a/aged/age_bind.go +++ b/aged/age_bind.go @@ -210,7 +210,7 @@ func deobfuscator(p Parameters) ([]byte, error) { var err error cipherData, err = p.Obfuscator.Deobfuscate(p.Data) if err != nil { - return []byte{}, errors.New("failed to deobfuscate header, maybe not encrypted") + return []byte{}, errors.New("failed to deobfuscate header, maybe not obfuscated?") } } else { cipherData = p.Data diff --git a/aged/age_bind_test.go b/aged/age_bind_test.go index 61823f7..288ec9f 100644 --- a/aged/age_bind_test.go +++ b/aged/age_bind_test.go @@ -194,6 +194,9 @@ func TestRoundTrips(t *testing.T) { for _, encryptParam := range p { decryptParam := encryptParam + encryptPwdParam := encryptParam + decryptPwdParam := encryptPwdParam + var err error decryptParam.Data, err = config.keychain.Encrypt(encryptParam) @@ -211,8 +214,46 @@ func TestRoundTrips(t *testing.T) { decryptedData3, err4 := config.keychainWrong.Decrypt(decryptParam) r.Equal(t, []byte{}, decryptedData3) r.EqualError(t, err4, "no identity matched any of the recipients") + + + pwd, err := generic.CSPRNG(32) + r.NoError(t, err) + + decryptPwdParam.Data, err = aged.EncryptWithPwd(encryptPwdParam, string(pwd)) + r.NoError(t, err, "Encryption without error") + t.Logf("Pwd protected data: %d", decryptPwdParam.Data) + + decryptedPwdData, err := aged.DecryptWithPwd(decryptPwdParam, string(pwd)) + r.NoError(t, err, "Decryption without error") + r.Equal(t, encryptPwdParam.Data, decryptedPwdData) + } +} + +func TestWrongSecretKeyKeyringSetup(t *testing.T) { + keychain := keychainInit(t) + + s := aged.KeychainSetup{ + SecretKey: "correct horse battery staple", + PublicKeys: []string{keychain.publicKey1.Recipient().String(), keychain.publicKey2.Recipient().String()}, + SelfRecipient: true, } + _, err := aged.SetupKeychain(s) + r.Error(t, err) +} + +func TestWrongPublicKeyKeyringSetup(t *testing.T) { + keychain := keychainInit(t) + + s := aged.KeychainSetup{ + SecretKey: keychain.keychain.KeychainExportSecretKey(), + PublicKeys: []string{keychain.publicKey1.Recipient().String(), keychain.publicKey2.Recipient().String(), "correct horse battery staple"}, + SelfRecipient: true, + } + + _, err := aged.SetupKeychain(s) + r.Error(t, err) + t.Log(err.Error()) } /* @@ -299,16 +340,7 @@ func TestRoundTrips(t *testing.T) { } func TestEncryptWithPwd(t *testing.T) { - key, err := generic.CSPRNG(32) - r.NoError(t, err, "CSPRNG without error") - - 
cipherData, err := aged.EncryptWithPwd(string(key), plainData, true, true) - r.NoError(t, err, "Encryption without error") - t.Logf("Size: %d", len(cipherData)) - - decryptedData, err := aged.DecryptWithPwd(string(key), cipherData, true, true) - r.NoError(t, err, "Decryption without error") - r.Equal(t, plainData, decryptedData) + } */ diff --git a/coverage.txt b/coverage.txt new file mode 100644 index 0000000..0f65e1f --- /dev/null +++ b/coverage.txt @@ -0,0 +1,584 @@ +mode: atomic +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:40.36,43.16 3 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:43.16,45.3 1 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:47.2,47.12 1 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:50.55,51.43 1 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:51.43,53.3 1 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:54.2,57.12 4 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:60.43,62.2 1 1 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:64.64,67.16 2 1 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:67.16,69.3 1 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:71.2,71.55 1 1 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:78.34,81.16 3 1 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:81.16,83.3 1 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:84.2,84.12 1 1 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:87.53,88.41 1 4 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:88.41,90.3 1 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:91.2,94.12 4 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:97.41,99.2 1 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:101.62,103.16 2 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:103.16,105.3 1 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:107.2,107.64 1 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:110.74,111.27 1 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:112.25,113.18 1 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:114.10,115.44 1 0 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:119.70,120.27 1 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:121.23,122.18 1 2 +github.com/D3vl0per/crypt/asymmetric/asymmetric.go:123.10,124.44 1 0 +github.com/D3vl0per/crypt/generic/csprng.go:10.38,12.60 2 10 +github.com/D3vl0per/crypt/generic/csprng.go:12.60,14.3 1 0 +github.com/D3vl0per/crypt/generic/csprng.go:15.2,15.20 1 10 +github.com/D3vl0per/crypt/generic/csprng.go:18.41,21.2 2 9 +github.com/D3vl0per/crypt/generic/csprng.go:23.37,25.16 2 1 +github.com/D3vl0per/crypt/generic/csprng.go:25.16,27.3 1 0 +github.com/D3vl0per/crypt/generic/csprng.go:28.2,31.52 3 1 +github.com/D3vl0per/crypt/generic/csprng.go:31.52,33.3 1 0 +github.com/D3vl0per/crypt/generic/csprng.go:34.2,34.20 1 1 +github.com/D3vl0per/crypt/generic/csprng.go:37.23,39.2 1 1 +github.com/D3vl0per/crypt/generic/fs.go:10.49,11.16 1 0 +github.com/D3vl0per/crypt/generic/fs.go:11.16,13.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:15.2,16.16 2 0 +github.com/D3vl0per/crypt/generic/fs.go:16.16,18.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:19.2,22.16 3 0 +github.com/D3vl0per/crypt/generic/fs.go:22.16,24.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:26.2,29.29 3 0 +github.com/D3vl0per/crypt/generic/fs.go:29.29,32.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:32.17,34.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:35.3,36.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:36.17,38.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:39.3,39.32 1 0 
+github.com/D3vl0per/crypt/generic/fs.go:39.32,41.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:43.3,44.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:44.17,46.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:47.3,48.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:48.17,50.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:51.3,52.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:52.17,54.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:56.3,56.32 1 0 +github.com/D3vl0per/crypt/generic/fs.go:56.32,58.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:61.2,62.16 2 0 +github.com/D3vl0per/crypt/generic/fs.go:62.16,64.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:65.2,65.12 1 0 +github.com/D3vl0per/crypt/generic/fs.go:68.65,69.16 1 0 +github.com/D3vl0per/crypt/generic/fs.go:69.16,71.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:74.2,75.16 2 0 +github.com/D3vl0per/crypt/generic/fs.go:75.16,77.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:78.2,81.16 3 0 +github.com/D3vl0per/crypt/generic/fs.go:81.16,83.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:85.2,88.29 3 0 +github.com/D3vl0per/crypt/generic/fs.go:88.29,91.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:91.17,93.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:94.3,95.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:95.17,97.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:99.3,99.32 1 0 +github.com/D3vl0per/crypt/generic/fs.go:99.32,101.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:103.3,104.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:104.17,106.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:108.3,109.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:109.17,111.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:112.3,113.17 2 0 +github.com/D3vl0per/crypt/generic/fs.go:113.17,115.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:116.3,116.32 1 0 +github.com/D3vl0per/crypt/generic/fs.go:116.32,118.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:120.2,121.16 2 0 +github.com/D3vl0per/crypt/generic/fs.go:121.16,123.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:124.2,125.16 2 0 +github.com/D3vl0per/crypt/generic/fs.go:125.16,127.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:128.2,128.31 1 0 +github.com/D3vl0per/crypt/generic/fs.go:128.31,130.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:131.2,131.12 1 0 +github.com/D3vl0per/crypt/generic/fs.go:134.63,137.87 2 0 +github.com/D3vl0per/crypt/generic/fs.go:137.87,138.17 1 0 +github.com/D3vl0per/crypt/generic/fs.go:138.17,140.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:142.3,142.43 1 0 +github.com/D3vl0per/crypt/generic/fs.go:142.43,144.4 1 0 +github.com/D3vl0per/crypt/generic/fs.go:146.3,146.13 1 0 +github.com/D3vl0per/crypt/generic/fs.go:148.2,148.16 1 0 +github.com/D3vl0per/crypt/generic/fs.go:148.16,150.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:151.2,151.19 1 0 +github.com/D3vl0per/crypt/generic/fs.go:154.51,157.16 2 0 +github.com/D3vl0per/crypt/generic/fs.go:157.16,159.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:160.2,162.16 3 0 +github.com/D3vl0per/crypt/generic/fs.go:162.16,164.3 1 0 +github.com/D3vl0per/crypt/generic/fs.go:165.2,165.18 1 0 +github.com/D3vl0per/crypt/generic/utils.go:22.45,24.2 1 0 +github.com/D3vl0per/crypt/generic/utils.go:26.54,28.2 1 0 +github.com/D3vl0per/crypt/generic/utils.go:30.42,32.2 1 0 +github.com/D3vl0per/crypt/generic/utils.go:34.51,36.2 1 0 +github.com/D3vl0per/crypt/generic/utils.go:38.32,40.2 1 0 +github.com/D3vl0per/crypt/generic/utils.go:42.55,44.16 2 0 +github.com/D3vl0per/crypt/generic/utils.go:44.16,46.3 1 0 
+github.com/D3vl0per/crypt/generic/utils.go:48.2,52.59 2 0 +github.com/D3vl0per/crypt/generic/utils.go:55.57,57.16 2 0 +github.com/D3vl0per/crypt/generic/utils.go:57.16,59.3 1 0 +github.com/D3vl0per/crypt/generic/utils.go:61.2,65.59 2 0 +github.com/D3vl0per/crypt/generic/utils.go:68.56,70.16 2 0 +github.com/D3vl0per/crypt/generic/utils.go:70.16,72.3 1 0 +github.com/D3vl0per/crypt/generic/utils.go:74.2,77.16 3 0 +github.com/D3vl0per/crypt/generic/utils.go:77.16,79.3 1 0 +github.com/D3vl0per/crypt/generic/utils.go:81.2,83.37 2 0 +github.com/D3vl0per/crypt/generic/utils.go:86.56,88.16 2 0 +github.com/D3vl0per/crypt/generic/utils.go:88.16,90.3 1 0 +github.com/D3vl0per/crypt/generic/utils.go:92.2,95.16 3 0 +github.com/D3vl0per/crypt/generic/utils.go:95.16,97.3 1 0 +github.com/D3vl0per/crypt/generic/utils.go:99.2,101.37 2 0 +github.com/D3vl0per/crypt/generic/utils.go:104.29,105.22 1 0 +github.com/D3vl0per/crypt/generic/utils.go:105.22,106.13 1 0 +github.com/D3vl0per/crypt/generic/utils.go:106.13,108.4 1 0 +github.com/D3vl0per/crypt/generic/utils.go:110.2,110.13 1 0 +github.com/D3vl0per/crypt/generic/utils.go:113.36,116.24 2 0 +github.com/D3vl0per/crypt/generic/utils.go:116.24,118.3 1 0 +github.com/D3vl0per/crypt/generic/utils.go:120.2,120.24 1 0 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:20.45,22.16 2 3 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:22.16,24.3 1 0 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:26.2,29.8 1 3 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:32.74,43.16 8 1 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:43.16,45.3 1 0 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:47.2,49.88 3 1 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:52.75,65.9 10 1 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:65.9,67.3 1 0 +github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:68.2,68.23 1 1 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:11.65,12.23 1 1 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:12.23,14.3 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:16.2,20.16 4 1 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:20.16,22.3 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:24.2,27.69 3 1 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:30.66,31.23 1 1 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:31.23,33.3 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:35.2,42.9 6 1 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:42.9,44.3 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:46.2,46.23 1 1 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:49.54,51.16 2 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:51.16,53.3 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:54.2,57.67 3 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:57.67,59.3 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:61.2,61.23 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:64.54,66.16 2 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:66.16,68.3 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:69.2,72.67 3 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:72.67,74.3 1 0 +github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:76.2,76.23 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:36.68,37.42 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:37.42,39.3 
1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:41.2,42.16 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:42.16,44.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:46.2,47.54 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:47.54,48.13 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:51.2,51.53 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:54.69,55.42 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:55.42,57.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:59.2,60.16 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:60.16,62.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:64.2,64.51 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:64.51,66.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:68.2,71.16 3 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:71.16,73.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:74.2,74.23 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:80.60,81.30 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:81.30,83.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:85.2,86.25 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:86.25,88.3 1 4 +github.com/D3vl0per/crypt/symmetric/symmetric.go:90.2,90.58 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:90.58,92.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:94.2,94.19 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:97.60,99.2 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:101.70,102.44 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:102.44,104.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:106.2,107.19 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:107.19,111.3 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:111.8,113.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:114.2,115.16 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:115.16,117.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:119.2,119.42 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:119.42,121.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:122.2,122.34 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:122.34,124.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:125.2,125.12 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:128.70,129.44 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:129.44,131.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:133.2,134.19 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:134.19,138.3 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:138.8,140.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:142.2,143.16 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:143.16,145.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:147.2,147.43 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:147.43,149.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:150.2,150.12 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:157.71,159.51 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:159.51,161.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:163.2,164.16 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:164.16,166.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:168.2,168.41 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:171.76,173.54 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:173.54,175.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:177.2,177.44 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:177.44,179.3 1 0 
+github.com/D3vl0per/crypt/symmetric/symmetric.go:181.2,182.16 2 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:182.16,184.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:186.2,186.41 1 1 +github.com/D3vl0per/crypt/symmetric/symmetric.go:189.61,192.53 3 2 +github.com/D3vl0per/crypt/symmetric/symmetric.go:192.53,194.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:195.2,195.31 1 2 +github.com/D3vl0per/crypt/symmetric/symmetric.go:195.31,197.3 1 0 +github.com/D3vl0per/crypt/symmetric/symmetric.go:198.2,198.23 1 2 +github.com/D3vl0per/crypt/compression/compression.go:25.52,30.16 4 25 +github.com/D3vl0per/crypt/compression/compression.go:30.16,32.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:34.2,34.36 1 25 +github.com/D3vl0per/crypt/compression/compression.go:37.66,40.16 2 40 +github.com/D3vl0per/crypt/compression/compression.go:40.16,42.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:44.2,45.16 2 40 +github.com/D3vl0per/crypt/compression/compression.go:45.16,48.3 2 0 +github.com/D3vl0per/crypt/compression/compression.go:49.2,49.20 1 40 +github.com/D3vl0per/crypt/compression/compression.go:52.54,57.16 4 25 +github.com/D3vl0per/crypt/compression/compression.go:57.16,59.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:61.2,61.38 1 25 +github.com/D3vl0per/crypt/compression/compression.go:64.68,66.16 2 40 +github.com/D3vl0per/crypt/compression/compression.go:66.16,69.3 2 0 +github.com/D3vl0per/crypt/compression/compression.go:70.2,72.12 3 40 +github.com/D3vl0per/crypt/compression/compression.go:75.31,77.2 1 15 +github.com/D3vl0per/crypt/compression/compression.go:83.52,88.16 4 12 +github.com/D3vl0per/crypt/compression/compression.go:88.16,90.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:92.2,92.36 1 12 +github.com/D3vl0per/crypt/compression/compression.go:95.66,98.16 2 24 +github.com/D3vl0per/crypt/compression/compression.go:98.16,100.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:101.2,102.16 2 24 +github.com/D3vl0per/crypt/compression/compression.go:102.16,105.3 2 0 +github.com/D3vl0per/crypt/compression/compression.go:106.2,106.20 1 24 +github.com/D3vl0per/crypt/compression/compression.go:109.54,114.16 4 12 +github.com/D3vl0per/crypt/compression/compression.go:114.16,116.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:118.2,118.38 1 12 +github.com/D3vl0per/crypt/compression/compression.go:121.68,123.16 2 25 +github.com/D3vl0per/crypt/compression/compression.go:123.16,126.3 2 0 +github.com/D3vl0per/crypt/compression/compression.go:127.2,129.12 3 25 +github.com/D3vl0per/crypt/compression/compression.go:132.31,134.2 1 12 +github.com/D3vl0per/crypt/compression/compression.go:140.53,145.16 4 15 +github.com/D3vl0per/crypt/compression/compression.go:145.16,147.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:149.2,149.36 1 15 +github.com/D3vl0per/crypt/compression/compression.go:152.67,155.16 2 30 +github.com/D3vl0per/crypt/compression/compression.go:155.16,157.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:158.2,159.16 2 30 +github.com/D3vl0per/crypt/compression/compression.go:159.16,162.3 2 0 +github.com/D3vl0per/crypt/compression/compression.go:163.2,163.20 1 30 +github.com/D3vl0per/crypt/compression/compression.go:166.55,171.16 4 15 +github.com/D3vl0per/crypt/compression/compression.go:171.16,173.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:175.2,175.38 1 15 +github.com/D3vl0per/crypt/compression/compression.go:178.69,183.2 4 30 
+github.com/D3vl0per/crypt/compression/compression.go:185.32,187.2 1 15 +github.com/D3vl0per/crypt/compression/compression.go:193.53,198.16 4 15 +github.com/D3vl0per/crypt/compression/compression.go:198.16,200.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:202.2,202.36 1 15 +github.com/D3vl0per/crypt/compression/compression.go:205.67,208.16 2 30 +github.com/D3vl0per/crypt/compression/compression.go:208.16,210.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:211.2,212.16 2 30 +github.com/D3vl0per/crypt/compression/compression.go:212.16,215.3 2 0 +github.com/D3vl0per/crypt/compression/compression.go:216.2,216.20 1 30 +github.com/D3vl0per/crypt/compression/compression.go:219.55,224.16 4 15 +github.com/D3vl0per/crypt/compression/compression.go:224.16,226.3 1 0 +github.com/D3vl0per/crypt/compression/compression.go:228.2,228.38 1 15 +github.com/D3vl0per/crypt/compression/compression.go:231.69,233.16 2 30 +github.com/D3vl0per/crypt/compression/compression.go:233.16,236.3 2 0 +github.com/D3vl0per/crypt/compression/compression.go:237.2,239.12 3 30 +github.com/D3vl0per/crypt/compression/compression.go:242.32,244.2 1 15 +github.com/D3vl0per/crypt/hash/fs.go:5.75,7.16 2 0 +github.com/D3vl0per/crypt/hash/fs.go:7.16,9.3 1 0 +github.com/D3vl0per/crypt/hash/fs.go:10.2,11.16 2 0 +github.com/D3vl0per/crypt/hash/fs.go:11.16,13.3 1 0 +github.com/D3vl0per/crypt/hash/fs.go:14.2,14.18 1 0 +github.com/D3vl0per/crypt/hash/hash.go:33.56,35.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:37.81,39.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:39.16,41.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:43.2,43.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:46.61,48.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:50.81,52.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:52.16,54.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:56.2,56.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:63.56,65.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:67.81,69.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:69.16,71.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:73.2,73.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:76.61,78.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:80.81,82.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:82.16,84.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:86.2,86.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:93.56,95.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:97.81,99.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:99.16,101.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:103.2,103.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:106.61,108.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:110.81,112.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:112.16,114.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:116.2,116.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:119.62,123.16 3 18 +github.com/D3vl0per/crypt/hash/hash.go:123.16,125.17 2 9 +github.com/D3vl0per/crypt/hash/hash.go:125.17,127.4 1 0 +github.com/D3vl0per/crypt/hash/hash.go:128.8,130.17 2 9 +github.com/D3vl0per/crypt/hash/hash.go:130.17,132.4 1 0 +github.com/D3vl0per/crypt/hash/hash.go:135.2,135.44 1 18 +github.com/D3vl0per/crypt/hash/hash.go:135.44,137.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:138.2,138.27 1 18 +github.com/D3vl0per/crypt/hash/hash.go:145.53,147.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:149.78,151.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:151.16,153.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:155.2,155.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:158.58,160.2 1 0 +github.com/D3vl0per/crypt/hash/hash.go:162.78,164.16 2 0 
+github.com/D3vl0per/crypt/hash/hash.go:164.16,166.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:168.2,168.51 1 0 +github.com/D3vl0per/crypt/hash/hash.go:171.52,175.16 3 3 +github.com/D3vl0per/crypt/hash/hash.go:175.16,177.44 2 0 +github.com/D3vl0per/crypt/hash/hash.go:177.44,179.4 1 0 +github.com/D3vl0per/crypt/hash/hash.go:180.8,182.17 2 3 +github.com/D3vl0per/crypt/hash/hash.go:182.17,184.4 1 0 +github.com/D3vl0per/crypt/hash/hash.go:187.2,187.44 1 3 +github.com/D3vl0per/crypt/hash/hash.go:187.44,189.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:191.2,191.27 1 3 +github.com/D3vl0per/crypt/hash/hash.go:198.53,200.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:202.78,204.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:204.16,206.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:208.2,208.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:211.58,213.2 1 0 +github.com/D3vl0per/crypt/hash/hash.go:215.78,217.16 2 0 +github.com/D3vl0per/crypt/hash/hash.go:217.16,219.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:221.2,221.51 1 0 +github.com/D3vl0per/crypt/hash/hash.go:224.52,228.16 3 3 +github.com/D3vl0per/crypt/hash/hash.go:228.16,230.44 2 0 +github.com/D3vl0per/crypt/hash/hash.go:230.44,232.4 1 0 +github.com/D3vl0per/crypt/hash/hash.go:233.8,235.17 2 3 +github.com/D3vl0per/crypt/hash/hash.go:235.17,237.4 1 0 +github.com/D3vl0per/crypt/hash/hash.go:240.2,240.44 1 3 +github.com/D3vl0per/crypt/hash/hash.go:240.44,242.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:244.2,244.27 1 3 +github.com/D3vl0per/crypt/hash/hash.go:251.53,253.2 1 1 +github.com/D3vl0per/crypt/hash/hash.go:255.78,257.16 2 2 +github.com/D3vl0per/crypt/hash/hash.go:257.16,259.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:261.2,261.51 1 2 +github.com/D3vl0per/crypt/hash/hash.go:264.58,266.2 1 0 +github.com/D3vl0per/crypt/hash/hash.go:268.78,270.16 2 0 +github.com/D3vl0per/crypt/hash/hash.go:270.16,272.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:274.2,274.51 1 0 +github.com/D3vl0per/crypt/hash/hash.go:277.52,281.16 3 3 +github.com/D3vl0per/crypt/hash/hash.go:281.16,283.44 2 0 +github.com/D3vl0per/crypt/hash/hash.go:283.44,285.4 1 0 +github.com/D3vl0per/crypt/hash/hash.go:286.8,288.17 2 3 +github.com/D3vl0per/crypt/hash/hash.go:288.17,290.4 1 0 +github.com/D3vl0per/crypt/hash/hash.go:293.2,293.44 1 3 +github.com/D3vl0per/crypt/hash/hash.go:293.44,295.3 1 0 +github.com/D3vl0per/crypt/hash/hash.go:297.2,297.27 1 3 +github.com/D3vl0per/crypt/hash/kdf.go:54.54,76.2 5 14 +github.com/D3vl0per/crypt/hash/kdf.go:78.54,79.19 1 7 +github.com/D3vl0per/crypt/hash/kdf.go:79.19,80.24 1 2 +github.com/D3vl0per/crypt/hash/kdf.go:80.24,82.4 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:83.8,86.17 3 5 +github.com/D3vl0per/crypt/hash/kdf.go:86.17,88.4 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:91.2,97.32 6 7 +github.com/D3vl0per/crypt/hash/kdf.go:100.76,102.16 2 7 +github.com/D3vl0per/crypt/hash/kdf.go:102.16,104.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:106.2,107.16 2 7 +github.com/D3vl0per/crypt/hash/kdf.go:107.16,109.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:111.2,112.16 2 7 +github.com/D3vl0per/crypt/hash/kdf.go:112.16,114.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:116.2,116.23 1 7 +github.com/D3vl0per/crypt/hash/kdf.go:116.23,118.17 2 0 +github.com/D3vl0per/crypt/hash/kdf.go:118.17,120.4 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:121.3,121.32 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:124.2,124.19 1 7 +github.com/D3vl0per/crypt/hash/kdf.go:124.19,126.17 2 0 +github.com/D3vl0per/crypt/hash/kdf.go:126.17,128.4 1 0 
+github.com/D3vl0per/crypt/hash/kdf.go:129.3,129.28 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:132.2,132.24 1 7 +github.com/D3vl0per/crypt/hash/kdf.go:132.24,134.17 2 0 +github.com/D3vl0per/crypt/hash/kdf.go:134.17,136.4 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:137.3,137.44 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:137.44,139.4 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:139.9,141.4 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:144.2,144.19 1 7 +github.com/D3vl0per/crypt/hash/kdf.go:144.19,146.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:148.2,150.56 2 7 +github.com/D3vl0per/crypt/hash/kdf.go:153.79,160.23 4 14 +github.com/D3vl0per/crypt/hash/kdf.go:160.23,162.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:164.2,174.80 2 14 +github.com/D3vl0per/crypt/hash/kdf.go:174.80,176.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:178.2,178.110 1 14 +github.com/D3vl0per/crypt/hash/kdf.go:178.110,180.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:182.2,182.34 1 14 +github.com/D3vl0per/crypt/hash/kdf.go:182.34,184.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:186.2,186.34 1 14 +github.com/D3vl0per/crypt/hash/kdf.go:186.34,188.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:190.2,190.24 1 14 +github.com/D3vl0per/crypt/hash/kdf.go:193.50,194.19 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:194.19,195.41 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:195.41,197.4 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:198.8,201.17 3 0 +github.com/D3vl0per/crypt/hash/kdf.go:201.17,203.4 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:206.2,206.23 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:206.23,208.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:210.2,214.49 3 0 +github.com/D3vl0per/crypt/hash/kdf.go:214.49,216.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:218.2,218.98 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:221.65,223.60 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:223.60,225.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:227.2,227.23 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:227.23,229.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:231.2,235.49 3 0 +github.com/D3vl0per/crypt/hash/kdf.go:235.49,237.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:240.2,241.16 2 0 +github.com/D3vl0per/crypt/hash/kdf.go:241.16,243.3 1 0 +github.com/D3vl0per/crypt/hash/kdf.go:245.2,245.47 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:24.67,28.16 3 16 +github.com/D3vl0per/crypt/aged/age_bind.go:28.16,30.3 1 1 +github.com/D3vl0per/crypt/aged/age_bind.go:32.2,34.45 2 15 +github.com/D3vl0per/crypt/aged/age_bind.go:34.45,35.14 1 31 +github.com/D3vl0per/crypt/aged/age_bind.go:35.14,36.12 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:38.3,38.41 1 31 +github.com/D3vl0per/crypt/aged/age_bind.go:38.41,40.18 2 30 +github.com/D3vl0per/crypt/aged/age_bind.go:40.18,42.5 1 1 +github.com/D3vl0per/crypt/aged/age_bind.go:43.4,43.64 1 29 +github.com/D3vl0per/crypt/aged/age_bind.go:47.2,47.33 1 14 +github.com/D3vl0per/crypt/aged/age_bind.go:47.33,49.3 1 14 +github.com/D3vl0per/crypt/aged/age_bind.go:51.2,51.22 1 14 +github.com/D3vl0per/crypt/aged/age_bind.go:54.48,56.16 2 20 +github.com/D3vl0per/crypt/aged/age_bind.go:56.16,58.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:59.2,59.22 1 20 +github.com/D3vl0per/crypt/aged/age_bind.go:70.57,73.16 2 12 +github.com/D3vl0per/crypt/aged/age_bind.go:73.16,75.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:77.2,79.16 3 12 +github.com/D3vl0per/crypt/aged/age_bind.go:79.16,81.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:83.2,83.42 1 12 +github.com/D3vl0per/crypt/aged/age_bind.go:83.42,85.3 1 0 
+github.com/D3vl0per/crypt/aged/age_bind.go:86.2,86.34 1 12 +github.com/D3vl0per/crypt/aged/age_bind.go:86.34,88.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:90.2,90.35 1 12 +github.com/D3vl0per/crypt/aged/age_bind.go:93.57,95.16 2 34 +github.com/D3vl0per/crypt/aged/age_bind.go:95.16,97.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:98.2,99.16 2 34 +github.com/D3vl0per/crypt/aged/age_bind.go:99.16,101.3 1 11 +github.com/D3vl0per/crypt/aged/age_bind.go:102.2,103.43 2 23 +github.com/D3vl0per/crypt/aged/age_bind.go:103.43,105.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:107.2,107.37 1 23 +github.com/D3vl0per/crypt/aged/age_bind.go:110.63,112.16 2 11 +github.com/D3vl0per/crypt/aged/age_bind.go:112.16,114.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:116.2,117.16 2 11 +github.com/D3vl0per/crypt/aged/age_bind.go:117.16,119.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:121.2,123.16 3 11 +github.com/D3vl0per/crypt/aged/age_bind.go:123.16,125.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:127.2,127.16 1 11 +github.com/D3vl0per/crypt/aged/age_bind.go:127.16,129.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:131.2,131.42 1 11 +github.com/D3vl0per/crypt/aged/age_bind.go:131.42,133.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:134.2,134.34 1 11 +github.com/D3vl0per/crypt/aged/age_bind.go:134.34,136.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:138.2,138.35 1 11 +github.com/D3vl0per/crypt/aged/age_bind.go:141.63,143.16 2 11 +github.com/D3vl0per/crypt/aged/age_bind.go:143.16,145.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:147.2,148.16 2 11 +github.com/D3vl0per/crypt/aged/age_bind.go:148.16,150.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:152.2,153.16 2 11 +github.com/D3vl0per/crypt/aged/age_bind.go:153.16,155.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:157.2,158.43 2 11 +github.com/D3vl0per/crypt/aged/age_bind.go:158.43,160.3 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:162.2,162.37 1 11 +github.com/D3vl0per/crypt/aged/age_bind.go:165.54,168.16 2 23 +github.com/D3vl0per/crypt/aged/age_bind.go:168.16,173.17 4 17 +github.com/D3vl0per/crypt/aged/age_bind.go:173.17,175.4 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:177.3,177.39 1 17 +github.com/D3vl0per/crypt/aged/age_bind.go:179.8,181.3 1 6 +github.com/D3vl0per/crypt/aged/age_bind.go:182.2,182.16 1 23 +github.com/D3vl0per/crypt/aged/age_bind.go:185.62,186.16 1 34 +github.com/D3vl0per/crypt/aged/age_bind.go:186.16,188.17 2 25 +github.com/D3vl0per/crypt/aged/age_bind.go:188.17,190.4 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:191.3,191.18 1 25 +github.com/D3vl0per/crypt/aged/age_bind.go:193.2,193.18 1 9 +github.com/D3vl0per/crypt/aged/age_bind.go:196.58,197.19 1 23 +github.com/D3vl0per/crypt/aged/age_bind.go:197.19,199.17 2 12 +github.com/D3vl0per/crypt/aged/age_bind.go:199.17,201.4 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:202.3,202.18 1 12 +github.com/D3vl0per/crypt/aged/age_bind.go:204.2,204.16 1 11 +github.com/D3vl0per/crypt/aged/age_bind.go:207.49,209.19 2 45 +github.com/D3vl0per/crypt/aged/age_bind.go:209.19,212.17 3 24 +github.com/D3vl0per/crypt/aged/age_bind.go:212.17,214.4 1 0 +github.com/D3vl0per/crypt/aged/age_bind.go:215.8,217.3 1 21 +github.com/D3vl0per/crypt/aged/age_bind.go:218.2,218.24 1 45 +github.com/D3vl0per/crypt/aged/age_bind.go:221.45,223.35 2 3 +github.com/D3vl0per/crypt/aged/age_bind.go:223.35,225.3 1 9 +github.com/D3vl0per/crypt/aged/age_bind.go:226.2,226.13 1 3 +github.com/D3vl0per/crypt/aged/age_bind.go:229.52,231.2 1 4 
+github.com/D3vl0per/crypt/aged/obf.go:26.62,29.23 2 13 +github.com/D3vl0per/crypt/aged/obf.go:29.23,31.3 1 0 +github.com/D3vl0per/crypt/aged/obf.go:32.2,32.44 1 13 +github.com/D3vl0per/crypt/aged/obf.go:32.44,34.3 1 0 +github.com/D3vl0per/crypt/aged/obf.go:35.2,39.27 4 13 +github.com/D3vl0per/crypt/aged/obf.go:39.27,42.3 2 3337 +github.com/D3vl0per/crypt/aged/obf.go:44.2,45.58 2 13 +github.com/D3vl0per/crypt/aged/obf.go:48.64,50.23 2 25 +github.com/D3vl0per/crypt/aged/obf.go:50.23,52.3 1 0 +github.com/D3vl0per/crypt/aged/obf.go:53.2,53.45 1 25 +github.com/D3vl0per/crypt/aged/obf.go:53.45,55.3 1 0 +github.com/D3vl0per/crypt/aged/obf.go:56.2,57.32 2 25 +github.com/D3vl0per/crypt/aged/obf.go:57.32,59.3 1 0 +github.com/D3vl0per/crypt/aged/obf.go:61.2,64.54 3 25 +github.com/D3vl0per/crypt/aged/obf.go:64.54,67.3 2 7693 +github.com/D3vl0per/crypt/aged/obf.go:69.2,69.52 1 25 +github.com/D3vl0per/crypt/aged/stream.go:37.60,38.20 1 16 +github.com/D3vl0per/crypt/aged/stream.go:38.20,40.3 1 0 +github.com/D3vl0per/crypt/aged/stream.go:42.2,43.16 2 16 +github.com/D3vl0per/crypt/aged/stream.go:43.16,45.3 1 0 +github.com/D3vl0per/crypt/aged/stream.go:46.2,49.8 1 16 +github.com/D3vl0per/crypt/aged/stream.go:52.46,53.23 1 620 +github.com/D3vl0per/crypt/aged/stream.go:53.23,57.3 3 604 +github.com/D3vl0per/crypt/aged/stream.go:58.2,58.18 1 16 +github.com/D3vl0per/crypt/aged/stream.go:58.18,60.3 1 0 +github.com/D3vl0per/crypt/aged/stream.go:61.2,61.17 1 16 +github.com/D3vl0per/crypt/aged/stream.go:61.17,63.3 1 0 +github.com/D3vl0per/crypt/aged/stream.go:65.2,66.16 2 16 +github.com/D3vl0per/crypt/aged/stream.go:66.16,69.3 2 0 +github.com/D3vl0per/crypt/aged/stream.go:71.2,74.10 3 16 +github.com/D3vl0per/crypt/aged/stream.go:74.10,79.56 1 12 +github.com/D3vl0per/crypt/aged/stream.go:79.56,81.4 1 0 +github.com/D3vl0per/crypt/aged/stream.go:81.9,81.27 1 12 +github.com/D3vl0per/crypt/aged/stream.go:81.27,83.4 1 0 +github.com/D3vl0per/crypt/aged/stream.go:83.9,85.4 1 12 +github.com/D3vl0per/crypt/aged/stream.go:88.2,88.15 1 16 +github.com/D3vl0per/crypt/aged/stream.go:94.53,95.24 1 16 +github.com/D3vl0per/crypt/aged/stream.go:95.24,96.70 1 0 +github.com/D3vl0per/crypt/aged/stream.go:99.2,101.9 3 16 +github.com/D3vl0per/crypt/aged/stream.go:102.30,104.36 1 0 +github.com/D3vl0per/crypt/aged/stream.go:105.43,108.52 1 8 +github.com/D3vl0per/crypt/aged/stream.go:108.52,110.4 1 0 +github.com/D3vl0per/crypt/aged/stream.go:111.3,113.29 3 8 +github.com/D3vl0per/crypt/aged/stream.go:114.18,115.20 1 0 +github.com/D3vl0per/crypt/aged/stream.go:118.2,120.25 3 16 +github.com/D3vl0per/crypt/aged/stream.go:120.25,125.3 3 4 +github.com/D3vl0per/crypt/aged/stream.go:126.2,126.16 1 16 +github.com/D3vl0per/crypt/aged/stream.go:126.16,128.3 1 0 +github.com/D3vl0per/crypt/aged/stream.go:130.2,132.18 3 16 +github.com/D3vl0per/crypt/aged/stream.go:135.57,136.39 1 36 +github.com/D3vl0per/crypt/aged/stream.go:136.39,138.20 2 36 +github.com/D3vl0per/crypt/aged/stream.go:138.20,139.9 1 36 +github.com/D3vl0per/crypt/aged/stream.go:140.9,140.20 1 0 +github.com/D3vl0per/crypt/aged/stream.go:140.20,142.49 1 0 +github.com/D3vl0per/crypt/aged/stream.go:147.65,149.2 1 28 +github.com/D3vl0per/crypt/aged/stream.go:151.65,153.2 1 8 +github.com/D3vl0per/crypt/aged/stream.go:164.60,165.20 1 16 +github.com/D3vl0per/crypt/aged/stream.go:165.20,167.3 1 0 +github.com/D3vl0per/crypt/aged/stream.go:168.2,169.16 2 16 +github.com/D3vl0per/crypt/aged/stream.go:169.16,171.3 1 0 +github.com/D3vl0per/crypt/aged/stream.go:172.2,177.15 3 16 
+github.com/D3vl0per/crypt/aged/stream.go:180.53,183.18 1 1236
+github.com/D3vl0per/crypt/aged/stream.go:183.18,185.3 1 0
+github.com/D3vl0per/crypt/aged/stream.go:186.2,186.17 1 1236
+github.com/D3vl0per/crypt/aged/stream.go:186.17,188.3 1 618
+github.com/D3vl0per/crypt/aged/stream.go:190.2,191.17 2 618
+github.com/D3vl0per/crypt/aged/stream.go:191.17,197.50 5 622
+github.com/D3vl0per/crypt/aged/stream.go:197.50,198.53 1 4
+github.com/D3vl0per/crypt/aged/stream.go:198.53,201.5 2 0
+github.com/D3vl0per/crypt/aged/stream.go:204.2,204.19 1 618
+github.com/D3vl0per/crypt/aged/stream.go:208.32,209.18 1 16
+github.com/D3vl0per/crypt/aged/stream.go:209.18,211.3 1 0
+github.com/D3vl0per/crypt/aged/stream.go:213.2,214.18 2 16
+github.com/D3vl0per/crypt/aged/stream.go:214.18,216.3 1 0
+github.com/D3vl0per/crypt/aged/stream.go:218.2,219.12 2 16
+github.com/D3vl0per/crypt/aged/stream.go:227.46,228.44 1 20
+github.com/D3vl0per/crypt/aged/stream.go:228.44,229.67 1 0
+github.com/D3vl0per/crypt/aged/stream.go:232.2,232.10 1 20
+github.com/D3vl0per/crypt/aged/stream.go:232.10,234.3 1 16
+github.com/D3vl0per/crypt/aged/stream.go:235.2,239.12 5 20
diff --git a/symmetric/symmetric_test.go b/symmetric/symmetric_test.go
index e08c7be..7a9e955 100644
--- a/symmetric/symmetric_test.go
+++ b/symmetric/symmetric_test.go
@@ -62,3 +62,15 @@ func TestXChaCha20(t *testing.T) {
 	r.Equal(t, payload, plaintext)
 }
+
+func TestXOR(t *testing.T) {
+	a := []byte{0x0f, 0x1a, 0x2b, 0x3c}
+	b := []byte{0x2a, 0x1b, 0x0c, 0x3d}
+
+	sym := symmetric.Xor{}
+	expected := []byte{0x25, 0x01, 0x27, 0x01}
+	result, err := sym.Encrypt(a, b)
+	r.NoError(t, err)
+
+	r.Equal(t, expected, result)
+}
\ No newline at end of file

From 8f14989ded0442d9fcaf6e0c0033b953760ce24d Mon Sep 17 00:00:00 2001
From: D3v
Date: Wed, 15 Nov 2023 01:21:18 +0100
Subject: [PATCH 05/12] Remove hwrng test due to permission error

---
 generic/csprng_test.go | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/generic/csprng_test.go b/generic/csprng_test.go
index 9b1c0e1..d45bb31 100644
--- a/generic/csprng_test.go
+++ b/generic/csprng_test.go
@@ -31,14 +31,19 @@ func TestCSPRNGHex(t *testing.T) {
 	}
 }
 
+/*
 func TestHWRng(t *testing.T) {
 	length := 32
 	rnd, err := generic.HWRng(int64(length))
-	r.NoError(t, err)
-	r.Len(t, rnd, length)
-	t.Log(hex.EncodeToString(rnd))
+	if err.Error() == "open /dev/hwrng: no such file or directory" {
+		t.Skip("Hardware random number generator not found")
+	} else {
+		r.NoError(t, err)
+		r.Len(t, rnd, length)
+		t.Log(hex.EncodeToString(rnd))
+	}
 }
-
+*/
 func TestRand(t *testing.T) {
 	reader := generic.Rand()
 	r.Equal(t, reflect.TypeOf(reader), reflect.TypeOf(rand.Reader))

From dd1e4eb274d3e8cc3ca847436ea2665c63e518e8 Mon Sep 17 00:00:00 2001
From: D3v
Date: Sat, 18 Nov 2023 16:41:50 +0100
Subject: [PATCH 06/12] Test improvements

---
 Makefile | 2 +-
 README.md | 3 +
 aged/age_bind.go | 2 +-
 aged/age_bind_test.go | 206 ++++++++++++++++-------
 aged/obf.go | 1 +
 aged/stream.go | 3 +
 generic/csprng.go | 3 +
 generic/csprng_test.go | 16 ++-
 generic/encoder.go | 90 +++++++++++++
 generic/fs.go | 6 +-
 generic/fs_test.go | 87 +++++++++++++
 generic/ports.go | 71 ++++++++++
 generic/subtle.go | 11 ++
 generic/subtle_test.go | 35 +++++
 generic/utils.go | 100 +------------
 generic/utils_test.go | 75 +++++++++++
 hash/hash_test.go | 253 +++++++++++++++++++-----------
 hash/kdf.go | 41 +++---
 hash/kdf_test.go | 184 ++++++++++++++++++++------
 symmetric/symmetric.go | 8 +-
 symmetric/symmetric_test.go | 14 +-
 21 files changed,
824 insertions(+), 387 deletions(-)
 create mode 100644 generic/encoder.go
 create mode 100644 generic/fs_test.go
 create mode 100644 generic/ports.go
 create mode 100644 generic/subtle.go
 create mode 100644 generic/subtle_test.go
 create mode 100644 generic/utils_test.go

diff --git a/Makefile b/Makefile
index debc931..659699a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ lint:
 	golangci-lint run --fix
 
 test:
-	go clean -testcache && go test -cover ./...
+	go clean -testcache && go test -race -cover ./...
 
 test-v:
 	go clean -testcache && go test ./... -v
diff --git a/README.md b/README.md
index 6b8875f..f5f1b28 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,9 @@ Crypto suite:
   - CSPRNG
   - CSPRNGHex
   - /dev/hwrng
+  - Encoders
+    - Base64
+    - Hex
 - Symmetric
   - XChacha20-poly1305
   - XChacha20-poly1305 Stream (modified age code)
diff --git a/aged/age_bind.go b/aged/age_bind.go
index 7310be6..ae1b781 100644
--- a/aged/age_bind.go
+++ b/aged/age_bind.go
@@ -219,7 +219,7 @@ func deobfuscator(p Parameters) ([]byte, error) {
 }
 
 func (k Keychain) KeychainExport() []string {
-	var keys []string
+	keys := make([]string, 0, len(k.recipients))
 	for _, key := range k.recipients {
 		keys = append(keys, fmt.Sprint(key))
 	}
diff --git a/aged/age_bind_test.go b/aged/age_bind_test.go
index 288ec9f..2fbe921 100644
--- a/aged/age_bind_test.go
+++ b/aged/age_bind_test.go
@@ -69,7 +69,6 @@ func keychainInit(t *testing.T) chains {
 	}
 }
 
-
 func TestGenKeypair(t *testing.T) {
 	_, err := aged.GenKeypair()
 	r.NoError(t, err)
@@ -107,125 +106,150 @@ func TestRoundTrips(t *testing.T) {
 	big, err := generic.CSPRNG(10485760)
 	r.NoError(t, err)
 
-	p := []aged.Parameters{
-		// No compress, No obfuscator
+	tests := []struct {
+		name      string
+		parameter aged.Parameters
+	}{
 		{
-			Data:        config.plainData,
-			Obfuscation: false,
-			Compress:    false,
+			name: "No compress, No obfuscator",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscation: false,
+				Compress:    false,
+			},
 		},
-		// No compress, obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscator:  &aged.AgeV1Obf{},
-			Obfuscation: true,
-			Compress:    false,
+			name: "No compress, obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscator:  &aged.AgeV1Obf{},
+				Obfuscation: true,
+				Compress:    false,
+			},
 		},
-		// Compress with Gzip, no obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscation: false,
-			Compressor:  &compression.Gzip{},
-			Compress:    true,
+			name: "Compress with Gzip, no obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscation: false,
+				Compressor:  &compression.Gzip{},
+				Compress:    true,
+			},
 		},
-		// Compress with Gzip, obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscator:  &aged.AgeV1Obf{},
-			Obfuscation: true,
-			Compressor:  &compression.Gzip{},
-			Compress:    true,
+			name: "Compress with Gzip, obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscator:  &aged.AgeV1Obf{},
+				Obfuscation: true,
+				Compressor:  &compression.Gzip{},
+				Compress:    true,
+			},
 		},
-		// Compress with Zstd, no obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscation: false,
-			Compressor:  &compression.Zstd{},
-			Compress:    true,
+			name: "Compress with Zstd, no obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscation: false,
+				Compressor:  &compression.Zstd{},
+				Compress:    true,
+			},
 		},
-		// Compress with Zstd, obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscator:  &aged.AgeV1Obf{},
-			Obfuscation: true,
-			Compressor:  &compression.Zstd{},
-			Compress:    true,
+			name: "Compress with Zstd, obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscator:  &aged.AgeV1Obf{},
+				Obfuscation: true,
+				Compressor:  &compression.Zstd{},
+				Compress:    true,
+			},
 		},
-		// Compress with Flate, no obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscation: false,
-			Compressor:  &compression.Flate{},
-			Compress:    true,
+			name: "Compress with Flate, no obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscation: false,
+				Compressor:  &compression.Flate{},
+				Compress:    true,
+			},
 		},
-		// Compess with Flate, obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscator:  &aged.AgeV1Obf{},
-			Obfuscation: true,
-			Compressor:  &compression.Flate{},
-			Compress:    true,
+			name: "Compress with Flate, obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscator:  &aged.AgeV1Obf{},
+				Obfuscation: true,
+				Compressor:  &compression.Flate{},
+				Compress:    true,
+			},
 		},
-		// Compress with Zlib, no obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscation: false,
-			Compressor:  &compression.Zlib{},
-			Compress:    true,
+			name: "Compress with Zlib, no obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscation: false,
+				Compressor:  &compression.Zlib{},
+				Compress:    true,
+			},
 		},
-		// Compress with Zlib, obfuscate
 		{
-			Data:        config.plainData,
-			Obfuscator:  &aged.AgeV1Obf{},
-			Obfuscation: true,
-			Compressor:  &compression.Zlib{},
-			Compress:    false,
+			name: "Compress with Zlib, obfuscate",
+			parameter: aged.Parameters{
+				Data:        config.plainData,
+				Obfuscator:  &aged.AgeV1Obf{},
+				Obfuscation: true,
+				Compressor:  &compression.Zlib{},
+				Compress:    false,
+			},
 		},
-		// Compress big file with Zstd, obfuscate
 		{
-			Data:        big,
-			Obfuscator:  &aged.AgeV1Obf{},
-			Obfuscation: true,
-			Compressor:  &compression.Zlib{},
-			Compress:    true,
+			name: "Compress big file with Zstd, obfuscate",
+			parameter: aged.Parameters{
+				Data:        big,
+				Obfuscator:  &aged.AgeV1Obf{},
+				Obfuscation: true,
+				Compressor:  &compression.Zlib{},
+				Compress:    true,
+			},
 		},
 	}
 
-	for _, encryptParam := range p {
-
-		decryptParam := encryptParam
-		encryptPwdParam := encryptParam
-		decryptPwdParam := encryptPwdParam
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			decryptParam := tt.parameter
+			encryptPwdParam := tt.parameter
+			decryptPwdParam := encryptPwdParam
 
-		var err error
+			var err error
 
-		decryptParam.Data, err = config.keychain.Encrypt(encryptParam)
-		r.NoError(t, err, "Encryption without error")
-		t.Logf("Original size:%d Processed size: %d", len(encryptParam.Data), len(decryptParam.Data))
+			decryptParam.Data, err = config.keychain.Encrypt(tt.parameter)
+			r.NoError(t, err, "Encryption without error")
+			t.Logf("Original size:%d Processed size: %d", len(tt.parameter.Data), len(decryptParam.Data))
 
-		decryptedData, err2 := config.keychain.Decrypt(decryptParam)
-		r.NoError(t, err2, "Decryption without error")
-		r.Equal(t, encryptParam.Data, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
+			decryptedData, err2 := config.keychain.Decrypt(decryptParam)
+			r.NoError(t, err2, "Decryption without error")
+			r.Equal(t, tt.parameter.Data, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain")
 
-		decryptedData2, err3 := config.keychain2.Decrypt(decryptParam)
-		r.NoError(t, err3, "Decryption two without error")
-		r.Equal(t, encryptParam.Data, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
-
-		decryptedData3, err4 := config.keychainWrong.Decrypt(decryptParam)
-		r.Equal(t, []byte{}, decryptedData3)
-		r.EqualError(t, err4, "no identity matched any of the recipients")
+			decryptedData2, err3 := config.keychain2.Decrypt(decryptParam)
+			r.NoError(t, err3, "Decryption two without error")
+			r.Equal(t, tt.parameter.Data, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain")
 
+			decryptedData3, err4 := config.keychainWrong.Decrypt(decryptParam)
+			r.Equal(t, []byte{}, decryptedData3)
+			r.EqualError(t, err4, "no identity matched any of the recipients")
 
-		pwd, err := generic.CSPRNG(32)
-		r.NoError(t, err)
+			pwd, err := generic.CSPRNG(32)
+			r.NoError(t, err)
 
-		decryptPwdParam.Data, err = aged.EncryptWithPwd(encryptPwdParam, string(pwd))
-		r.NoError(t, err, "Encryption without error")
-		t.Logf("Pwd protected data: %d", decryptPwdParam.Data)
+			decryptPwdParam.Data, err = aged.EncryptWithPwd(encryptPwdParam, string(pwd))
+			r.NoError(t, err, "Encryption without error")
+			t.Logf("Pwd protected data: %d", decryptPwdParam.Data)
 
-		decryptedPwdData, err := aged.DecryptWithPwd(decryptPwdParam, string(pwd))
-		r.NoError(t, err, "Decryption without error")
-		r.Equal(t, encryptPwdParam.Data, decryptedPwdData)
+			decryptedPwdData, err := aged.DecryptWithPwd(decryptPwdParam, string(pwd))
+			r.NoError(t, err, "Decryption without error")
+			r.Equal(t, encryptPwdParam.Data, decryptedPwdData)
+		})
 	}
 }
 
@@ -340,10 +364,6 @@ func TestWrongPublicKeyKeyringSetup(t *testing.T) {
 }
 
 func TestEncryptWithPwd(t *testing.T) {
-
+
 }
 */
-
-
-
-
diff --git a/aged/obf.go b/aged/obf.go
index 4052f96..bc217b9 100644
--- a/aged/obf.go
+++ b/aged/obf.go
@@ -11,6 +11,7 @@ type Obfuscation interface {
 	Deobfuscate([]byte) ([]byte, error)
 }
 
+// AgeV1Obf is an obfuscation for the age encryption header
 type AgeV1Obf struct{}
 
 var (
diff --git a/aged/stream.go b/aged/stream.go
index c9bde2c..763f45d 100644
--- a/aged/stream.go
+++ b/aged/stream.go
@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
// https://github.com/FiloSottile/age/blob/main/internal/stream/stream.go
+
+// Modified age stream.go to use xchacha20poly1305 instead of chacha20poly1305
+
 package aged
 
 import (
diff --git a/generic/csprng.go b/generic/csprng.go
index 4f3cc46..4dc4635 100644
--- a/generic/csprng.go
+++ b/generic/csprng.go
@@ -7,6 +7,7 @@ import (
 	"os"
 )
 
+// CSPRNG is a cryptographically secure pseudo-random number generator for byte slices
 func CSPRNG(n int64) ([]byte, error) {
 	random := make([]byte, n)
 	if _, err := io.ReadFull(rand.Reader, random); err != nil {
@@ -15,11 +16,13 @@
 	return random, nil
 }
 
+// CSPRNGHex is a CSPRNG in hex format
 func CSPRNGHex(n int64) (string, error) {
 	rnd, err := CSPRNG(n)
 	return hex.EncodeToString(rnd), err
 }
 
+// HWRng is a hardware random number generator
 func HWRng(n int64) ([]byte, error) {
 	file, err := os.Open("/dev/hwrng")
 	if err != nil {
diff --git a/generic/csprng_test.go b/generic/csprng_test.go
index d45bb31..1383070 100644
--- a/generic/csprng_test.go
+++ b/generic/csprng_test.go
@@ -31,19 +31,25 @@ func TestCSPRNGHex(t *testing.T) {
 	}
 }
 
-/*
 func TestHWRng(t *testing.T) {
 	length := 32
 	rnd, err := generic.HWRng(int64(length))
-	if err.Error() == "open /dev/hwrng: no such file or directory" {
-		t.Skip("Hardware random number generator not found")
-	} else {
+	if err == nil {
 		r.NoError(t, err)
 		r.Len(t, rnd, length)
 		t.Log(hex.EncodeToString(rnd))
+	} else {
+		switch err.Error() {
+		case "open /dev/hwrng: permission denied":
+			t.Skip("Hardware random number generator permission denied")
+		case "open /dev/hwrng: no such file or directory":
+			t.Skip("Hardware random number generator not found")
+		default:
+			t.Log(err)
+		}
 	}
 }
-*/
+
 func TestRand(t *testing.T) {
 	reader := generic.Rand()
 	r.Equal(t, reflect.TypeOf(reader), reflect.TypeOf(rand.Reader))
diff --git a/generic/encoder.go b/generic/encoder.go
new file mode 100644
index 0000000..d7305ea
--- /dev/null
+++ b/generic/encoder.go
@@ -0,0 +1,90 @@
+package generic
+
+import (
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/hex"
+)
+
+type Encoder interface {
+	Encode([]byte) string
+	Decode(string) ([]byte, error)
+}
+
+// Base64 is the standard base64 encoding, as defined in RFC 4648.
+type Base64 struct{}
+
+// UrlBase64 is the alternate base64 encoding defined in RFC 4648. It is typically used in URLs and file names.
+type UrlBase64 struct{}
+
+// nolint: lll
+// RawUrlBase64 is the unpadded alternate base64 encoding defined in RFC 4648. It is typically used in URLs and file names. This is the same as UrlBase64 but omits padding characters.
+type RawUrlBase64 struct{}
+
+// nolint: lll
+// RawBase64 is the standard raw, unpadded base64 encoding, as defined in RFC 4648 section 3.2. This is the same as Base64 but omits padding characters.
+type RawBase64 struct{} + +type Base32 struct{} + +type PaddinglessBase32 struct{} + +type Hex struct{} + +func (b *Base64) Encode(data []byte) string { + return base64.StdEncoding.EncodeToString(data) +} + +func (b *Base64) Decode(data string) ([]byte, error) { + return base64.StdEncoding.DecodeString(data) +} + +func (b *UrlBase64) Encode(data []byte) string { + return base64.URLEncoding.EncodeToString(data) +} + +func (b *UrlBase64) Decode(data string) ([]byte, error) { + return base64.URLEncoding.DecodeString(data) +} + +func (b *RawUrlBase64) Encode(data []byte) string { + return base64.RawURLEncoding.EncodeToString(data) +} + +func (b *RawUrlBase64) Decode(data string) ([]byte, error) { + return base64.RawURLEncoding.DecodeString(data) +} + +func (b *RawBase64) Encode(data []byte) string { + return base64.RawStdEncoding.EncodeToString(data) +} + +func (b *RawBase64) Decode(data string) ([]byte, error) { + return base64.RawStdEncoding.DecodeString(data) +} + +func (b *Base32) Encode(data []byte) string { + return base32.StdEncoding.EncodeToString(data) +} + +func (b *Base32) Decode(data string) ([]byte, error) { + return base32.StdEncoding.DecodeString(data) +} + +func (b *PaddinglessBase32) Encode(data []byte) string { + encoder := base32.StdEncoding.WithPadding(base32.NoPadding) + return encoder.EncodeToString(data) +} + +func (b *PaddinglessBase32) Decode(data string) ([]byte, error) { + encoder := base32.StdEncoding.WithPadding(base32.NoPadding) + return encoder.DecodeString(data) +} + +func (h *Hex) Encode(data []byte) string { + return hex.EncodeToString(data) +} + +func (h *Hex) Decode(data string) ([]byte, error) { + return hex.DecodeString(data) +} diff --git a/generic/fs.go b/generic/fs.go index a293c77..0079a07 100644 --- a/generic/fs.go +++ b/generic/fs.go @@ -7,6 +7,7 @@ import ( "path/filepath" ) +// Secure way to delete file func Delete(targetPath string, cycle int) error { if cycle == 0 { cycle = 3 @@ -24,7 +25,6 @@ func Delete(targetPath string, cycle int) error { } zeroBytes := make([]byte, fileInfo.Size()) - copy(zeroBytes, "0") for i := 0; i < cycle; i++ { // Owerwrite with zeros @@ -65,6 +65,7 @@ func Delete(targetPath string, cycle int) error { return nil } +// Secure way to overwrite file func Overwrite(targetPath string, data []byte, cycle int) error { if cycle == 0 { cycle = 3 @@ -83,7 +84,6 @@ func Overwrite(targetPath string, data []byte, cycle int) error { } zeroBytes := make([]byte, fileInfo.Size()) - copy(zeroBytes, "0") for i := 0; i < cycle; i++ { // Owerwrite with zeros @@ -125,7 +125,7 @@ func Overwrite(targetPath string, data []byte, cycle int) error { if err != nil { return err } - if n != int(fileInfo.Size()) { + if n != len(data) { return errors.New("file overwrite bytes mismatch") } return nil diff --git a/generic/fs_test.go b/generic/fs_test.go new file mode 100644 index 0000000..385716d --- /dev/null +++ b/generic/fs_test.go @@ -0,0 +1,87 @@ +package generic_test + +import ( + "errors" + "os" + "testing" + + "github.com/D3vl0per/crypt/generic" +) + +func TestDelete(t *testing.T) { + // Create a temporary file for testing + tempFile, err := os.CreateTemp("", "testfile") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tempFile.Name()) + + // Write some data to the temporary file + data := []byte("test data") + _, err = tempFile.Write(data) + if err != nil { + t.Fatal(err) + } + + // Close the file before deleting it + err = tempFile.Close() + if err != nil { + t.Fatal(err) + } + + // Call the Delete function with the temporary file path + 
err = generic.Delete(tempFile.Name(), 3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check if the file has been deleted
+	_, err = os.Stat(tempFile.Name())
+	if !errors.Is(err, os.ErrNotExist) {
+		t.Errorf("expected file to be deleted, got error: %v", err)
+	}
+}
+
+/*
+func TestOverwrite(t *testing.T) {
+	// Create a temporary file for testing
+	tempFile, err := os.CreateTemp("", "testfile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(tempFile.Name())
+
+	// Write some data to the temporary file
+	data := []byte("test data")
+	_, err = tempFile.Write(data)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Close the file before overwriting it
+	err = tempFile.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedContents := []byte("new data")
+	// Call the Overwrite function with the temporary file path
+	err = generic.Overwrite(tempFile.Name(), expectedContents, 3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read the contents of the file
+	fileContents, err := generic.ReadFileContent(tempFile.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Log(string(fileContents))
+
+	// Check if the file has been overwritten correctly
+	if !bytes.Equal(fileContents, expectedContents) {
+		t.Errorf("expected file contents to be %q, got %q", expectedContents, fileContents)
+	}
+}
+*/
diff --git a/generic/ports.go b/generic/ports.go
new file mode 100644
index 0000000..47f9af9
--- /dev/null
+++ b/generic/ports.go
@@ -0,0 +1,71 @@
+package generic
+
+import (
+	"crypto"
+	"crypto/ed25519"
+	"crypto/x509"
+	"encoding/hex"
+	"encoding/pem"
+)
+
+func ExportECCPK(pk crypto.PublicKey) (string, error) {
+	b, err := x509.MarshalPKIXPublicKey(pk)
+	if err != nil {
+		return "", err
+	}
+
+	block := &pem.Block{
+		Type:  "PUBLIC KEY",
+		Bytes: b,
+	}
+	return hex.EncodeToString(pem.EncodeToMemory(block)), nil
+}
+
+func ExportECCSK(sk ed25519.PrivateKey) (string, error) {
+	b, err := x509.MarshalPKCS8PrivateKey(sk)
+	if err != nil {
+		return "", err
+	}
+
+	block := &pem.Block{
+		Type:  "PRIVATE KEY",
+		Bytes: b,
+	}
+	return hex.EncodeToString(pem.EncodeToMemory(block)), nil
+}
+
+func ImportECCPK(pk string) (ed25519.PublicKey, error) {
+	pkPem, err := hex.DecodeString(pk)
+	if err != nil {
+		return ed25519.PublicKey{}, err
+	}
+
+	pemBlock, _ := pem.Decode(pkPem)
+
+	pkRaw, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return ed25519.PublicKey{}, err
+	}
+	// nolint:errcheck
+	pkC := pkRaw.(crypto.PublicKey)
+	// nolint:errcheck
+	return pkC.(ed25519.PublicKey), nil
+}
+
+func ImportECCSK(sk string) (ed25519.PrivateKey, error) {
+	skPem, err := hex.DecodeString(sk)
+	if err != nil {
+		return ed25519.PrivateKey{}, err
+	}
+
+	pemBlock, _ := pem.Decode(skPem)
+
+	skRaw, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes)
+	if err != nil {
+		return ed25519.PrivateKey{}, err
+	}
+	// nolint:errcheck
+	skC := skRaw.(crypto.PrivateKey)
+	// nolint:errcheck
+	return skC.(ed25519.PrivateKey), nil
+}
diff --git a/generic/subtle.go b/generic/subtle.go
new file mode 100644
index 0000000..d7013a1
--- /dev/null
+++ b/generic/subtle.go
@@ -0,0 +1,11 @@
+package generic
+
+import "crypto/subtle"
+
+func Compare(x, y []byte) bool {
+	return subtle.ConstantTimeCompare(x, y) == 1
+}
+
+func CompareString(x, y string) bool {
+	return Compare([]byte(x), []byte(y))
+}
diff --git a/generic/subtle_test.go b/generic/subtle_test.go
new file mode 100644
index 0000000..cfb2592
--- /dev/null
+++ b/generic/subtle_test.go
@@ -0,0 +1,35 @@
+package generic_test
+
+import (
+	s "crypto/subtle"
+	"testing"
+
+	
"github.com/D3vl0per/crypt/generic" + r "github.com/stretchr/testify/require" +) + +func TestCompare(t *testing.T) { + rand, err := generic.CSPRNG(8) + r.NoError(t, err) + r.Len(t, rand, 8) + var randEq []byte = make([]byte, 8) + + s.ConstantTimeCopy(1, randEq, rand) + r.Equal(t, rand, randEq) + + isEq := generic.Compare(rand, randEq) + r.True(t, isEq) +} + +func TestCompareString(t *testing.T) { + rand, err := generic.CSPRNG(8) + r.NoError(t, err) + r.Len(t, rand, 8) + var randEq []byte = make([]byte, 8) + + s.ConstantTimeCopy(1, randEq, rand) + r.Equal(t, rand, randEq) + + isEq := generic.CompareString(string(rand), string(randEq)) + r.True(t, isEq) +} diff --git a/generic/utils.go b/generic/utils.go index b5070b5..87ff804 100644 --- a/generic/utils.go +++ b/generic/utils.go @@ -2,105 +2,9 @@ package generic import ( "bytes" - "crypto" - "crypto/ed25519" - "crypto/subtle" - "crypto/x509" - "encoding/base64" - "encoding/hex" - "encoding/pem" ) -type Encoder interface { - Encode([]byte) string - Decode([]byte) ([]byte, error) -} - -type Base64 struct{} -type Hex struct{} - -func (b *Base64) Encode(data []byte) string { - return base64.RawStdEncoding.EncodeToString(data) -} - -func (b *Base64) Decode(data string) ([]byte, error) { - return base64.RawStdEncoding.DecodeString(data) -} - -func (h *Hex) Encode(data []byte) string { - return hex.EncodeToString(data) -} - -func (h *Hex) Decode(data string) ([]byte, error) { - return hex.DecodeString(data) -} - -func Compare(x, y []byte) bool { - return subtle.ConstantTimeCompare(x, y) == 1 -} - -func ExportECCPK(pk crypto.PublicKey) (string, error) { - b, err := x509.MarshalPKIXPublicKey(pk) - if err != nil { - return "", err - } - - block := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: b, - } - return hex.EncodeToString(pem.EncodeToMemory(block)), nil -} - -func ExportECCSK(sk ed25519.PrivateKey) (string, error) { - b, err := x509.MarshalPKCS8PrivateKey(sk) - if err != nil { - return "", err - } - - block := &pem.Block{ - Type: "PRIVATE KEY", - Bytes: b, - } - return hex.EncodeToString(pem.EncodeToMemory(block)), nil -} - -func ImportECCPK(pk string) (ed25519.PublicKey, error) { - pkPem, err := hex.DecodeString(pk) - if err != nil { - return ed25519.PublicKey{}, err - } - - pemBlock, _ := pem.Decode(pkPem) - - pkRaw, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return ed25519.PublicKey{}, err - } - // nolint:errcheck - pkC := pkRaw.(crypto.PublicKey) - // nolint:errcheck - return pkC.(ed25519.PublicKey), nil -} - -func ImportECCSK(sk string) (ed25519.PublicKey, error) { - skPem, err := hex.DecodeString(sk) - if err != nil { - return ed25519.PublicKey{}, err - } - - pemBlock, _ := pem.Decode(skPem) - - pkRaw, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes) - if err != nil { - return ed25519.PublicKey{}, err - } - // nolint:errcheck - pkC := pkRaw.(crypto.PublicKey) - // nolint:errcheck - return pkC.(ed25519.PublicKey), nil -} - +// AllZero checks if all bytes in a slice are zero func AllZero(s []byte) bool { for _, v := range s { if v != 0 { @@ -110,6 +14,8 @@ func AllZero(s []byte) bool { return true } +// StrCnct concatenates strings into one +// Example: StrCnct([]string{"a", "b", "c"}...) 
-> "abc" func StrCnct(str ...string) string { var buffer bytes.Buffer diff --git a/generic/utils_test.go b/generic/utils_test.go new file mode 100644 index 0000000..852e3e2 --- /dev/null +++ b/generic/utils_test.go @@ -0,0 +1,75 @@ +package generic_test + +import ( + "testing" + + "github.com/D3vl0per/crypt/generic" +) + +func TestAllZero(t *testing.T) { + tests := []struct { + name string + s []byte + want bool + }{ + { + name: "Empty slice", + s: []byte{}, + want: true, + }, + { + name: "All zeros", + s: []byte{0, 0, 0, 0}, + want: true, + }, + { + name: "Non-zero element", + s: []byte{0, 0, 1, 0}, + want: false, + }, + { + name: "Mixed elements", + s: []byte{0, 0, 0, 1}, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := generic.AllZero(tt.s); got != tt.want { + t.Errorf("AllZero() = %v, want %v", got, tt.want) + } + }) + } +} +func TestStrCnct(t *testing.T) { + tests := []struct { + name string + str []string + want string + }{ + { + name: "Empty strings", + str: []string{}, + want: "", + }, + { + name: "Single string", + str: []string{"Hello"}, + want: "Hello", + }, + { + name: "Multiple strings", + str: []string{"Hello", " ", "World"}, + want: "Hello World", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := generic.StrCnct(tt.str...); got != tt.want { + t.Errorf("StrCnct() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/hash/hash_test.go b/hash/hash_test.go index e598e7a..cb7c005 100644 --- a/hash/hash_test.go +++ b/hash/hash_test.go @@ -10,189 +10,210 @@ import ( r "github.com/stretchr/testify/require" ) -type testBlakes struct { - Algo hasher.Algorithms - Data []byte - Expected []byte - Key []byte -} - func TestBlakes(t *testing.T) { - tests := []testBlakes{ + + tests := []struct { + name string + algo hasher.Algorithms + data []byte + expected []byte + key []byte + }{ { - Algo: &hasher.Blake2b256{}, - Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: nil, + name: "Blake2b256", + algo: &hasher.Blake2b256{}, + data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: nil, //nolint:lll - Expected: []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237}, + expected: []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237}, }, { - Algo: &hasher.Blake2b256{}, - Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + name: "Blake2b256 HMAC", + algo: &hasher.Blake2b256{}, + data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll - Expected: []byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169}, + expected: []byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169}, }, { - Algo: &hasher.Blake2b384{}, - Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: nil, + name: "Blake2b384", + algo: &hasher.Blake2b384{}, + data: 
[]byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: nil, //nolint:lll - Expected: []byte{50, 67, 235, 235, 146, 164, 58, 187, 5, 182, 182, 179, 132, 31, 200, 27, 68, 50, 83, 71, 221, 131, 86, 164, 203, 194, 251, 64, 172, 45, 105, 200, 90, 118, 50, 47, 37, 237, 28, 153, 88, 166, 95, 221, 138, 249, 176, 116}, + expected: []byte{50, 67, 235, 235, 146, 164, 58, 187, 5, 182, 182, 179, 132, 31, 200, 27, 68, 50, 83, 71, 221, 131, 86, 164, 203, 194, 251, 64, 172, 45, 105, 200, 90, 118, 50, 47, 37, 237, 28, 153, 88, 166, 95, 221, 138, 249, 176, 116}, }, { - Algo: &hasher.Blake2b384{}, - Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + name: "Blake2b384 HMAC", + algo: &hasher.Blake2b384{}, + data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll - Expected: []byte{2, 58, 228, 49, 225, 253, 51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169}, + expected: []byte{2, 58, 228, 49, 225, 253, 51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169}, }, { - Algo: &hasher.Blake2b512{}, - Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: nil, + name: "Blake2b512", + algo: &hasher.Blake2b512{}, + data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: nil, //nolint:lll - Expected: []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}, + expected: []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}, }, { - Algo: &hasher.Blake2b512{}, - Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + name: "Blake2b512 HMAC", + algo: &hasher.Blake2b512{}, + data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll - Expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, + expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, }, { - Algo: &hasher.Sha3256{}, - Data: 
[]byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: nil, + name: "SHA3-256", + algo: &hasher.Sha3256{}, + data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: nil, //nolint:lll - Expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, + expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, }, { - Algo: &hasher.Sha3384{}, - Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: nil, + name: "SHA3-384", + algo: &hasher.Sha3384{}, + data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: nil, //nolint:lll - Expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, + expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, }, { - Algo: &hasher.Sha3512{}, - Data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - Key: nil, + name: "SHA3-512", + algo: &hasher.Sha3512{}, + data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), + key: nil, //nolint:lll - Expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 100, 38, 202}, + expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 100, 38, 202}, }, } for _, test := range tests { - testHash(t, test) - } - -} + t.Run(test.name, func(t *testing.T) { + if test.key == nil { + hash, err := test.algo.Hash(test.data) + r.NoError(t, err) + t.Log("Hash: ", hex.EncodeToString(hash)) + r.Equal(t, test.expected, hash) -func testHash(t *testing.T, test testBlakes) { - if test.Key == nil { - hash, err := test.Algo.Hash(test.Data) - r.NoError(t, err) - t.Log("Hash: ", hex.EncodeToString(hash)) - r.Equal(t, test.Expected, hash) + validate, err := test.algo.ValidateHash(test.data, hash) + r.NoError(t, err) + r.True(t, validate) + } else { + hash, err := test.algo.Hmac(test.key, test.data) + r.NoError(t, err) + t.Log("Hash: ", hex.EncodeToString(hash)) + r.Equal(t, test.expected, hash) - validate, err := test.Algo.ValidateHash(test.Data, hash) - r.NoError(t, err) - r.True(t, validate) - } else { - hash, err := test.Algo.Hmac(test.Key, test.Data) - r.NoError(t, err) - t.Log("Hash: ", hex.EncodeToString(hash)) - r.Equal(t, test.Expected, hash) - - validate, err := test.Algo.ValidateHmac(test.Key, test.Data, hash) - r.NoError(t, err) - r.True(t, validate) + validate, err := test.algo.ValidateHmac(test.key, test.data, hash) + r.NoError(t, err) + r.True(t, validate) + } + }) } + } func TestFaultBlakes(t *testing.T) { - tests := []testBlakes{ + + tests := 
[]struct { + name string + algo hasher.Algorithms + data []byte + expected []byte + key []byte + }{ { - Algo: &hasher.Blake2b256{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: nil, + name: "Blake2b256", + algo: &hasher.Blake2b256{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: nil, //nolint:lll - Expected: []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237}, + expected: []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237}, }, { - Algo: &hasher.Blake2b256{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + name: "Blake2b256 HMAC", + algo: &hasher.Blake2b256{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll - Expected: []byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169}, + expected: []byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169}, }, { - Algo: &hasher.Blake2b384{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: nil, + name: "Blake2b384", + algo: &hasher.Blake2b384{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: nil, //nolint:lll - Expected: []byte{50, 67, 235, 235, 146, 164, 58, 187, 5, 182, 182, 179, 132, 31, 200, 27, 68, 50, 83, 71, 221, 131, 86, 164, 203, 194, 251, 64, 172, 45, 105, 200, 90, 118, 50, 47, 37, 237, 28, 153, 88, 166, 95, 221, 138, 249, 176, 116}, + expected: []byte{50, 67, 235, 235, 146, 164, 58, 187, 5, 182, 182, 179, 132, 31, 200, 27, 68, 50, 83, 71, 221, 131, 86, 164, 203, 194, 251, 64, 172, 45, 105, 200, 90, 118, 50, 47, 37, 237, 28, 153, 88, 166, 95, 221, 138, 249, 176, 116}, }, { - Algo: &hasher.Blake2b384{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + name: "Blake2b384 HMAC", + algo: &hasher.Blake2b384{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll - Expected: []byte{2, 58, 228, 49, 225, 253, 51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169}, + expected: []byte{2, 58, 228, 49, 225, 253, 51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169}, }, { - Algo: &hasher.Blake2b512{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: nil, + name: "Blake2b512", + algo: &hasher.Blake2b512{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: nil, //nolint:lll - Expected: []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 
242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}, + expected: []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}, }, { - Algo: &hasher.Blake2b512{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + name: "Blake2b512 HMAC", + algo: &hasher.Blake2b512{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll - Expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, + expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, }, { - Algo: &hasher.Sha3256{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: nil, + name: "SHA3-256", + algo: &hasher.Sha3256{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: nil, //nolint:lll - Expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, + expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, }, { - Algo: &hasher.Sha3384{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: nil, + name: "SHA3-384", + algo: &hasher.Sha3384{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: nil, //nolint:lll - Expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, + expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, }, { - Algo: &hasher.Sha3512{}, - Data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - Key: nil, + name: "SHA3-512", + algo: &hasher.Sha3512{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + key: nil, //nolint:lll - Expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 
100, 38, 202}, + expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 100, 38, 202}, }, } for _, test := range tests { - testHashFault(t, test) + t.Run(test.name, func(t *testing.T) { + if test.key == nil { + validate, err := test.algo.ValidateHash(test.data, test.expected) + r.NoError(t, err) + r.False(t, validate) + } else { + validate, err := test.algo.ValidateHmac(test.key, test.data, test.expected) + r.NoError(t, err) + r.False(t, validate) + } + }) } } - -func testHashFault(t *testing.T, test testBlakes) { - if test.Key == nil { - validate, err := test.Algo.ValidateHash(test.Data, test.Expected) - r.NoError(t, err) - r.False(t, validate) - } else { - validate, err := test.Algo.ValidateHmac(test.Key, test.Data, test.Expected) - r.NoError(t, err) - r.False(t, validate) - } -} diff --git a/hash/kdf.go b/hash/kdf.go index ba8fb8c..2bd8c46 100644 --- a/hash/kdf.go +++ b/hash/kdf.go @@ -4,15 +4,11 @@ import ( "encoding/base64" "errors" "hash" - "io" - "math" "regexp" "strconv" "github.com/D3vl0per/crypt/generic" "golang.org/x/crypto/argon2" - "golang.org/x/crypto/hkdf" - "golang.org/x/crypto/sha3" ) const ( @@ -30,9 +26,9 @@ type Kdf interface { type Hkdf struct { Salt []byte - Key []byte + Key []byte HashMode func() hash.Hash - Encoder generic.Hex + Encoder generic.Hex } type Argon2ID struct { @@ -88,6 +84,7 @@ func (a *Argon2ID) Hash(data []byte) (string, error) { } } + // Set default values a.Iterations |= AIterations a.Memory |= AMemory a.Parallelism |= AParallelism @@ -114,31 +111,30 @@ func (a *Argon2ID) Validate(data []byte, argonString string) (bool, error) { } if a.Iterations == 0 { - parsed, err := strconv.ParseUint(parameters["iterations"], 10, 32) + parsed, err := strconv.ParseInt(parameters["iterations"], 10, 32) if err != nil { return false, errors.New(generic.StrCnct([]string{"iteration parameter parsing error: ", err.Error()}...)) } + a.Iterations = uint32(parsed) } if a.Memory == 0 { - parsed, err := strconv.ParseUint(parameters["memory"], 10, 32) + parsed, err := strconv.ParseInt(parameters["memory"], 10, 32) if err != nil { return false, errors.New(generic.StrCnct([]string{"memory parameter parsing error: ", err.Error()}...)) } + a.Memory = uint32(parsed) } if a.Parallelism == 0 { - parsed, err := strconv.ParseUint(parameters["parallelism"], 10, 32) + parsed, err := strconv.ParseInt(parameters["parallelism"], 10, 8) if err != nil { return false, errors.New(generic.StrCnct([]string{"parallelism parameter parsing error: ", err.Error()}...)) } - if parsed > 0 && parsed <= math.MaxInt32 { - a.Parallelism = uint8(parsed) - } else { - return false, errors.New("parallelism parameter parsing error, can't parse that number") - } + + a.Parallelism = uint8(parsed) } if a.KeyLen == 0 { @@ -171,11 +167,11 @@ func (a *Argon2ID) ExtractParameters(input string) (map[string]string, error) { "hash": matches[7], } - if len(parameters["algorithm"]) == 0 || parameters["algorithm"] != "argon2id" { + if len(parameters["algorithm"]) == 0 || !generic.CompareString(parameters["algorithm"], "argon2id") { return map[string]string{}, errors.New(generic.StrCnct([]string{"invalid algorithm: ", parameters["algorithm"]}...)) } - if len(parameters["version"]) == 0 || parameters["version"] != strconv.FormatInt(int64(argon2.Version), 10) { + if len(parameters["version"]) == 
0 || !generic.CompareString(parameters["version"], strconv.FormatInt(int64(argon2.Version), 10)) { return map[string]string{}, errors.New(generic.StrCnct([]string{"invalid version: ", parameters["version"]}...)) } @@ -190,6 +186,7 @@ func (a *Argon2ID) ExtractParameters(input string) (map[string]string, error) { return parameters, nil } +/* func (h *Hkdf) Hash(data []byte) (string, error) { if h.Salt != nil { if len(h.Salt) != h.HashMode().Size() { @@ -207,15 +204,15 @@ func (h *Hkdf) Hash(data []byte) (string, error) { h.HashMode = sha3.New512 } - kdf := hkdf.New(h.HashMode, h.Key , h.Salt, data) + kdf := hkdf.New(h.HashMode, h.Key, h.Salt, data) key := make([]byte, HKDFKeysize) if _, err := io.ReadFull(kdf, key); err != nil { return "", err } - - return generic.StrCnct([]string{h.Encoder.Encode(key), "#", h.Encoder.Encode(h.Salt), }...), nil + + return generic.StrCnct([]string{h.Encoder.Encode(key), "#", h.Encoder.Encode(h.Salt)}...), nil } func (h *Hkdf) Validate(data []byte, hash string) (bool, error) { @@ -228,14 +225,13 @@ func (h *Hkdf) Validate(data []byte, hash string) (bool, error) { h.HashMode = sha3.New512 } - kdf := hkdf.New(h.HashMode, h.Key , h.Salt, data) + kdf := hkdf.New(h.HashMode, h.Key, h.Salt, data) key := make([]byte, HKDFKeysize) if _, err := io.ReadFull(kdf, key); err != nil { return false, err } - hash_raw, err := h.Encoder.Decode(hash) if err != nil { @@ -243,6 +239,5 @@ func (h *Hkdf) Validate(data []byte, hash string) (bool, error) { } return generic.Compare(hash_raw, h.Salt), nil - //return generic.Compare(hash_raw, hash_to_validate), nil } - +*/ diff --git a/hash/kdf_test.go b/hash/kdf_test.go index a4b87e9..24e414d 100644 --- a/hash/kdf_test.go +++ b/hash/kdf_test.go @@ -1,6 +1,7 @@ package hash_test import ( + "strings" "testing" "github.com/D3vl0per/crypt/generic" @@ -13,66 +14,175 @@ func TestArgon2ID(t *testing.T) { data := []byte("Correct Horse Battery Staple") salt, err := generic.CSPRNG(16) r.NoError(t, err) - argon := []hasher.Argon2ID{ - {}, + tests := []struct { + name string + argon hasher.Argon2ID + }{ { - Memory: 2 * 64 * 1024, + name: "Default parameters", + argon: hasher.Argon2ID{}, }, { - Iterations: 4, + name: "Custom memory", + argon: hasher.Argon2ID{ + Memory: 2 * 64 * 1024, + }, }, { - Parallelism: 8, + name: "Custom iterations", + argon: hasher.Argon2ID{ + Iterations: 4, + }, }, { - KeyLen: 64, + name: "Custom parallelism", + argon: hasher.Argon2ID{ + Parallelism: 8, + }, }, { - Salt: salt, + name: "Custom key length", + argon: hasher.Argon2ID{ + KeyLen: 64, + }, }, { - Memory: 2 * 64 * 1024, - Iterations: 2, - Parallelism: 8, - KeyLen: 64, - Salt: salt, + name: "Custom salt", + argon: hasher.Argon2ID{ + Salt: salt, + }, + }, + { + name: "Custom parameters", + argon: hasher.Argon2ID{ + Memory: 2 * 64 * 1024, + Iterations: 2, + Parallelism: 8, + KeyLen: 64, + Salt: salt, + }, }, } - for _, e := range argon { - argonString, err := e.Hash(data) - r.NoError(t, err) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { - t.Log("Argon string: ", argonString) - parameters, err := e.ExtractParameters(argonString) - r.NoError(t, err) - t.Log("Argon parameters: ", parameters) + argonString, err := tt.argon.Hash(data) + r.NoError(t, err) - isValid, err := e.Validate(data, argonString) - r.NoError(t, err) - a.True(t, isValid) - } + t.Log("Argon string: ", argonString) + parameters, err := tt.argon.ExtractParameters(argonString) + r.NoError(t, err) + t.Log("Argon parameters: ", parameters) + isValid, err := tt.argon.Validate(data, 
argonString) + r.NoError(t, err) + a.True(t, isValid) + }) + } } -/* -func TestArgon2IDCustomSalt(t *testing.T) { - pass := []byte("Correct Horse Battery Staple") - salt, err := generic.CSPRNG(16) - r.NoError(t, err) +func TestArgon2IDWrongParameters(t *testing.T) { + // Wrong parameters + tests := []struct { + name string + argonString string + err string + }{ + { + name: "Fault test, algorithm is argon2i", + argonString: "$argon2i$v=19$m=10,t=2,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, algorithm is argon2d", + argonString: "$argon2d$v=19$m=10,t=2,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, algorithm is sdfgsdfgsf", + argonString: "$sdfgsdfgsf$v=19$m=10,t=2,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, version is 18", + argonString: "$argon2id$v=18$m=10,t=2,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid version", + }, + { + name: "Fault test, version is asdasd", + argonString: "$argon2id$v=asdasd$m=10,t=2,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, memory uint32 2^32+1", + argonString: "$argon2id$v=19$m=4294967297,t=2,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "memory parameter parsing error", + }, + { + name: "Fault test, memory uint32 -1", + argonString: "$argon2id$v=19$m=-1,t=2,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, memory NaN", + argonString: "$argon2id$v=19$m=asdf,t=2,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, iterations uint32 2^32+1", + argonString: "$argon2id$v=19$m=10,t=4294967297,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "iteration parameter parsing error", + }, + { + name: "Fault test, iterations uint32 -1", + argonString: "$argon2id$v=19$m=10,t=-1,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, iterations NaN", + argonString: "$argon2id$v=19$m=10,t=asd,p=1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, parallelism uint8 2^8", + argonString: "$argon2id$v=19$m=10,t=2,p=256$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "parallelism parameter parsing error", + }, + { + name: "Fault test, parallelism uint8 -1", + argonString: "$argon2id$v=19$m=10,t=2,p=-1$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + { + name: "Fault test, parallelism NaN", + argonString: "$argon2id$v=19$m=10,t=2,p=asr$SVJYMU1hdXB4czFTT3E4dw$+KPtJ/q0tnhCck+sbDva6g", + err: "invalid input format", + }, + } - blob, err := hash.Argon2IDCustomSalt(pass, salt) - r.NoError(t, err) + for _, tt := range tests { + argon := hasher.Argon2ID{} + t.Run(tt.name, func(t *testing.T) { + t.Log("Test name: ", tt.name) + t.Log("Argon string: ", tt.argonString) - isValid, err := hash.Argon2IDVerify(pass, blob.Salt, blob.Hash) - r.NoError(t, err) - a.True(t, isValid) + parameters, err := argon.ExtractParameters(tt.argonString) + if err != nil { + r.True(t, strings.Contains(err.Error(), tt.err)) + } else { + t.Log("Argon parameters: ", parameters) + + isValid, err := argon.Validate([]byte{}, tt.argonString) + a.False(t, isValid) + + r.True(t, strings.Contains(err.Error(), tt.err)) + } + }) } -
isValid, err = hash.Argon2IDVerify(pass, hex.EncodeToString(salt), blob.Hash) - r.NoError(t, err) - a.True(t, isValid) } -*/ + /* func TestHKDF(t *testing.T) { secret := []byte("Correct Horse Battery Staple") diff --git a/symmetric/symmetric.go b/symmetric/symmetric.go index 523a541..992257e 100644 --- a/symmetric/symmetric.go +++ b/symmetric/symmetric.go @@ -26,7 +26,7 @@ type XChaCha20 struct{} type Xor struct{} type XChaCha20Stream struct { - Key []byte + Key []byte Hash func() hash.Hash } @@ -150,7 +150,7 @@ func (x *XChaCha20Stream) Decrypt(in io.Reader, out io.Writer) error { return nil } -type stream struct{ +type stream struct { Hash func() hash.Hash } @@ -190,9 +190,9 @@ func (s *stream) key(fileKey, nonce []byte) ([]byte, error) { h := hkdf.New(s.Hash, fileKey, nonce, []byte("payload")) streamKey := make([]byte, chacha20poly1305.KeySize) if _, err := io.ReadFull(h, streamKey); err != nil { - return nil,err + return nil, err } - if generic.AllZero(streamKey){ + if generic.AllZero(streamKey) { return nil, errors.New("streamer key is all zero") } return streamKey, nil diff --git a/symmetric/symmetric_test.go b/symmetric/symmetric_test.go index 7a9e955..ca91424 100644 --- a/symmetric/symmetric_test.go +++ b/symmetric/symmetric_test.go @@ -64,13 +64,13 @@ func TestXChaCha20(t *testing.T) { } func TestXOR(t *testing.T) { - a := []byte{0x0f, 0x1a, 0x2b, 0x3c} - b := []byte{0x2a, 0x1b, 0x0c, 0x3d} + a := []byte{0x0f, 0x1a, 0x2b, 0x3c} + b := []byte{0x2a, 0x1b, 0x0c, 0x3d} sym := symmetric.Xor{} - expected := []byte{0x25, 0x01, 0x27, 0x01} - result, err := sym.Encrypt(a, b) - r.NoError(t, err) + expected := []byte{0x25, 0x01, 0x27, 0x01} + result, err := sym.Encrypt(a, b) + r.NoError(t, err) - r.Equal(t, expected, result) -} \ No newline at end of file + r.Equal(t, expected, result) +} From 97189c77ff917f186bd9fd6f5d6668ee5f803c66 Mon Sep 17 00:00:00 2001 From: D3v Date: Wed, 22 Nov 2023 00:16:28 +0100 Subject: [PATCH 07/12] Multiple improvements --- README.md | 5 +- asymmetric/asymmetric.go | 41 ++-- asymmetric/asymmetric_test.go | 175 +++++++++++++---- compression/compression_test.go | 37 +++- generic/encoder.go | 2 +- generic/fs.go | 49 +++-- generic/fs_test.go | 57 ++---- generic/imports_exports.go | 169 ++++++++++++++++ generic/imports_exports_test.go | 335 ++++++++++++++++++++++++++++++++ generic/ports.go | 71 ------- hash/fs.go | 12 ++ hash/hash.go | 129 ++++++++---- hash/hash_test.go | 123 ++++++++---- insecure/symmetric/symmetric.go | 9 +- symmetric/symmetric.go | 27 ++- 15 files changed, 968 insertions(+), 273 deletions(-) create mode 100644 generic/imports_exports.go create mode 100644 generic/imports_exports_test.go delete mode 100644 generic/ports.go diff --git a/README.md b/README.md index f5f1b28..f444704 100644 --- a/README.md +++ b/README.md @@ -12,15 +12,16 @@ Crypto suite: - /dev/hwrng - Encoders - Base64 + - Base32 - Hex - Symmetric - XChacha20-poly1305 - XChacha20-poly1305 Stream (modified age code) - - XOR (OTP) + - XOR - AES-GCM (pending) - Asymmetric - ed25519 - - ed448 + - ed448 - x25519 (pending) - Hash - Blake2b-256 diff --git a/asymmetric/asymmetric.go b/asymmetric/asymmetric.go index 22ea896..6077f3d 100644 --- a/asymmetric/asymmetric.go +++ b/asymmetric/asymmetric.go @@ -3,7 +3,6 @@ package asymmetric import ( "crypto" "crypto/ed25519" - "encoding/hex" "errors" "strconv" @@ -30,11 +29,13 @@ type Signing interface { type Ed25519 struct { SecretKey ed25519.PrivateKey PublicKey ed25519.PublicKey + Encoder generic.Encoder } type Ed448 struct { SecretKey 
ed448.PrivateKey PublicKey ed448.PublicKey Context string + Encoder generic.Encoder } func (e *Ed25519) Generate() error { @@ -58,17 +59,24 @@ func (e *Ed25519) GenerateFromSeed(seed []byte) error { } func (e *Ed25519) Sign(msg []byte) string { - return hex.EncodeToString(ed25519.Sign(e.SecretKey, msg)) + if e.Encoder == nil { + return string(ed25519.Sign(e.SecretKey, msg)) + } + + return e.Encoder.Encode(ed25519.Sign(e.SecretKey, msg)) } func (e *Ed25519) Verify(msg []byte, sig string) (bool, error) { - sig_raw, err := hex.DecodeString(sig) - if err != nil { - return false, err + if e.Encoder == nil { + return ed25519.Verify(e.PublicKey, msg, []byte(sig)), nil + } else { + sig_raw, err := e.Encoder.Decode(sig) + if err != nil { + return false, err + } + return ed25519.Verify(e.PublicKey, msg, sig_raw), nil } - - return ed25519.Verify(e.PublicKey, msg, sig_raw), nil } /// @@ -95,16 +103,23 @@ func (e *Ed448) GenerateFromSeed(seed []byte) error { } func (e *Ed448) Sign(msg []byte) string { - return hex.EncodeToString(ed448.Sign(e.SecretKey, msg, e.Context)) + if e.Encoder == nil { + return string(ed448.Sign(e.SecretKey, msg, e.Context)) + } + + return string(e.Encoder.Encode(ed448.Sign(e.SecretKey, msg, e.Context))) } func (e *Ed448) Verify(msg []byte, sig string) (bool, error) { - sig_raw, err := hex.DecodeString(sig) - if err != nil { - return false, err + if e.Encoder == nil { + return ed448.Verify(e.PublicKey, msg, []byte(sig), e.Context), nil + } else { + sig_raw, err := e.Encoder.Decode(sig) + if err != nil { + return false, err + } + return ed448.Verify(e.PublicKey, msg, sig_raw, e.Context), nil } - - return ed448.Verify(e.PublicKey, msg, sig_raw, e.Context), nil } func Ed25519ToPublicKey(pub crypto.PublicKey) (ed25519.PublicKey, error) { diff --git a/asymmetric/asymmetric_test.go b/asymmetric/asymmetric_test.go index 426a7d1..68d66eb 100644 --- a/asymmetric/asymmetric_test.go +++ b/asymmetric/asymmetric_test.go @@ -46,45 +46,73 @@ func TestGenerateEd25519KeypairFromSeed(t *testing.T) { func TestE2EEEd25519SignVerify(t *testing.T) { msg := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit.") - asym := asymmetric.Ed25519{} - - err := asym.Generate() - r.NoError(t, err) - - signature := asym.Sign(msg) - r.NotEmpty(t, signature) - - isValid, err := asym.Verify(msg, signature) - r.NoError(t, err) - r.True(t, isValid) -} - -/* -func TestKeyWrapping(t *testing.T) { - - pk, sk, err := GenerateEd25519Keypair() - t.Log("Public Key", pk) - t.Log("Secret Key", sk) - r.NoError(t, err) - - pk_w, err := ExportECCPK(sk.Public()) - t.Log("Wrapped Public Key", pk_w) - r.NoError(t, err) - - sk_w, err := ExportECCSK(sk) - t.Log("Wrapped Secret Key", sk_w) - r.NoError(t, err) - - pk_2, err := ImportECCPK(pk_w) - r.NoError(t, err) - - sk_2, err := ImportECCSK(sk_w) - r.NoError(t, err) - - r.Equal(t, pk, pk_2) - r.Equal(t, sk, sk_2) + tests := []struct { + name string + asym asymmetric.Ed25519 + }{ + { + name: "Raw keys", + }, + { + name: "Base64 encoder", + asym: asymmetric.Ed25519{ + Encoder: &generic.Base64{}, + }, + }, + { + name: "UrlBase64 encoder", + asym: asymmetric.Ed25519{ + Encoder: &generic.UrlBase64{}, + }, + }, + { + name: "RawUrlBase64 encoder", + asym: asymmetric.Ed25519{ + Encoder: &generic.RawUrlBase64{}, + }, + }, + { + name: "RawBase64 encoder", + asym: asymmetric.Ed25519{ + Encoder: &generic.RawBase64{}, + }, + }, + { + name: "Base32 encoder", + asym: asymmetric.Ed25519{ + Encoder: &generic.Base32{}, + }, + }, + { + name: "PaddinglessBase32 encoder", + asym: 
asymmetric.Ed25519{ + Encoder: &generic.PaddinglessBase32{}, + }, + }, + { + name: "Hex encoder", + asym: asymmetric.Ed25519{ + Encoder: &generic.Hex{}, + }, + }, + } + + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.asym.Generate() + r.NoError(t, err) + + signature := tt.asym.Sign(msg) + r.NotEmpty(t, signature) + t.Log("Signature:", signature) + + isValid, err := tt.asym.Verify(msg, signature) + r.NoError(t, err) + r.True(t, isValid) + }) + } } -*/ func TestGenerateEd448Keypair(t *testing.T) { asym := asymmetric.Ed448{} @@ -100,6 +128,77 @@ func TestGenerateEd448Keypair(t *testing.T) { t.Log("Ed448 Public Key Hex:", hex.EncodeToString(asym.PublicKey)) } +func TestE2EEEd448SignVerify(t *testing.T) { + msg := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit.") + + tests := []struct { + name string + asym asymmetric.Ed448 + }{ + { + name: "Raw keys", + }, + { + name: "Base64 encoder", + asym: asymmetric.Ed448{ + Encoder: &generic.Base64{}, + }, + }, + { + name: "UrlBase64 encoder", + asym: asymmetric.Ed448{ + Encoder: &generic.UrlBase64{}, + }, + }, + { + name: "RawUrlBase64 encoder", + asym: asymmetric.Ed448{ + Encoder: &generic.RawUrlBase64{}, + }, + }, + { + name: "RawBase64 encoder", + asym: asymmetric.Ed448{ + Encoder: &generic.RawBase64{}, + }, + }, + { + name: "Base32 encoder", + asym: asymmetric.Ed448{ + Encoder: &generic.Base32{}, + }, + }, + { + name: "PaddinglessBase32 encoder", + asym: asymmetric.Ed448{ + Encoder: &generic.PaddinglessBase32{}, + }, + }, + { + name: "Hex encoder", + asym: asymmetric.Ed448{ + Encoder: &generic.Hex{}, + }, + }, + } + + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.asym.Generate() + r.NoError(t, err) + + signature := tt.asym.Sign(msg) + r.NotEmpty(t, signature) + t.Log("Signature:", signature) + + isValid, err := tt.asym.Verify(msg, signature) + r.NoError(t, err) + r.True(t, isValid) + }) + } +} + // Deterministic generation check. 
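// The check below relies on GenerateFromSeed being a pure function of the
// seed. A minimal sketch of the property under test (it assumes, though this
// hunk does not show it, that GenerateFromSeed errors on seeds that are not
// 57 bytes, ed448's seed size):
//
//	seed, _ := generic.CSPRNG(57)
//	k1, k2 := asymmetric.Ed448{}, asymmetric.Ed448{}
//	_ = k1.GenerateFromSeed(seed)
//	_ = k2.GenerateFromSeed(seed)
//	// k1.PublicKey and k2.PublicKey must be byte-identical.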
func TestGenerateEd448KeypairFromSeed(t *testing.T) { rng, err := generic.CSPRNG(57) diff --git a/compression/compression_test.go b/compression/compression_test.go index 6bf595c..5821588 100644 --- a/compression/compression_test.go +++ b/compression/compression_test.go @@ -30,22 +30,39 @@ func TestRoundTrips(t *testing.T) { genericModes := []int{9, 1, 0, -1, -2} zstdModes := []int{11, 7, 3, 1} - test := map[int][]byte{ - 0: []byte("PSGIeAYZuvDa2QScJkAI1S824E0fA8M2aAYH3SdMd9mWlETmDIgfbexxT5nwygIDIHFp5A92V6Ke4Sl7FwsOU5ox7IIhReltbLONZutz0EbnN3TiquWz3QJjNlo0HJ1t"), - 1: []byte("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), - 2: []byte("10101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010"), + tests := []struct { + name string + data []byte + }{ + { + name: "Random data", + data: []byte("PSGIeAYZuvDa2QScJkAI1S824E0fA8M2aAYH3SdMd9mWlETmDIgfbexxT5nwygIDIHFp5A92V6Ke4Sl7FwsOU5ox7IIhReltbLONZutz0EbnN3TiquWz3QJjNlo0HJ1t"), + }, + { + name: "Zero data", + data: []byte("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), + }, + { + name: "One data", + data: []byte("10101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010"), + }, } - for _, data := range test { + + for _, test := range tests { for _, level := range zstdModes { - testRoundTrip(t, &compression.Zstd{Level: level}, data) + t.Run(test.name, func(t *testing.T) { + testRoundTrip(t, &compression.Zstd{Level: level}, test.data) + }) } } - for _, data := range test { + for _, test := range tests { for _, level := range genericModes { - testRoundTrip(t, &compression.Flate{Level: level}, data) - testRoundTrip(t, &compression.Zlib{Level: level}, data) - testRoundTrip(t, &compression.Gzip{Level: level}, data) + t.Run(test.name, func(t *testing.T) { + testRoundTrip(t, &compression.Flate{Level: level}, test.data) + testRoundTrip(t, &compression.Zlib{Level: level}, test.data) + testRoundTrip(t, &compression.Gzip{Level: level}, test.data) + }) } } } diff --git a/generic/encoder.go b/generic/encoder.go index d7305ea..d83bd9e 100644 --- a/generic/encoder.go +++ b/generic/encoder.go @@ -8,7 +8,7 @@ import ( type Encoder interface { Encode([]byte) string - Decode([]byte) ([]byte, error) + Decode(string) ([]byte, error) } // StdEncoding is the standard base64 encoding, as defined in RFC 4648. 
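The one-line Encoder change above is load-bearing for this whole patch: Encode
returns a string, so Decode must accept that string back, and with the
corrected signature the Ed25519/Ed448 signers and the import/export wrappers
below can treat every encoder interchangeably. A minimal round-trip sketch
(assuming only the generic.Hex implementation the tests in this series
already use):

	var enc generic.Encoder = &generic.Hex{}
	s := enc.Encode([]byte{0xde, 0xad}) // "dead" with encoding/hex semantics
	b, err := enc.Decode(s)             // []byte{0xde, 0xad}, nil
	// Every Encoder implementation must keep Encode and Decode exact inverses,
	// otherwise Verify/Import fails on data its own Sign/Export produced.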
diff --git a/generic/fs.go b/generic/fs.go index 0079a07..b2d0ba6 100644 --- a/generic/fs.go +++ b/generic/fs.go @@ -28,11 +28,7 @@ func Delete(targetPath string, cycle int) error { for i := 0; i < cycle; i++ { // Owerwrite with zeros - n, err := file.Write([]byte(zeroBytes)) - if err != nil { - return err - } - err = file.Sync() + n, err := WriteAndFlush(file, zeroBytes) if err != nil { return err } @@ -44,15 +40,10 @@ func Delete(targetPath string, cycle int) error { if err != nil { return err } - err = file.Sync() + n, err = WriteAndFlush(file, rnd) if err != nil { return err } - n, err = file.Write(rnd) - if err != nil { - return err - } - if n != int(fileInfo.Size()) { return errors.New("rand owerwrite bytes mismatch") } @@ -83,15 +74,15 @@ func Overwrite(targetPath string, data []byte, cycle int) error { return err } + if len(data) != int(fileInfo.Size()) { + return errors.New("data size must be equal to file size") + } + zeroBytes := make([]byte, fileInfo.Size()) for i := 0; i < cycle; i++ { // Owerwrite with zeros - n, err := file.Write([]byte(zeroBytes)) - if err != nil { - return err - } - err = file.Sync() + n, err := WriteAndFlush(file, zeroBytes) if err != nil { return err } @@ -105,11 +96,7 @@ func Overwrite(targetPath string, data []byte, cycle int) error { return err } - n, err = file.Write(rnd) - if err != nil { - return err - } - err = file.Sync() + n, err = WriteAndFlush(file, rnd) if err != nil { return err } @@ -164,3 +151,23 @@ func ReadFileContent(path string) ([]byte, error) { } return data, nil } + +func WriteAndFlush(file *os.File, rnd []byte) (n int, err error){ + n, err = file.Write(rnd) + if err != nil { + return 0, err + } + err = file.Sync() + if err != nil { + return 0, err + } + err = file.Truncate(0) + if err != nil { + return 0, err + } + _, err = file.Seek(0, 0) + if err != nil { + return 0, err + } + return n, nil +} \ No newline at end of file diff --git a/generic/fs_test.go b/generic/fs_test.go index 385716d..207ee3d 100644 --- a/generic/fs_test.go +++ b/generic/fs_test.go @@ -1,39 +1,34 @@ package generic_test import ( + "bytes" "errors" "os" "testing" "github.com/D3vl0per/crypt/generic" + r "github.com/stretchr/testify/require" ) func TestDelete(t *testing.T) { // Create a temporary file for testing tempFile, err := os.CreateTemp("", "testfile") - if err != nil { - t.Fatal(err) - } + r.NoError(t, err) + defer os.Remove(tempFile.Name()) // Write some data to the temporary file data := []byte("test data") _, err = tempFile.Write(data) - if err != nil { - t.Fatal(err) - } + r.NoError(t, err) // Close the file before deleting it err = tempFile.Close() - if err != nil { - t.Fatal(err) - } + r.NoError(t, err) // Call the Delete function with the temporary file path err = generic.Delete(tempFile.Name(), 3) - if err != nil { - t.Fatal(err) - } + r.NoError(t, err) // Check if the file has been deleted _, err = os.Stat(tempFile.Name()) @@ -41,47 +36,37 @@ func TestDelete(t *testing.T) { t.Errorf("expected file to be deleted, got error: %v", err) } } - -/* func TestOverwrite(t *testing.T) { // Create a temporary file for testing tempFile, err := os.CreateTemp("", "testfile") - if err != nil { - t.Fatal(err) - } + r.NoError(t, err) + defer os.Remove(tempFile.Name()) // Write some data to the temporary file - data := []byte("test data") + data, err := generic.CSPRNG(32) + r.NoError(t, err) + + expectedContents, err := generic.CSPRNG(32) + r.NoError(t, err) + _, err = tempFile.Write(data) - if err != nil { - t.Fatal(err) - } + r.NoError(t, err) // Close the file 
before overwriting it err = tempFile.Close() - if err != nil { - t.Fatal(err) - } + r.NoError(t, err) - expectedContents := []byte("new data") // Call the Overwrite function with the temporary file path - err = generic.Overwrite(tempFile.Name(), expectedContents, 3) - if err != nil { - t.Fatal(err) - } + err = generic.Overwrite(tempFile.Name(), expectedContents, 10) + r.NoError(t, err) // Read the contents of the file fileContents, err := generic.ReadFileContent(tempFile.Name()) - if err != nil { - t.Fatal(err) - } - - t.Log(string(fileContents)) + r.NoError(t, err) - // Check if the file has been overwritten correctly + // Check if the file contents have been overwritten if !bytes.Equal(fileContents, expectedContents) { t.Errorf("expected file contents to be %q, got %q", expectedContents, fileContents) } } -*/ diff --git a/generic/imports_exports.go b/generic/imports_exports.go new file mode 100644 index 0000000..8e04020 --- /dev/null +++ b/generic/imports_exports.go @@ -0,0 +1,169 @@ +package generic + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +type ImportExport interface { + Import() error + Export() error +} + +// PKIX is a generic struct for importing and exporting an Ed25519 public key. +// One way to import: +// 1. ImportData (string) -> PublicKey (ed25519.PublicKey) +// Two ways to export: +// 1. PublicKey (ed25519.PublicKey) -> ExportData (string) +// 2. ExportPublicKey (crypto.PublicKey) -> ExportData (string) +type PKIX struct { + PublicKey ed25519.PublicKey + ImportData string + ExportData string + ExportPublicKey crypto.PublicKey + Encoder Encoder +} + +// PKCS is the PKCS #8 counterpart for importing and exporting an Ed25519 private key. +type PKCS struct { + SecretKey ed25519.PrivateKey + ImportData string + ExportData string + Encoder Encoder +} + +// struct PKIX ImportData (string) -> struct PKIX PublicKey (ed25519.PublicKey) +func (e *PKIX) Import() error { + + if e.ImportData == "" { + return errors.New("import data is empty") + } + + var err error + var data []byte + if e.Encoder == nil { + data = []byte(e.ImportData) + } else { + data, err = e.Encoder.Decode(e.ImportData) + if err != nil { + return err + } + } + + pemBlock, rest := pem.Decode(data) + if len(rest) != 0 { + return errors.New("invalid pem block") + } + + pkRaw, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil { + return err + } + // nolint:errcheck + pkC := pkRaw.(crypto.PublicKey) + // nolint:errcheck + e.PublicKey = pkC.(ed25519.PublicKey) + return nil +} +// Two ways to export: +// 1. struct PKIX PublicKey (ed25519.PublicKey) -> struct PKIX ExportData (string) +// 2.
struct PKIX ExportPublicKey (crypto.PublicKey) -> struct PKIX ExportData (string) +func (e *PKIX) Export() error { + + if e.ExportPublicKey == nil && e.PublicKey == nil { + return errors.New("missing public key") + } + + if e.ExportPublicKey != nil && e.PublicKey != nil { + return errors.New("cannot export both public key and export public key") + } + + var err error + var marshal []byte + if e.ExportPublicKey != nil { + marshal, err = x509.MarshalPKIXPublicKey(e.ExportPublicKey) + if err != nil { + return err + } + } else { + marshal, err = x509.MarshalPKIXPublicKey(e.PublicKey) + if err != nil { + return err + } + } + + block := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: marshal, + } + + if e.Encoder == nil { + e.ExportData = string(pem.EncodeToMemory(block)) + } else { + e.ExportData = e.Encoder.Encode(pem.EncodeToMemory(block)) + } + + return nil +} + +// struct PKCS ImportData (string) -> struct PKCS SecretKey (ed25519.PrivateKey) +func (e *PKCS) Import() error { + + if e.ImportData == "" { + return errors.New("import data is empty") + } + + var err error + var data []byte + if e.Encoder == nil { + data = []byte(e.ImportData) + } else { + data, err = e.Encoder.Decode(e.ImportData) + if err != nil { + return err + } + } + + pemBlock, rest := pem.Decode(data) + if len(rest) != 0 { + return errors.New("invalid pem block") + } + + pkRaw, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes) + if err != nil { + return err + } + // nolint:errcheck + pkC := pkRaw.(crypto.PublicKey) + // nolint:errcheck + e.SecretKey = pkC.(ed25519.PrivateKey) + return nil +} + +// struct PKCS SecretKey (ed25519.PrivateKey) -> struct PKCS ExportData (string) +func (e *PKCS) Export() error { + + if e.SecretKey == nil { + return errors.New("missing secret key") + } + + b, err := x509.MarshalPKCS8PrivateKey(e.SecretKey) + if err != nil { + return err + } + + block := &pem.Block{ + Type: "PRIVATE KEY", + Bytes: b, + } + + if e.Encoder == nil { + e.ExportData = string(pem.EncodeToMemory(block)) + } else { + e.ExportData = e.Encoder.Encode(pem.EncodeToMemory(block)) + } + + return nil +} diff --git a/generic/imports_exports_test.go b/generic/imports_exports_test.go new file mode 100644 index 0000000..9615bca --- /dev/null +++ b/generic/imports_exports_test.go @@ -0,0 +1,335 @@ +package generic_test + +import ( + "strings" + "testing" + + "github.com/D3vl0per/crypt/asymmetric" + "github.com/D3vl0per/crypt/generic" + r "github.com/stretchr/testify/require" +) + +func TestPKIX(t *testing.T) { + asym := asymmetric.Ed25519{} + err := asym.Generate() + t.Log("Public Key", asym.PublicKey) + t.Log("Secret Key", asym.SecretKey) + r.NoError(t, err) + + tests := []struct { + name string + pkix generic.PKIX + }{ + { + name: "Raw keys", + pkix: generic.PKIX{ + ExportPublicKey: asym.SecretKey.Public(), + }, + }, + { + name: "Raw keys with ed25519.PublicKey", + pkix: generic.PKIX{ + ExportPublicKey: asym.PublicKey, + }, + }, + { + name: "Hex encoded keys", + pkix: generic.PKIX{ + ExportPublicKey: asym.SecretKey.Public(), + Encoder: &generic.Hex{}, + }, + }, + { + name: "Hex encoded keys with ed25519.PublicKey", + pkix: generic.PKIX{ + ExportPublicKey: asym.PublicKey, + Encoder: &generic.Hex{}, + }, + }, + { + name: "Base64 encoded keys", + pkix: generic.PKIX{ + ExportPublicKey: asym.SecretKey.Public(), + Encoder: &generic.Base64{}, + }, + }, + { + name: "Base64 encoded keys with ed25519.PublicKey", + pkix: generic.PKIX{ + ExportPublicKey: asym.PublicKey, + Encoder: &generic.Base64{}, + }, + }, + { + name: "UrlBase64 encoded keys", + 
pkix: generic.PKIX{ + ExportPublicKey: asym.SecretKey.Public(), + Encoder: &generic.UrlBase64{}, + }, + }, + { + name: "UrlBase64 encoded keys with ed25519.PublicKey", + pkix: generic.PKIX{ + ExportPublicKey: asym.PublicKey, + Encoder: &generic.UrlBase64{}, + }, + }, + { + name: "RawUrlBase64 encoded keys", + pkix: generic.PKIX{ + ExportPublicKey: asym.SecretKey.Public(), + Encoder: &generic.RawUrlBase64{}, + }, + }, + { + name: "RawUrlBase64 encoded keys with ed25519.PublicKey", + pkix: generic.PKIX{ + ExportPublicKey: asym.PublicKey, + Encoder: &generic.RawUrlBase64{}, + }, + }, + { + name: "RawBase64 encoded keys", + pkix: generic.PKIX{ + ExportPublicKey: asym.SecretKey.Public(), + Encoder: &generic.RawBase64{}, + }, + }, + { + name: "RawBase64 encoded keys with ed25519.PublicKey", + pkix: generic.PKIX{ + ExportPublicKey: asym.PublicKey, + Encoder: &generic.RawBase64{}, + }, + }, + { + name: "Base32 encoded keys", + pkix: generic.PKIX{ + ExportPublicKey: asym.SecretKey.Public(), + Encoder: &generic.Base32{}, + }, + }, + { + name: "Base32 encoded keys with ed25519.PublicKey", + pkix: generic.PKIX{ + ExportPublicKey: asym.PublicKey, + Encoder: &generic.Base32{}, + }, + }, + { + name: "PaddinglessBase32 encoded keys", + pkix: generic.PKIX{ + ExportPublicKey: asym.SecretKey.Public(), + Encoder: &generic.PaddinglessBase32{}, + }, + }, + { + name: "PaddinglessBase32 encoded keys with ed25519.PublicKey", + pkix: generic.PKIX{ + ExportPublicKey: asym.PublicKey, + Encoder: &generic.PaddinglessBase32{}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err = tt.pkix.Export() + r.NoError(t, err) + t.Log("PKIX wrapped Ed25519 Public Key", tt.pkix.ExportData) + + pkix2 := generic.PKIX{ + ImportData: tt.pkix.ExportData, + Encoder: tt.pkix.Encoder, + } + + err = pkix2.Import() + r.NoError(t, err) + r.Equal(t, asym.PublicKey, pkix2.PublicKey) + }) + } +} + +func TestFailPKIX(t *testing.T) { + asym := asymmetric.Ed25519{} + err := asym.Generate() + t.Log("Public Key", asym.PublicKey) + t.Log("Secret Key", asym.SecretKey) + r.NoError(t, err) + + tests := []struct { + name string + pkix generic.PKIX + expectedError string + }{ + { + name: "No keys export", + pkix: generic.PKIX{}, + expectedError: "missing public key", + }, + { + name: "Double key export", + pkix: generic.PKIX{ + PublicKey: asym.PublicKey, + ExportPublicKey: asym.SecretKey.Public(), + }, + expectedError: "cannot export both public key and export public key", + }, + { + name: "Wrong data import", + pkix: generic.PKIX{ + ImportData: "wrong data", + }, + expectedError: "invalid pem block", + }, + { + name: "Missing import data", + pkix: generic.PKIX{ + ImportData: "", + }, + expectedError: "import data is empty", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if strings.Contains(test.name, "export") { + err = test.pkix.Export() + r.Error(t, err) + r.EqualError(t, err, test.expectedError) + } + if strings.Contains(test.name, "import") { + err = test.pkix.Import() + r.Error(t, err) + r.EqualError(t, err, test.expectedError) + } + }) + } +} + +func TestPKCS(t *testing.T) { + asym := asymmetric.Ed25519{} + err := asym.Generate() + t.Log("Public Key", asym.PublicKey) + t.Log("Secret Key", asym.SecretKey) + r.NoError(t, err) + + tests := []struct { + name string + pkcs generic.PKCS + }{ + { + name: "Raw keys", + pkcs: generic.PKCS{ + SecretKey: asym.SecretKey, + }, + }, + { + name: "Hex encoded keys", + pkcs: generic.PKCS{ + SecretKey: asym.SecretKey, + Encoder: &generic.Hex{}, 
+ }, + }, + { + name: "Base64 encoded keys", + pkcs: generic.PKCS{ + SecretKey: asym.SecretKey, + Encoder: &generic.Base64{}, + }, + }, + { + name: "UrlBase64 encoded keys", + pkcs: generic.PKCS{ + SecretKey: asym.SecretKey, + Encoder: &generic.UrlBase64{}, + }, + }, + { + name: "RawUrlBase64 encoded keys", + pkcs: generic.PKCS{ + SecretKey: asym.SecretKey, + Encoder: &generic.RawUrlBase64{}, + }, + }, + { + name: "RawBase64 encoded keys", + pkcs: generic.PKCS{ + SecretKey: asym.SecretKey, + Encoder: &generic.RawBase64{}, + }, + }, + { + name: "Base32 encoded keys", + pkcs: generic.PKCS{ + SecretKey: asym.SecretKey, + Encoder: &generic.Base32{}, + }, + }, + { + name: "PaddinglessBase32 encoded keys", + pkcs: generic.PKCS{ + SecretKey: asym.SecretKey, + Encoder: &generic.PaddinglessBase32{}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + err = tt.pkcs.Export() + r.NoError(t, err) + t.Log("PKCS wrapped Ed25519 Secret Key", tt.pkcs.ExportData) + + pkcs2 := generic.PKCS{ + ImportData: tt.pkcs.ExportData, + Encoder: tt.pkcs.Encoder, + } + + err = pkcs2.Import() + r.NoError(t, err) + r.Equal(t, asym.SecretKey, pkcs2.SecretKey) + }) + } +} + +func TestFailPKCS(t *testing.T) { + asym := asymmetric.Ed25519{} + err := asym.Generate() + t.Log("Public Key", asym.PublicKey) + t.Log("Secret Key", asym.SecretKey) + r.NoError(t, err) + + tests := []struct { + name string + pkcs generic.PKCS + expectedError string + }{ + { + name: "No keys export", + pkcs: generic.PKCS{}, + expectedError: "missing secret key", + }, + { + name: "Missing import data", + pkcs: generic.PKCS{}, + expectedError: "import data is empty", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if strings.Contains(test.name, "export") { + err = test.pkcs.Export() + r.Error(t, err) + r.EqualError(t, err, test.expectedError) + } + if strings.Contains(test.name, "import") { + err = test.pkcs.Import() + r.Error(t, err) + r.EqualError(t, err, test.expectedError) + } + }) + } +} diff --git a/generic/ports.go b/generic/ports.go deleted file mode 100644 index 47f9af9..0000000 --- a/generic/ports.go +++ /dev/null @@ -1,71 +0,0 @@ -package generic - -import ( - "crypto" - "crypto/ed25519" - "crypto/x509" - "encoding/hex" - "encoding/pem" -) - -func ExportECCPK(pk crypto.PublicKey) (string, error) { - b, err := x509.MarshalPKIXPublicKey(pk) - if err != nil { - return "", err - } - - block := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: b, - } - return hex.EncodeToString(pem.EncodeToMemory(block)), nil -} - -func ExportECCSK(sk ed25519.PrivateKey) (string, error) { - b, err := x509.MarshalPKCS8PrivateKey(sk) - if err != nil { - return "", err - } - - block := &pem.Block{ - Type: "PRIVATE KEY", - Bytes: b, - } - return hex.EncodeToString(pem.EncodeToMemory(block)), nil -} - -func ImportECCPK(pk string) (ed25519.PublicKey, error) { - pkPem, err := hex.DecodeString(pk) - if err != nil { - return ed25519.PublicKey{}, err - } - - pemBlock, _ := pem.Decode(pkPem) - - pkRaw, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return ed25519.PublicKey{}, err - } - // nolint:errcheck - pkC := pkRaw.(crypto.PublicKey) - // nolint:errcheck - return pkC.(ed25519.PublicKey), nil -} - -func ImportECCSK(sk string) (ed25519.PublicKey, error) { - skPem, err := hex.DecodeString(sk) - if err != nil { - return ed25519.PublicKey{}, err - } - - pemBlock, _ := pem.Decode(skPem) - - pkRaw, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes) - if err != nil { - return ed25519.PublicKey{}, err 
- } - // nolint:errcheck - pkC := pkRaw.(crypto.PublicKey) - // nolint:errcheck - return pkC.(ed25519.PublicKey), nil -} diff --git a/hash/fs.go b/hash/fs.go index b5f72d2..c8d7d79 100644 --- a/hash/fs.go +++ b/hash/fs.go @@ -13,3 +13,15 @@ func ReadFileContentAndHash(algo Algorithms, path string) ([]byte, error) { } return hash, nil } + +func ReadFileContentAndHmac(algo Algorithms, path string) ([]byte, error) { + content, err := generic.ReadFileContent(path) + if err != nil { + return []byte{}, err + } + hash, err := algo.Hmac(content) + if err != nil { + return []byte{}, err + } + return hash, nil +} \ No newline at end of file diff --git a/hash/hash.go b/hash/hash.go index 942b401..66cd911 100644 --- a/hash/hash.go +++ b/hash/hash.go @@ -1,6 +1,7 @@ package hash import ( + "errors" "hash" "github.com/D3vl0per/crypt/generic" @@ -8,23 +9,47 @@ import ( "golang.org/x/crypto/sha3" ) +var ( + ErrHmacSecretNil = errors.New("HMAC secret is nil") +) + type Algorithms interface { + // data Hash([]byte) ([]byte, error) + // plaintext, expectedHash ValidateHash([]byte, []byte) (bool, error) - Hmac([]byte, []byte) ([]byte, error) - ValidateHmac([]byte, []byte, []byte) (bool, error) + // data + Hmac([]byte) ([]byte, error) + // data, expectedHash + ValidateHmac([]byte, []byte) (bool, error) } -type Blake2b256 struct{} -type Blake2b384 struct{} -type Blake2b512 struct{} +type Blake2b256 struct { + HmacSecret []byte +} +type Blake2b384 struct { + HmacSecret []byte +} +type Blake2b512 struct { + HmacSecret []byte +} -type Sha3256 struct{} -type Sha3384 struct{} -type Sha3512 struct{} +type Sha3256 struct { + HmacSecret []byte +} +type Sha3384 struct { + HmacSecret []byte +} +type Sha3512 struct { + HmacSecret []byte +} -type Shake128 struct{} -type Shake256 struct{} +type Shake128 struct { + HmacSecret []byte +} +type Shake256 struct { + HmacSecret []byte +} /// /// Blake2b-256 @@ -43,12 +68,18 @@ func (b *Blake2b256) ValidateHash(plaintext, expectedHash []byte) (bool, error) return generic.Compare(hashed, expectedHash), nil } -func (b *Blake2b256) Hmac(key, data []byte) ([]byte, error) { - return hashBlake2b(blake2b.Size256, key, data) +func (b *Blake2b256) Hmac(data []byte) ([]byte, error) { + if b.HmacSecret == nil { + return nil, ErrHmacSecretNil + } + return hashBlake2b(blake2b.Size256, b.HmacSecret, data) } -func (b *Blake2b256) ValidateHmac(key, data, expectedHash []byte) (bool, error) { - hashed, err := hashBlake2b(blake2b.Size256, key, data) +func (b *Blake2b256) ValidateHmac(data, expectedHash []byte) (bool, error) { + if b.HmacSecret == nil { + return false, ErrHmacSecretNil + } + hashed, err := hashBlake2b(blake2b.Size256, b.HmacSecret, data) if err != nil { return false, err } @@ -73,12 +104,18 @@ func (b *Blake2b384) ValidateHash(plaintext, expectedHash []byte) (bool, error) return generic.Compare(hashed, expectedHash), nil } -func (b *Blake2b384) Hmac(key, data []byte) ([]byte, error) { - return hashBlake2b(blake2b.Size384, key, data) +func (b *Blake2b384) Hmac(data []byte) ([]byte, error) { + if b.HmacSecret == nil { + return nil, ErrHmacSecretNil + } + return hashBlake2b(blake2b.Size384, b.HmacSecret, data) } -func (b *Blake2b384) ValidateHmac(key, data, expectedHash []byte) (bool, error) { - hashed, err := hashBlake2b(blake2b.Size384, key, data) +func (b *Blake2b384) ValidateHmac(data, expectedHash []byte) (bool, error) { + if b.HmacSecret == nil { + return false, ErrHmacSecretNil + } + hashed, err := hashBlake2b(blake2b.Size384, b.HmacSecret, data) if err != nil { return false, err 
} @@ -103,12 +140,18 @@ func (b *Blake2b512) ValidateHash(plaintext, expectedHash []byte) (bool, error) return generic.Compare(hashed, expectedHash), nil } -func (b *Blake2b512) Hmac(key, data []byte) ([]byte, error) { - return hashBlake2b(blake2b.Size, key, data) +func (b *Blake2b512) Hmac(data []byte) ([]byte, error) { + if b.HmacSecret == nil { + return nil, ErrHmacSecretNil + } + return hashBlake2b(blake2b.Size, b.HmacSecret, data) } -func (b *Blake2b512) ValidateHmac(key, data, expectedHash []byte) (bool, error) { - hashed, err := hashBlake2b(blake2b.Size, key, data) +func (b *Blake2b512) ValidateHmac(data, expectedHash []byte) (bool, error) { + if b.HmacSecret == nil { + return false, ErrHmacSecretNil + } + hashed, err := hashBlake2b(blake2b.Size, b.HmacSecret, data) if err != nil { return false, err } @@ -155,12 +198,18 @@ func (s *Sha3256) ValidateHash(plaintext, expectedHash []byte) (bool, error) { return generic.Compare(hashed, expectedHash), nil } -func (s *Sha3256) Hmac(key, data []byte) ([]byte, error) { - return hashSha3256(key, data) +func (s *Sha3256) Hmac(data []byte) ([]byte, error) { + if s.HmacSecret == nil { + return nil, ErrHmacSecretNil + } + return hashSha3256(s.HmacSecret, data) } -func (s *Sha3256) ValidateHmac(key, data, expectedHash []byte) (bool, error) { - hashed, err := hashSha3256(key, data) +func (s *Sha3256) ValidateHmac(data, expectedHash []byte) (bool, error) { + if s.HmacSecret == nil { + return false, ErrHmacSecretNil + } + hashed, err := hashSha3256(s.HmacSecret, data) if err != nil { return false, err } @@ -208,12 +257,18 @@ func (s *Sha3384) ValidateHash(plaintext, expectedHash []byte) (bool, error) { return generic.Compare(hashed, expectedHash), nil } -func (s *Sha3384) Hmac(key, data []byte) ([]byte, error) { - return hashSha3384(key, data) +func (s *Sha3384) Hmac(data []byte) ([]byte, error) { + if s.HmacSecret == nil { + return nil, ErrHmacSecretNil + } + return hashSha3384(s.HmacSecret, data) } -func (s *Sha3384) ValidateHmac(key, data, expectedHash []byte) (bool, error) { - hashed, err := hashSha3384(key, data) +func (s *Sha3384) ValidateHmac(data, expectedHash []byte) (bool, error) { + if s.HmacSecret == nil { + return false, ErrHmacSecretNil + } + hashed, err := hashSha3384(s.HmacSecret, data) if err != nil { return false, err } @@ -261,12 +316,18 @@ func (s *Sha3512) ValidateHash(plaintext, expectedHash []byte) (bool, error) { return generic.Compare(hashed, expectedHash), nil } -func (s *Sha3512) Hmac(key, data []byte) ([]byte, error) { - return hashSha3512(key, data) +func (s *Sha3512) Hmac(data []byte) ([]byte, error) { + if s.HmacSecret == nil { + return nil, ErrHmacSecretNil + } + return hashSha3512(s.HmacSecret, data) } -func (s *Sha3512) ValidateHmac(key, data, expectedHash []byte) (bool, error) { - hashed, err := hashSha3512(key, data) +func (s *Sha3512) ValidateHmac(data, expectedHash []byte) (bool, error) { + if s.HmacSecret == nil { + return false, ErrHmacSecretNil + } + hashed, err := hashSha3512(s.HmacSecret, data) if err != nil { return false, err } diff --git a/hash/hash_test.go b/hash/hash_test.go index cb7c005..71af3e2 100644 --- a/hash/hash_test.go +++ b/hash/hash_test.go @@ -3,6 +3,7 @@ package hash_test import ( // "encoding/hex" "encoding/hex" + "strings" "testing" hasher "github.com/D3vl0per/crypt/hash" @@ -17,21 +18,20 @@ func TestBlakes(t *testing.T) { algo hasher.Algorithms data []byte expected []byte - key []byte }{ { name: "Blake2b256", algo: &hasher.Blake2b256{}, data: 
[]byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: nil, //nolint:lll expected: []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237}, }, { name: "Blake2b256 HMAC", - algo: &hasher.Blake2b256{}, + algo: &hasher.Blake2b256{ + HmacSecret: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + }, data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll expected: []byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169}, }, @@ -39,15 +39,15 @@ func TestBlakes(t *testing.T) { name: "Blake2b384", algo: &hasher.Blake2b384{}, data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: nil, //nolint:lll expected: []byte{50, 67, 235, 235, 146, 164, 58, 187, 5, 182, 182, 179, 132, 31, 200, 27, 68, 50, 83, 71, 221, 131, 86, 164, 203, 194, 251, 64, 172, 45, 105, 200, 90, 118, 50, 47, 37, 237, 28, 153, 88, 166, 95, 221, 138, 249, 176, 116}, }, { name: "Blake2b384 HMAC", - algo: &hasher.Blake2b384{}, + algo: &hasher.Blake2b384{ + HmacSecret: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + }, data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll expected: []byte{2, 58, 228, 49, 225, 253, 51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169}, }, @@ -55,15 +55,15 @@ func TestBlakes(t *testing.T) { name: "Blake2b512", algo: &hasher.Blake2b512{}, data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: nil, //nolint:lll expected: []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}, }, { name: "Blake2b512 HMAC", - algo: &hasher.Blake2b512{}, + algo: &hasher.Blake2b512{ + HmacSecret: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + }, data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, }, @@ -71,7 +71,6 @@ func TestBlakes(t *testing.T) { name: "SHA3-256", algo: &hasher.Sha3256{}, data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: nil, //nolint:lll expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, }, @@ -79,7 +78,6 @@ func TestBlakes(t *testing.T) { name: "SHA3-384", algo: &hasher.Sha3384{}, data: 
[]byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: nil, //nolint:lll expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, }, @@ -87,7 +85,6 @@ func TestBlakes(t *testing.T) { name: "SHA3-512", algo: &hasher.Sha3512{}, data: []byte("m82yeNhzBX6xKmyTqW70M4Cw9bNaZYYYRxbYgFSSXQG7hDPvQx2Q7anSWTgCshvh"), - key: nil, //nolint:lll expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 100, 38, 202}, }, @@ -95,22 +92,22 @@ func TestBlakes(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if test.key == nil { - hash, err := test.algo.Hash(test.data) + if strings.Contains(test.name, "HMAC") { + hash, err := test.algo.Hmac(test.data) r.NoError(t, err) t.Log("Hash: ", hex.EncodeToString(hash)) r.Equal(t, test.expected, hash) - validate, err := test.algo.ValidateHash(test.data, hash) + validate, err := test.algo.ValidateHmac(test.data, hash) r.NoError(t, err) r.True(t, validate) } else { - hash, err := test.algo.Hmac(test.key, test.data) + hash, err := test.algo.Hash(test.data) r.NoError(t, err) t.Log("Hash: ", hex.EncodeToString(hash)) r.Equal(t, test.expected, hash) - validate, err := test.algo.ValidateHmac(test.key, test.data, hash) + validate, err := test.algo.ValidateHash(test.data, hash) r.NoError(t, err) r.True(t, validate) } @@ -126,21 +123,20 @@ func TestFaultBlakes(t *testing.T) { algo hasher.Algorithms data []byte expected []byte - key []byte }{ { name: "Blake2b256", algo: &hasher.Blake2b256{}, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: nil, //nolint:lll expected: []byte{32, 109, 96, 136, 177, 62, 96, 1, 20, 103, 183, 90, 60, 235, 88, 246, 192, 122, 156, 107, 186, 36, 51, 3, 141, 52, 76, 81, 98, 229, 179, 237}, }, { name: "Blake2b256 HMAC", - algo: &hasher.Blake2b256{}, + algo: &hasher.Blake2b256{ + HmacSecret: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + }, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll expected: []byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169}, }, @@ -148,15 +144,15 @@ func TestFaultBlakes(t *testing.T) { name: "Blake2b384", algo: &hasher.Blake2b384{}, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: nil, //nolint:lll expected: []byte{50, 67, 235, 235, 146, 164, 58, 187, 5, 182, 182, 179, 132, 31, 200, 27, 68, 50, 83, 71, 221, 131, 86, 164, 203, 194, 251, 64, 172, 45, 105, 200, 90, 118, 50, 47, 37, 237, 28, 153, 88, 166, 95, 221, 138, 249, 176, 116}, }, { name: "Blake2b384 HMAC", - algo: &hasher.Blake2b384{}, + algo: &hasher.Blake2b384{ + HmacSecret: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + }, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll expected: []byte{2, 58, 228, 49, 225, 253, 
51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169}, }, @@ -164,15 +160,15 @@ func TestFaultBlakes(t *testing.T) { name: "Blake2b512", algo: &hasher.Blake2b512{}, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: nil, //nolint:lll expected: []byte{119, 40, 57, 190, 223, 104, 229, 179, 37, 38, 116, 236, 59, 79, 64, 38, 242, 100, 128, 101, 147, 40, 14, 159, 186, 100, 251, 182, 206, 58, 244, 200, 26, 133, 123, 65, 131, 213, 220, 248, 152, 111, 73, 93, 126, 181, 139, 26, 48, 40, 254, 156, 254, 108, 19, 47, 92, 67, 209, 60, 127, 148, 155, 39}, }, { name: "Blake2b512 HMAC", - algo: &hasher.Blake2b512{}, + algo: &hasher.Blake2b512{ + HmacSecret: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), + }, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: []byte("fa430a028a6cf6678b1d52d4959af4b78364b986ad08ba79e57d03f71a35d633"), //nolint:lll expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, }, @@ -180,7 +176,6 @@ func TestFaultBlakes(t *testing.T) { name: "SHA3-256", algo: &hasher.Sha3256{}, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: nil, //nolint:lll expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, }, @@ -188,7 +183,6 @@ func TestFaultBlakes(t *testing.T) { name: "SHA3-384", algo: &hasher.Sha3384{}, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: nil, //nolint:lll expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, }, @@ -196,7 +190,6 @@ func TestFaultBlakes(t *testing.T) { name: "SHA3-512", algo: &hasher.Sha3512{}, data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), - key: nil, //nolint:lll expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 100, 38, 202}, }, @@ -204,16 +197,80 @@ func TestFaultBlakes(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if test.key == nil { + if !strings.Contains(test.name, "HMAC") { validate, err := test.algo.ValidateHash(test.data, test.expected) r.NoError(t, err) r.False(t, validate) } else { - validate, err := test.algo.ValidateHmac(test.key, test.data, test.expected) + validate, err := test.algo.ValidateHmac(test.data, test.expected) r.NoError(t, err) r.False(t, validate) } }) } +} + +func TestNilKeyError(t *testing.T) { + + tests := []struct { + name string + algo hasher.Algorithms + data []byte + expected []byte + }{ + { + name: "Blake2b256 HMAC", + algo: &hasher.Blake2b256{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + //nolint:lll + expected: 
[]byte{141, 216, 41, 55, 227, 130, 65, 74, 238, 19, 155, 174, 22, 46, 103, 68, 212, 184, 176, 225, 176, 182, 94, 11, 128, 55, 85, 127, 136, 105, 14, 169}, + }, + { + name: "Blake2b384 HMAC", + algo: &hasher.Blake2b384{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + //nolint:lll + expected: []byte{2, 58, 228, 49, 225, 253, 51, 171, 34, 190, 207, 112, 186, 131, 0, 65, 58, 117, 119, 182, 72, 69, 151, 185, 128, 227, 180, 137, 5, 39, 172, 99, 21, 102, 79, 245, 62, 180, 104, 244, 218, 233, 60, 57, 161, 15, 31, 169}, + }, + { + name: "Blake2b512 HMAC", + algo: &hasher.Blake2b512{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + //nolint:lll + expected: []byte{216, 241, 59, 128, 75, 177, 73, 147, 208, 198, 138, 37, 187, 128, 230, 173, 60, 117, 96, 33, 223, 55, 143, 219, 51, 47, 108, 67, 98, 0, 159, 197, 24, 112, 56, 191, 150, 82, 9, 225, 89, 0, 213, 168, 81, 69, 18, 10, 189, 249, 143, 31, 55, 119, 242, 126, 205, 253, 41, 158, 156, 30, 188, 105}, + }, + { + name: "SHA3-256 HMAC", + algo: &hasher.Sha3256{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + //nolint:lll + expected: []byte{195, 62, 41, 181, 107, 38, 110, 3, 129, 21, 52, 217, 117, 49, 247, 163, 218, 89, 94, 205, 254, 161, 207, 196, 114, 73, 155, 161, 61, 38, 229, 59}, + }, + { + name: "SHA3-384 HMAC", + algo: &hasher.Sha3384{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + //nolint:lll + expected: []byte{13, 164, 89, 48, 108, 199, 207, 244, 184, 228, 229, 210, 233, 175, 29, 85, 79, 200, 21, 45, 82, 193, 210, 227, 195, 78, 6, 230, 102, 127, 126, 121, 118, 120, 44, 105, 214, 238, 75, 46, 166, 133, 61, 161, 228, 2, 6, 46}, + }, + { + name: "SHA3-512 HMAC", + algo: &hasher.Sha3512{}, + data: []byte("aing7jei3eebeaMohjeesheeph0ichaiXual4vah1Eeg3eikai7aichoeliej1da"), + //nolint:lll + expected: []byte{125, 21, 172, 36, 13, 53, 250, 136, 28, 214, 188, 8, 227, 249, 19, 86, 128, 200, 212, 106, 225, 41, 67, 3, 81, 115, 58, 187, 209, 129, 44, 191, 163, 205, 134, 207, 246, 127, 72, 31, 9, 11, 33, 184, 131, 16, 44, 152, 2, 55, 71, 215, 195, 73, 233, 147, 80, 13, 79, 131, 146, 100, 38, 202}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hash, err := test.algo.Hmac(test.data) + r.ErrorIs(t, err, hasher.ErrHmacSecretNil) + r.Len(t, hash, 0) + validate, err := test.algo.ValidateHmac(test.data, test.expected) + r.ErrorIs(t, err, hasher.ErrHmacSecretNil) + r.False(t, validate) + }) + } } diff --git a/insecure/symmetric/symmetric.go b/insecure/symmetric/symmetric.go index a3d944e..d3d40e2 100644 --- a/insecure/symmetric/symmetric.go +++ b/insecure/symmetric/symmetric.go @@ -2,6 +2,7 @@ package symmetric import ( "crypto/aes" + "crypto/subtle" "errors" "github.com/D3vl0per/crypt/generic" @@ -14,7 +15,7 @@ func EncryptSecretBox(secret, plaintext []byte) ([]byte, error) { } var secretKey [32]byte - copy(secretKey[:], secret) + subtle.ConstantTimeCopy(1, secretKey[:], secret) nonce_raw, err := generic.CSPRNG(24) if err != nil { @@ -22,7 +23,7 @@ func EncryptSecretBox(secret, plaintext []byte) ([]byte, error) { } var nonce [24]byte - copy(nonce[:], nonce_raw) + subtle.ConstantTimeCopy(1, nonce[:], nonce_raw) return secretbox.Seal(nonce[:], plaintext, &nonce, &secretKey), nil } @@ -33,10 +34,10 @@ func DecryptSecretBox(secret, ciphertext []byte) ([]byte, error) { } var secretKey [32]byte - copy(secretKey[:], secret) + subtle.ConstantTimeCopy(1, secretKey[:], secret) var 
nonce [24]byte - copy(nonce[:], ciphertext[:24]) + subtle.ConstantTimeCopy(1, nonce[:], ciphertext[:24]) decrypted, ok := secretbox.Open(nil, ciphertext[24:], &nonce, &secretKey) if !ok { diff --git a/symmetric/symmetric.go b/symmetric/symmetric.go index 992257e..578699a 100644 --- a/symmetric/symmetric.go +++ b/symmetric/symmetric.go @@ -1,6 +1,7 @@ package symmetric import ( + "crypto/subtle" "errors" "hash" "io" @@ -30,9 +31,9 @@ type XChaCha20Stream struct { Hash func() hash.Hash } -// / -// / XChaCha20-Poly1305 -// / +/// +/// XChaCha20-Poly1305 +/// func (x *XChaCha20) Encrypt(key, plaintext []byte) ([]byte, error) { if len(key) != chacha20poly1305.KeySize { return []byte{}, errors.New("wrong key size") @@ -74,23 +75,26 @@ func (x *XChaCha20) Decrypt(key, ciphertext []byte) ([]byte, error) { return plaintext, nil } -// / -// / XOR -// / +/// +/// XOR +/// func (x *Xor) Encrypt(key, payload []byte) ([]byte, error) { if len(payload) != len(key) { - return []byte{}, errors.New("insecure xor operation, key and payload length need to equal") + return []byte{}, errors.New("insecure xor operation, key and payload length need to be equal") } + xored := make([]byte, len(payload)) - for i := range payload { - xored[i] = payload[i] ^ key[i] - } + subtle.XORBytes(xored, payload, key) if len(payload) != len(xored) || len(key) != len(xored) { return []byte{}, errors.New("xored array length mismatch") } + if generic.AllZero(xored) { + return []byte{}, errors.New("xored array has just zeroes") + } + return xored, nil } @@ -98,6 +102,9 @@ func (x *Xor) Decrypt(key, payload []byte) ([]byte, error) { return x.Encrypt(key, payload) } +/// +/// XChaCha20-Poly1305 Age Stream +/// func (x *XChaCha20Stream) Encrypt(in io.Reader, out io.Writer) error { if len(x.Key) != chacha20poly1305.KeySize { return errors.New("wrong key size") From 0251304794137be2775164e6becef8c1e654bed3 Mon Sep 17 00:00:00 2001 From: D3v Date: Wed, 22 Nov 2023 00:17:50 +0100 Subject: [PATCH 08/12] Missing make lint --- asymmetric/asymmetric_test.go | 6 ++---- compression/compression_test.go | 4 ++-- generic/fs.go | 4 ++-- generic/imports_exports.go | 15 ++++++++------- hash/fs.go | 2 +- symmetric/symmetric.go | 19 +++++++++---------- 6 files changed, 24 insertions(+), 26 deletions(-) diff --git a/asymmetric/asymmetric_test.go b/asymmetric/asymmetric_test.go index 68d66eb..badce1b 100644 --- a/asymmetric/asymmetric_test.go +++ b/asymmetric/asymmetric_test.go @@ -47,7 +47,7 @@ func TestE2EEEd25519SignVerify(t *testing.T) { msg := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit.") tests := []struct { - name string + name string asym asymmetric.Ed25519 }{ { @@ -97,7 +97,6 @@ func TestE2EEEd25519SignVerify(t *testing.T) { }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := tt.asym.Generate() @@ -132,7 +131,7 @@ func TestE2EEEd448SignVerify(t *testing.T) { msg := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit.") tests := []struct { - name string + name string asym asymmetric.Ed448 }{ { @@ -182,7 +181,6 @@ func TestE2EEEd448SignVerify(t *testing.T) { }, } - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := tt.asym.Generate() diff --git a/compression/compression_test.go b/compression/compression_test.go index 5821588..fa2f3b4 100644 --- a/compression/compression_test.go +++ b/compression/compression_test.go @@ -31,8 +31,8 @@ func TestRoundTrips(t *testing.T) { zstdModes := []int{11, 7, 3, 1} tests := []struct { - name string - data []byte + name string + data []byte 
}{ { name: "Random data", diff --git a/generic/fs.go b/generic/fs.go index b2d0ba6..e069ec0 100644 --- a/generic/fs.go +++ b/generic/fs.go @@ -152,7 +152,7 @@ func ReadFileContent(path string) ([]byte, error) { return data, nil } -func WriteAndFlush(file *os.File, rnd []byte) (n int, err error){ +func WriteAndFlush(file *os.File, rnd []byte) (n int, err error) { n, err = file.Write(rnd) if err != nil { return 0, err @@ -170,4 +170,4 @@ func WriteAndFlush(file *os.File, rnd []byte) (n int, err error){ return 0, err } return n, nil -} \ No newline at end of file +} diff --git a/generic/imports_exports.go b/generic/imports_exports.go index 8e04020..a4998ed 100644 --- a/generic/imports_exports.go +++ b/generic/imports_exports.go @@ -28,10 +28,10 @@ type PKIX struct { } type PKCS struct { - SecretKey ed25519.PrivateKey - ImportData string - ExportData string - Encoder Encoder + SecretKey ed25519.PrivateKey + ImportData string + ExportData string + Encoder Encoder } // struct PKIX ImportData (string) -> struct PKIX PublicKey (ed25519.PublicKey) @@ -67,19 +67,20 @@ func (e *PKIX) Import() error { e.PublicKey = pkC.(ed25519.PublicKey) return nil } + // Two ways to export: // 1. struct PKIX PublicKey (ed25519.PublicKey) -> struct PKIX ExportData (string) // 2. struct PKIX ExportPublicKey (crypto.PublicKey) -> struct PKIX ExportData (string) func (e *PKIX) Export() error { - + if e.ExportPublicKey == nil && e.PublicKey == nil { return errors.New("missing public key") } - + if e.ExportPublicKey != nil && e.PublicKey != nil { return errors.New("cannot export both public key and export public key") } - + var err error var marshal []byte if e.ExportPublicKey != nil { diff --git a/hash/fs.go b/hash/fs.go index c8d7d79..a0345d8 100644 --- a/hash/fs.go +++ b/hash/fs.go @@ -24,4 +24,4 @@ func ReadFileContentAndHmac(algo Algorithms, path string) ([]byte, error) { return []byte{}, err } return hash, nil -} \ No newline at end of file +} diff --git a/symmetric/symmetric.go b/symmetric/symmetric.go index 578699a..2924004 100644 --- a/symmetric/symmetric.go +++ b/symmetric/symmetric.go @@ -31,9 +31,9 @@ type XChaCha20Stream struct { Hash func() hash.Hash } -/// -/// XChaCha20-Poly1305 -/// +// / +// / XChaCha20-Poly1305 +// / func (x *XChaCha20) Encrypt(key, plaintext []byte) ([]byte, error) { if len(key) != chacha20poly1305.KeySize { return []byte{}, errors.New("wrong key size") @@ -75,15 +75,14 @@ func (x *XChaCha20) Decrypt(key, ciphertext []byte) ([]byte, error) { return plaintext, nil } -/// -/// XOR -/// +// / +// / XOR +// / func (x *Xor) Encrypt(key, payload []byte) ([]byte, error) { if len(payload) != len(key) { return []byte{}, errors.New("insecure xor operation, key and payload length need to be equal") } - xored := make([]byte, len(payload)) subtle.XORBytes(xored, payload, key) @@ -102,9 +101,9 @@ func (x *Xor) Decrypt(key, payload []byte) ([]byte, error) { return x.Encrypt(key, payload) } -/// -/// XChaCha20-Poly1305 Age Stream -/// +// / +// / XChaCha20-Poly1305 Age Stream +// / func (x *XChaCha20Stream) Encrypt(in io.Reader, out io.Writer) error { if len(x.Key) != chacha20poly1305.KeySize { return errors.New("wrong key size") From 6aa505ab2ba04d37b531ccf7833dfbb7a055766f Mon Sep 17 00:00:00 2001 From: D3v Date: Wed, 22 Nov 2023 00:24:28 +0100 Subject: [PATCH 09/12] Klauspost Compress version bump to v1.17.3 --- go.mod | 2 +- go.sum | 10 +++++ .../github.com/klauspost/compress/README.md | 8 ++++ .../klauspost/compress/fse/compress.go | 2 +- .../klauspost/compress/zstd/enc_best.go | 44 
++++++++++++------- .../klauspost/compress/zstd/enc_better.go | 17 ++++--- vendor/modules.txt | 6 +-- 7 files changed, 57 insertions(+), 32 deletions(-) diff --git a/go.mod b/go.mod index 66cf823..204b3d5 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.3 require ( filippo.io/age v1.1.1 github.com/cloudflare/circl v1.3.6 - github.com/klauspost/compress v1.17.2 + github.com/klauspost/compress v1.17.3 github.com/stretchr/testify v1.8.4 golang.org/x/crypto v0.15.0 ) diff --git a/go.sum b/go.sum index 6576747..d51f52f 100644 --- a/go.sum +++ b/go.sum @@ -2,12 +2,21 @@ filippo.io/age v1.1.1 h1:pIpO7l151hCnQ4BdyBujnGP2YlUo0uj6sAVNHGBvXHg= filippo.io/age v1.1.1/go.mod h1:l03SrzDUrBkdBx8+IILdnn2KZysqQdbEBUQ4p3sqEQE= github.com/cloudflare/circl v1.3.6 h1:/xbKIqSHbZXHwkhbrhrt2YOHIwYJlXH94E3tI/gDlUg= github.com/cloudflare/circl v1.3.6/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= +github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= @@ -16,5 +25,6 @@ golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 43de486..7e83f58 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,14 @@ This package provides various compression algorithms. 
# changelog +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 65d7773..074018d 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -212,7 +212,7 @@ func (s *Scratch) writeCount() error { previous0 bool charnum uint16 - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 // Write Table Size bitStream = uint32(tableLog - minTablelog) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 858f8f4..c81a153 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) { if m.rep < 0 { ofc = ofCode(uint32(m.s-m.offset) + 3) } else { - ofc = ofCode(uint32(m.rep)) + ofc = ofCode(uint32(m.rep) & 3) } // Cost, excluding ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] @@ -227,7 +227,7 @@ encodeLoop: } } l := 4 + e.matchlen(s+4, offset+4, src) - if rep < 0 { + if true { // Extend candidate match backwards as far as possible. tMin := s - e.maxMatchOff if tMin < 0 { @@ -282,6 +282,7 @@ encodeLoop: // Load next and check... e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 // Look far ahead, unless we have a really long match already... if best.length < goodEnough { @@ -357,19 +358,16 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index old s + 1 -> s - 1 - index0 := s + 1 s = best.s + best.length - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } off := index0 + e.cur - for index0 < s { + for index0 < end { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -378,6 +376,7 @@ encodeLoop: off++ index0++ } + switch best.rep { case 2, 4 | 1: offset1, offset2 = offset2, offset1 @@ -386,12 +385,17 @@ encodeLoop: case 4 | 3: offset1, offset2, offset3 = offset1-1, offset1, offset2 } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } continue } // A 4-byte match has been found. Update recent offsets. 
// We'll later see if more than 4 bytes. - index0 := s + 1 s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 @@ -419,19 +423,25 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) nextEmit = s - if s >= sLimit { - break encodeLoop + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 } - // Index old s + 1 -> s - 1 - for index0 < s { + off := index0 + e.cur + for index0 < end { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ + off++ + } + if s >= sLimit { + break encodeLoop } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 8582f31..20d25b0 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -145,7 +145,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched int32 + var matched, index0 int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -162,6 +162,7 @@ encodeLoop: off := s + e.cur e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -258,7 +259,6 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -498,15 +498,15 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 + off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 + off += 2 } cv = load6432(src, s) @@ -672,7 +672,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched int32 + var matched, index0 int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -691,6 +691,7 @@ encodeLoop: e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} e.markShortShardDirty(nextHashS) + index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -726,7 +727,6 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - index0 := s + repOff s += lenght + repOff nextEmit = s @@ -790,7 +790,6 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -1024,18 +1023,18 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 + off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = 
tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 + off += 2 } cv = load6432(src, s) diff --git a/vendor/modules.txt b/vendor/modules.txt index b9786fb..8910215 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,10 +17,8 @@ github.com/cloudflare/circl/sign/ed448 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/google/uuid v1.4.0 -## explicit -# github.com/klauspost/compress v1.17.2 -## explicit; go 1.18 +# github.com/klauspost/compress v1.17.3 +## explicit; go 1.19 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse From 5bdd804d455f15adb297d8b10d254b1a1b5e7cce Mon Sep 17 00:00:00 2001 From: D3v Date: Wed, 22 Nov 2023 01:32:53 +0100 Subject: [PATCH 10/12] AES-GCM with AAD, XChaCha20 AAD refact --- README.md | 14 ++-- hash/hash_test.go | 2 +- symmetric/symmetric.go | 126 ++++++++++++++++++++++++++++++------ symmetric/symmetric_test.go | 99 ++++++++++++++++++++++++++++ 4 files changed, 217 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index f444704..8fd2f00 100644 --- a/README.md +++ b/README.md @@ -14,11 +14,15 @@ Crypto suite: - Base64 - Base32 - Hex + - Key Wrappers + - ed25519 + - PKIX + - PKCS - Symmetric - XChacha20-poly1305 - XChacha20-poly1305 Stream (modified age code) - XOR - - AES-GCM (pending) + - AES-GCM - Asymmetric - ed25519 - ed448 @@ -27,15 +31,15 @@ Crypto suite: - Blake2b-256 - Blake2b-384 - Blake2b-512 - - Argon2id - - Scrypt (pending) - - HKDF (pending) - SHA3-256 - SHA3-384 - SHA3-512 - SHAKE-128 (pending) - SHAKE-256 (pending) - go_simhash (pending) + - Argon2id + - Scrypt (pending) + - HKDF (pending) - Compression - flate - gzip @@ -43,5 +47,5 @@ Crypto suite: - zstd - Aged - Age encryption suite - - Age header obfuscation + - Age header obfuscation v1 diff --git a/hash/hash_test.go b/hash/hash_test.go index 71af3e2..b0fd77f 100644 --- a/hash/hash_test.go +++ b/hash/hash_test.go @@ -266,7 +266,7 @@ func TestNilKeyError(t *testing.T) { t.Run(test.name, func(t *testing.T) { hash, err := test.algo.Hmac(test.data) r.ErrorIs(t, err, hasher.ErrHmacSecretNil) - r.Len(t, hash, 0) + r.Empty(t, hash) validate, err := test.algo.ValidateHmac(test.data, test.expected) r.ErrorIs(t, err, hasher.ErrHmacSecretNil) diff --git a/symmetric/symmetric.go b/symmetric/symmetric.go index 2924004..2ba7d65 100644 --- a/symmetric/symmetric.go +++ b/symmetric/symmetric.go @@ -1,6 +1,8 @@ package symmetric import ( + "crypto/aes" + "crypto/cipher" "crypto/subtle" "errors" "hash" @@ -23,17 +25,20 @@ type SymmetricStream interface { Decrypt(io.Reader, io.Writer) error } -type XChaCha20 struct{} +type XChaCha20 struct { + AdditionalData []byte +} type Xor struct{} +type AesGCM struct { + AdditionalData []byte +} type XChaCha20Stream struct { Key []byte Hash func() hash.Hash } -// / -// / XChaCha20-Poly1305 -// / +// XChaCha20-Poly1305 func (x *XChaCha20) Encrypt(key, plaintext []byte) ([]byte, error) { if len(key) != chacha20poly1305.KeySize { return []byte{}, errors.New("wrong key size") @@ -46,10 +51,15 @@ func (x *XChaCha20) Encrypt(key, plaintext []byte) ([]byte, error) { nonce := make([]byte, aead.NonceSize(), aead.NonceSize()+len(plaintext)+aead.Overhead()) if _, err := generic.Rand().Read(nonce); err != nil { - panic(err) + return []byte{}, err + } + + if x.AdditionalData != nil { + return aead.Seal(nonce, nonce, plaintext, x.AdditionalData), nil + } else { + return aead.Seal(nonce, nonce, plaintext, nil), nil } - return 
aead.Seal(nonce, nonce, plaintext, nil), nil } func (x *XChaCha20) Decrypt(key, ciphertext []byte) ([]byte, error) { @@ -68,16 +78,22 @@ func (x *XChaCha20) Decrypt(key, ciphertext []byte) ([]byte, error) { nonce, ciphertext := ciphertext[:aead.NonceSize()], ciphertext[aead.NonceSize():] - plaintext, err := aead.Open(nil, nonce, ciphertext, nil) - if err != nil { - return []byte{}, err + if x.AdditionalData != nil { + payload, err := aead.Open(nil, nonce, ciphertext, x.AdditionalData) + if err != nil { + return []byte{}, err + } + return payload, nil + } else { + payload, err := aead.Open(nil, nonce, ciphertext, nil) + if err != nil { + return []byte{}, err + } + return payload, nil } - return plaintext, nil } -// / -// / XOR -// / +// XOR func (x *Xor) Encrypt(key, payload []byte) ([]byte, error) { if len(payload) != len(key) { return []byte{}, errors.New("insecure xor operation, key and payload length need to be equal") @@ -101,9 +117,7 @@ func (x *Xor) Decrypt(key, payload []byte) ([]byte, error) { return x.Encrypt(key, payload) } -// / -// / XChaCha20-Poly1305 Age Stream -// / +// XChaCha20-Poly1305 Age Stream func (x *XChaCha20Stream) Encrypt(in io.Reader, out io.Writer) error { if len(x.Key) != chacha20poly1305.KeySize { return errors.New("wrong key size") @@ -112,7 +126,7 @@ func (x *XChaCha20Stream) Encrypt(in io.Reader, out io.Writer) error { var str stream if x.Hash == nil { str = stream{ - Hash: sha3.New384, + Hash: sha3.New512, } } else { str = stream{Hash: x.Hash} @@ -139,7 +153,7 @@ func (x *XChaCha20Stream) Decrypt(in io.Reader, out io.Writer) error { var str stream if x.Hash == nil { str = stream{ - Hash: sha3.New384, + Hash: sha3.New512, } } else { str = stream{Hash: x.Hash} @@ -203,3 +217,79 @@ func (s *stream) key(fileKey, nonce []byte) ([]byte, error) { } return streamKey, nil } + +// +// AES-GCM 256 +// + +func (a *AesGCM) Encrypt(key, payload []byte) ([]byte, error) { + if generic.AllZero(key) { + return []byte{}, errors.New("key is all zero") + } + + if len(key) != 32 { + return []byte{}, errors.New("wrong key size, must be 32 bytes") + } + + aes, err := aes.NewCipher(key) + if err != nil { + return []byte{}, err + } + + gcm, err := cipher.NewGCM(aes) + if err != nil { + return []byte{}, err + } + + nonce := make([]byte, gcm.NonceSize(), gcm.NonceSize()+len(payload)+gcm.Overhead()) + if _, err := generic.Rand().Read(nonce); err != nil { + return []byte{}, err + } + + if a.AdditionalData != nil { + return gcm.Seal(nonce, nonce, payload, a.AdditionalData), nil + } else { + return gcm.Seal(nonce, nonce, payload, nil), nil + } +} + +func (a *AesGCM) Decrypt(key, ciphertext []byte) ([]byte, error) { + if generic.AllZero(key) { + return []byte{}, errors.New("key is all zero") + } + + if len(key) != 32 { + return []byte{}, errors.New("wrong key size, must be 32 bytes") + } + + aes, err := aes.NewCipher(key) + if err != nil { + return []byte{}, err + } + + gcm, err := cipher.NewGCM(aes) + if err != nil { + return []byte{}, err + } + + nonceSize := gcm.NonceSize() + if len(ciphertext) < nonceSize { + return []byte{}, errors.New("ciphertext too short") + } + + nonce, rawCiphertext := ciphertext[:nonceSize], ciphertext[nonceSize:] + + if a.AdditionalData != nil { + payload, err := gcm.Open(nil, nonce, rawCiphertext, a.AdditionalData) + if err != nil { + return []byte{}, err + } + return payload, nil + } else { + payload, err := gcm.Open(nil, nonce, rawCiphertext, nil) + if err != nil { + return []byte{}, err + } + return payload, nil + } +} diff --git 
a/symmetric/symmetric_test.go b/symmetric/symmetric_test.go index ca91424..fa5812f 100644 --- a/symmetric/symmetric_test.go +++ b/symmetric/symmetric_test.go @@ -61,6 +61,27 @@ func TestXChaCha20(t *testing.T) { r.NoError(t, err) r.Equal(t, payload, plaintext) + + // AAD test + aad := []byte("proposal") + + sym3 := symmetric.XChaCha20{ + AdditionalData: aad, + } + + ciphertext2, err := sym3.Encrypt(secret, payload) + r.NoError(t, err) + t.Log("Ciphertext AAD (hex): ", hex.EncodeToString(ciphertext2)) + t.Log("AAD (hex): ", hex.EncodeToString(aad)) + + sym4 := symmetric.XChaCha20{ + AdditionalData: aad, + } + + plaintext2, err := sym4.Decrypt(secret, ciphertext2) + r.NoError(t, err) + + r.Equal(t, payload, plaintext2) } func TestXOR(t *testing.T) { @@ -74,3 +95,81 @@ func TestXOR(t *testing.T) { r.Equal(t, expected, result) } + +func TestAESGCM(t *testing.T) { + secret, err := generic.CSPRNG(32) + r.NoError(t, err) + payload := []byte("https://xkcd.com/936/") + + sym := symmetric.AesGCM{} + + ciphertext, err := sym.Encrypt(secret, payload) + r.NoError(t, err) + t.Log("Ciphertext (hex): ", hex.EncodeToString(ciphertext)) + + sym2 := symmetric.AesGCM{} + + plaintext, err := sym2.Decrypt(secret, ciphertext) + r.NoError(t, err) + + r.Equal(t, payload, plaintext) + + // AAD test + aad := []byte("proposal") + + sym3 := symmetric.AesGCM{ + AdditionalData: aad, + } + + ciphertext2, err := sym3.Encrypt(secret, payload) + r.NoError(t, err) + t.Log("Ciphertext AAD (hex): ", hex.EncodeToString(ciphertext2)) + t.Log("AAD (hex): ", hex.EncodeToString(aad)) + + sym4 := symmetric.AesGCM{ + AdditionalData: aad, + } + + plaintext2, err := sym4.Decrypt(secret, ciphertext2) + r.NoError(t, err) + + r.Equal(t, payload, plaintext2) } + +func TestAESGCMFails(t *testing.T) { + secret, err := generic.CSPRNG(32) + r.NoError(t, err) + + payload := []byte("https://xkcd.com/936/") + + sym := symmetric.AesGCM{} + + zeroKey := make([]byte, 32) + zeroCiphertext, err := sym.Encrypt(zeroKey, payload) + r.Error(t, err) + r.Empty(t, zeroCiphertext) + r.EqualError(t, err, "key is all zero") + + invalidKey := []byte("0123456789abcdef") + invalidCiphertext, err := sym.Encrypt(invalidKey, payload) + r.Error(t, err) + r.Empty(t, invalidCiphertext) + r.EqualError(t, err, "wrong key size, must be 32 bytes") + + zeroPlaintext, err := sym.Decrypt(zeroKey, zeroCiphertext) + r.Error(t, err) + r.Empty(t, zeroPlaintext) + r.EqualError(t, err, "key is all zero") + + invalidCiphertext2 := []byte("ciphertext") + invalidPlaintext, err := sym.Decrypt(invalidKey, invalidCiphertext2) + r.Error(t, err) + r.Empty(t, invalidPlaintext) + r.EqualError(t, err, "wrong key size, must be 32 bytes") + + shortCiphertext := []byte("short") + shortPlaintext, err := sym.Decrypt(secret, shortCiphertext) + r.Error(t, err) + r.Empty(t, shortPlaintext) + r.EqualError(t, err, "ciphertext too short") +} From 958a1dd5dc5be7f8fbf276dc3d9d3556f0ac1b89 Mon Sep 17 00:00:00 2001 From: D3v Date: Wed, 22 Nov 2023 01:40:34 +0100 Subject: [PATCH 11/12] Replace KeychainSetup with SetupKeychainParameters --- aged/age_bind.go | 4 +- aged/age_bind_test.go | 100 +------- aged/obf_test.go | 2 +- coverage.txt | 584 ------------------------------------------ 4 files changed, 9 insertions(+), 681 deletions(-) delete mode 100644 coverage.txt diff --git a/aged/age_bind.go b/aged/age_bind.go index ae1b781..c82fed7 100644 --- a/aged/age_bind.go +++ b/aged/age_bind.go @@ -15,13 +15,13 @@ type Keychain struct { recipients []age.Recipient } -type KeychainSetup struct { +type
SetupKeychainParameters struct { SecretKey string PublicKeys []string SelfRecipient bool } -func SetupKeychain(keychainSetup KeychainSetup) (Keychain, error) { +func SetupKeychain(keychainSetup SetupKeychainParameters) (Keychain, error) { var keychain Keychain identity, err := age.ParseX25519Identity(keychainSetup.SecretKey) diff --git a/aged/age_bind_test.go b/aged/age_bind_test.go index 2fbe921..8ab4c60 100644 --- a/aged/age_bind_test.go +++ b/aged/age_bind_test.go @@ -33,21 +33,21 @@ func keychainInit(t *testing.T) chains { wrongKeypair, err := aged.GenKeypair() r.NoError(t, err) - keychain, err := aged.SetupKeychain(aged.KeychainSetup{ + keychain, err := aged.SetupKeychain(aged.SetupKeychainParameters{ SecretKey: secretKey1.String(), PublicKeys: []string{publicKey1.Recipient().String(), publicKey2.Recipient().String()}, SelfRecipient: true, }) r.NoError(t, err) - keychain2, err := aged.SetupKeychain(aged.KeychainSetup{ + keychain2, err := aged.SetupKeychain(aged.SetupKeychainParameters{ SecretKey: publicKey1.String(), PublicKeys: []string{secretKey1.Recipient().String(), publicKey2.Recipient().String()}, SelfRecipient: true, }) r.NoError(t, err) - keychainWrong, err := aged.SetupKeychain(aged.KeychainSetup{ + keychainWrong, err := aged.SetupKeychain(aged.SetupKeychainParameters{ SecretKey: wrongKeypair.String(), PublicKeys: []string{secretKey1.Recipient().String(), publicKey2.Recipient().String()}, SelfRecipient: true, @@ -84,7 +84,7 @@ func TestKeychain(t *testing.T) { func TestKeychainImportExport(t *testing.T) { keychain := keychainInit(t) - s := aged.KeychainSetup{ + s := aged.SetupKeychainParameters{ SecretKey: keychain.keychain.KeychainExportSecretKey(), PublicKeys: keychain.keychain.KeychainExport(), SelfRecipient: true, @@ -256,7 +256,7 @@ func TestRoundTrips(t *testing.T) { func TestWrongSecretKeyKeyringSetup(t *testing.T) { keychain := keychainInit(t) - s := aged.KeychainSetup{ + s := aged.SetupKeychainParameters{ SecretKey: "correct horse battery staple", PublicKeys: []string{keychain.publicKey1.Recipient().String(), keychain.publicKey2.Recipient().String()}, SelfRecipient: true, @@ -269,7 +269,7 @@ func TestWrongSecretKeyKeyringSetup(t *testing.T) { func TestWrongPublicKeyKeyringSetup(t *testing.T) { keychain := keychainInit(t) - s := aged.KeychainSetup{ + s := aged.SetupKeychainParameters{ SecretKey: keychain.keychain.KeychainExportSecretKey(), PublicKeys: []string{keychain.publicKey1.Recipient().String(), keychain.publicKey2.Recipient().String(), "correct horse battery staple"}, SelfRecipient: true, @@ -279,91 +279,3 @@ func TestWrongPublicKeyKeyringSetup(t *testing.T) { r.Error(t, err) t.Log(err.Error()) } - -/* - func TestEncryptAndDecryptCompress(t *testing.T) { - keychains := keychainInit(t) - - cipherData, err := keychains.keychain.Encrypt(plainData, true, false) - r.NoError(t, err, "Encryption without error") - t.Logf("Original size:%d Processed size: %d", len(plainData), len(cipherData)) - - decryptedData, err2 := keychains.keychain.Decrypt(cipherData, true, false) - r.NoError(t, err2, "Decryption without error") - r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain") - - decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, true, false) - r.NoError(t, err3, "Decryption two without error") - r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain") - - decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, true, false) - 
r.Equal(t, []byte{}, decryptedData3) - r.EqualError(t, err4, "no identity matched any of the recipients") - } - - func TestEncryptAndDecryptObfuscated(t *testing.T) { - keychains := keychainInit(t) - - cipherData, err := keychains.keychain.Encrypt(plainData, false, true) - r.NoError(t, err, "Encryption without error") - t.Logf("Original size:%d Processed size: %d", len(plainData), len(cipherData)) - - decryptedData, err2 := keychains.keychain.Decrypt(cipherData, false, true) - r.NoError(t, err2, "Decryption without error") - r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain") - - decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, false, true) - r.NoError(t, err3, "Decryption two without error") - r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain") - - decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, false, true) - r.Equal(t, []byte{}, decryptedData3) - r.EqualError(t, err4, "no identity matched any of the recipients") - } - - func TestEncryptAndDecryptBigFile(t *testing.T) { - keychains := keychainInit(t) - - plainText, err := generic.CSPRNG(10485760) - r.NoError(t, err, "Encryption without error") - cipherData, err := keychains.keychain.Encrypt(plainText, false, true) - r.NoError(t, err, "Encryption without error") - t.Logf("Original size:%d Processed size: %d", len(plainText), len(cipherData)) - - decryptedData, err2 := keychains.keychain.Decrypt(cipherData, false, true) - r.NoError(t, err2, "Decryption without error") - r.Equal(t, plainText, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain") - - decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, false, true) - r.NoError(t, err3, "Decryption two without error") - r.Equal(t, plainText, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain") - - decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, false, true) - r.Equal(t, []byte{}, decryptedData3) - r.EqualError(t, err4, "no identity matched any of the recipients") - } - - func TestEncryptAndDecryptCompressAndObfuscated(t *testing.T) { - keychains := keychainInit(t) - - cipherData, err := keychains.keychain.Encrypt(plainData, true, true) - r.NoError(t, err, "Encryption without error") - t.Logf("Size:%d", len(cipherData)) - - decryptedData, err2 := keychains.keychain.Decrypt(cipherData, true, true) - r.NoError(t, err2, "Decryption without error") - r.Equal(t, plainData, decryptedData, "Decrypted data is equal with the plaintext data by the same keychain") - - decryptedData2, err3 := keychains.keychain2.Decrypt(cipherData, true, true) - r.NoError(t, err3, "Decryption two without error") - r.Equal(t, plainData, decryptedData2, "Decrypted data is equal with the plaintext data by different valid keychain") - - decryptedData3, err4 := keychains.keychainWrong.Decrypt(cipherData, true, true) - r.Equal(t, []byte{}, decryptedData3) - r.EqualError(t, err4, "no identity matched any of the recipients") - } - - func TestEncryptWithPwd(t *testing.T) { - - } -*/ diff --git a/aged/obf_test.go b/aged/obf_test.go index c8bd4d3..294f4fd 100644 --- a/aged/obf_test.go +++ b/aged/obf_test.go @@ -18,7 +18,7 @@ func TestObf(t *testing.T) { r.NoError(t, err) obfuscator := aged.AgeV1Obf{} - obfKeychain, err := aged.SetupKeychain(aged.KeychainSetup{ + obfKeychain, err := aged.SetupKeychain(aged.SetupKeychainParameters{ SecretKey: obfKeypair1.String(), PublicKeys: 
[]string{obfKeypair2.Recipient().String()}, SelfRecipient: true, diff --git a/coverage.txt b/coverage.txt deleted file mode 100644 index 0f65e1f..0000000 --- a/coverage.txt +++ /dev/null @@ -1,584 +0,0 @@ -mode: atomic -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:40.36,43.16 3 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:43.16,45.3 1 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:47.2,47.12 1 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:50.55,51.43 1 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:51.43,53.3 1 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:54.2,57.12 4 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:60.43,62.2 1 1 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:64.64,67.16 2 1 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:67.16,69.3 1 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:71.2,71.55 1 1 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:78.34,81.16 3 1 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:81.16,83.3 1 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:84.2,84.12 1 1 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:87.53,88.41 1 4 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:88.41,90.3 1 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:91.2,94.12 4 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:97.41,99.2 1 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:101.62,103.16 2 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:103.16,105.3 1 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:107.2,107.64 1 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:110.74,111.27 1 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:112.25,113.18 1 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:114.10,115.44 1 0 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:119.70,120.27 1 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:121.23,122.18 1 2 -github.com/D3vl0per/crypt/asymmetric/asymmetric.go:123.10,124.44 1 0 -github.com/D3vl0per/crypt/generic/csprng.go:10.38,12.60 2 10 -github.com/D3vl0per/crypt/generic/csprng.go:12.60,14.3 1 0 -github.com/D3vl0per/crypt/generic/csprng.go:15.2,15.20 1 10 -github.com/D3vl0per/crypt/generic/csprng.go:18.41,21.2 2 9 -github.com/D3vl0per/crypt/generic/csprng.go:23.37,25.16 2 1 -github.com/D3vl0per/crypt/generic/csprng.go:25.16,27.3 1 0 -github.com/D3vl0per/crypt/generic/csprng.go:28.2,31.52 3 1 -github.com/D3vl0per/crypt/generic/csprng.go:31.52,33.3 1 0 -github.com/D3vl0per/crypt/generic/csprng.go:34.2,34.20 1 1 -github.com/D3vl0per/crypt/generic/csprng.go:37.23,39.2 1 1 -github.com/D3vl0per/crypt/generic/fs.go:10.49,11.16 1 0 -github.com/D3vl0per/crypt/generic/fs.go:11.16,13.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:15.2,16.16 2 0 -github.com/D3vl0per/crypt/generic/fs.go:16.16,18.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:19.2,22.16 3 0 -github.com/D3vl0per/crypt/generic/fs.go:22.16,24.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:26.2,29.29 3 0 -github.com/D3vl0per/crypt/generic/fs.go:29.29,32.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:32.17,34.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:35.3,36.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:36.17,38.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:39.3,39.32 1 0 -github.com/D3vl0per/crypt/generic/fs.go:39.32,41.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:43.3,44.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:44.17,46.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:47.3,48.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:48.17,50.4 1 0 
-github.com/D3vl0per/crypt/generic/fs.go:51.3,52.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:52.17,54.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:56.3,56.32 1 0 -github.com/D3vl0per/crypt/generic/fs.go:56.32,58.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:61.2,62.16 2 0 -github.com/D3vl0per/crypt/generic/fs.go:62.16,64.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:65.2,65.12 1 0 -github.com/D3vl0per/crypt/generic/fs.go:68.65,69.16 1 0 -github.com/D3vl0per/crypt/generic/fs.go:69.16,71.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:74.2,75.16 2 0 -github.com/D3vl0per/crypt/generic/fs.go:75.16,77.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:78.2,81.16 3 0 -github.com/D3vl0per/crypt/generic/fs.go:81.16,83.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:85.2,88.29 3 0 -github.com/D3vl0per/crypt/generic/fs.go:88.29,91.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:91.17,93.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:94.3,95.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:95.17,97.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:99.3,99.32 1 0 -github.com/D3vl0per/crypt/generic/fs.go:99.32,101.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:103.3,104.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:104.17,106.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:108.3,109.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:109.17,111.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:112.3,113.17 2 0 -github.com/D3vl0per/crypt/generic/fs.go:113.17,115.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:116.3,116.32 1 0 -github.com/D3vl0per/crypt/generic/fs.go:116.32,118.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:120.2,121.16 2 0 -github.com/D3vl0per/crypt/generic/fs.go:121.16,123.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:124.2,125.16 2 0 -github.com/D3vl0per/crypt/generic/fs.go:125.16,127.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:128.2,128.31 1 0 -github.com/D3vl0per/crypt/generic/fs.go:128.31,130.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:131.2,131.12 1 0 -github.com/D3vl0per/crypt/generic/fs.go:134.63,137.87 2 0 -github.com/D3vl0per/crypt/generic/fs.go:137.87,138.17 1 0 -github.com/D3vl0per/crypt/generic/fs.go:138.17,140.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:142.3,142.43 1 0 -github.com/D3vl0per/crypt/generic/fs.go:142.43,144.4 1 0 -github.com/D3vl0per/crypt/generic/fs.go:146.3,146.13 1 0 -github.com/D3vl0per/crypt/generic/fs.go:148.2,148.16 1 0 -github.com/D3vl0per/crypt/generic/fs.go:148.16,150.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:151.2,151.19 1 0 -github.com/D3vl0per/crypt/generic/fs.go:154.51,157.16 2 0 -github.com/D3vl0per/crypt/generic/fs.go:157.16,159.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:160.2,162.16 3 0 -github.com/D3vl0per/crypt/generic/fs.go:162.16,164.3 1 0 -github.com/D3vl0per/crypt/generic/fs.go:165.2,165.18 1 0 -github.com/D3vl0per/crypt/generic/utils.go:22.45,24.2 1 0 -github.com/D3vl0per/crypt/generic/utils.go:26.54,28.2 1 0 -github.com/D3vl0per/crypt/generic/utils.go:30.42,32.2 1 0 -github.com/D3vl0per/crypt/generic/utils.go:34.51,36.2 1 0 -github.com/D3vl0per/crypt/generic/utils.go:38.32,40.2 1 0 -github.com/D3vl0per/crypt/generic/utils.go:42.55,44.16 2 0 -github.com/D3vl0per/crypt/generic/utils.go:44.16,46.3 1 0 -github.com/D3vl0per/crypt/generic/utils.go:48.2,52.59 2 0 -github.com/D3vl0per/crypt/generic/utils.go:55.57,57.16 2 0 -github.com/D3vl0per/crypt/generic/utils.go:57.16,59.3 1 0 -github.com/D3vl0per/crypt/generic/utils.go:61.2,65.59 2 0 -github.com/D3vl0per/crypt/generic/utils.go:68.56,70.16 2 0 
-github.com/D3vl0per/crypt/generic/utils.go:70.16,72.3 1 0 -github.com/D3vl0per/crypt/generic/utils.go:74.2,77.16 3 0 -github.com/D3vl0per/crypt/generic/utils.go:77.16,79.3 1 0 -github.com/D3vl0per/crypt/generic/utils.go:81.2,83.37 2 0 -github.com/D3vl0per/crypt/generic/utils.go:86.56,88.16 2 0 -github.com/D3vl0per/crypt/generic/utils.go:88.16,90.3 1 0 -github.com/D3vl0per/crypt/generic/utils.go:92.2,95.16 3 0 -github.com/D3vl0per/crypt/generic/utils.go:95.16,97.3 1 0 -github.com/D3vl0per/crypt/generic/utils.go:99.2,101.37 2 0 -github.com/D3vl0per/crypt/generic/utils.go:104.29,105.22 1 0 -github.com/D3vl0per/crypt/generic/utils.go:105.22,106.13 1 0 -github.com/D3vl0per/crypt/generic/utils.go:106.13,108.4 1 0 -github.com/D3vl0per/crypt/generic/utils.go:110.2,110.13 1 0 -github.com/D3vl0per/crypt/generic/utils.go:113.36,116.24 2 0 -github.com/D3vl0per/crypt/generic/utils.go:116.24,118.3 1 0 -github.com/D3vl0per/crypt/generic/utils.go:120.2,120.24 1 0 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:20.45,22.16 2 3 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:22.16,24.3 1 0 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:26.2,29.8 1 3 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:32.74,43.16 8 1 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:43.16,45.3 1 0 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:47.2,49.88 3 1 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:52.75,65.9 10 1 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:65.9,67.3 1 0 -github.com/D3vl0per/crypt/insecure/asymmetric/asymmetric.go:68.2,68.23 1 1 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:11.65,12.23 1 1 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:12.23,14.3 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:16.2,20.16 4 1 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:20.16,22.3 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:24.2,27.69 3 1 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:30.66,31.23 1 1 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:31.23,33.3 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:35.2,42.9 6 1 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:42.9,44.3 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:46.2,46.23 1 1 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:49.54,51.16 2 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:51.16,53.3 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:54.2,57.67 3 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:57.67,59.3 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:61.2,61.23 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:64.54,66.16 2 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:66.16,68.3 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:69.2,72.67 3 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:72.67,74.3 1 0 -github.com/D3vl0per/crypt/insecure/symmetric/symmetric.go:76.2,76.23 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:36.68,37.42 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:37.42,39.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:41.2,42.16 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:42.16,44.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:46.2,47.54 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:47.54,48.13 1 0 
-github.com/D3vl0per/crypt/symmetric/symmetric.go:51.2,51.53 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:54.69,55.42 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:55.42,57.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:59.2,60.16 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:60.16,62.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:64.2,64.51 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:64.51,66.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:68.2,71.16 3 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:71.16,73.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:74.2,74.23 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:80.60,81.30 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:81.30,83.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:85.2,86.25 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:86.25,88.3 1 4 -github.com/D3vl0per/crypt/symmetric/symmetric.go:90.2,90.58 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:90.58,92.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:94.2,94.19 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:97.60,99.2 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:101.70,102.44 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:102.44,104.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:106.2,107.19 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:107.19,111.3 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:111.8,113.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:114.2,115.16 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:115.16,117.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:119.2,119.42 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:119.42,121.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:122.2,122.34 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:122.34,124.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:125.2,125.12 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:128.70,129.44 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:129.44,131.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:133.2,134.19 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:134.19,138.3 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:138.8,140.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:142.2,143.16 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:143.16,145.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:147.2,147.43 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:147.43,149.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:150.2,150.12 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:157.71,159.51 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:159.51,161.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:163.2,164.16 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:164.16,166.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:168.2,168.41 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:171.76,173.54 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:173.54,175.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:177.2,177.44 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:177.44,179.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:181.2,182.16 2 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:182.16,184.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:186.2,186.41 1 1 -github.com/D3vl0per/crypt/symmetric/symmetric.go:189.61,192.53 3 2 
-github.com/D3vl0per/crypt/symmetric/symmetric.go:192.53,194.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:195.2,195.31 1 2 -github.com/D3vl0per/crypt/symmetric/symmetric.go:195.31,197.3 1 0 -github.com/D3vl0per/crypt/symmetric/symmetric.go:198.2,198.23 1 2 -github.com/D3vl0per/crypt/compression/compression.go:25.52,30.16 4 25 -github.com/D3vl0per/crypt/compression/compression.go:30.16,32.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:34.2,34.36 1 25 -github.com/D3vl0per/crypt/compression/compression.go:37.66,40.16 2 40 -github.com/D3vl0per/crypt/compression/compression.go:40.16,42.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:44.2,45.16 2 40 -github.com/D3vl0per/crypt/compression/compression.go:45.16,48.3 2 0 -github.com/D3vl0per/crypt/compression/compression.go:49.2,49.20 1 40 -github.com/D3vl0per/crypt/compression/compression.go:52.54,57.16 4 25 -github.com/D3vl0per/crypt/compression/compression.go:57.16,59.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:61.2,61.38 1 25 -github.com/D3vl0per/crypt/compression/compression.go:64.68,66.16 2 40 -github.com/D3vl0per/crypt/compression/compression.go:66.16,69.3 2 0 -github.com/D3vl0per/crypt/compression/compression.go:70.2,72.12 3 40 -github.com/D3vl0per/crypt/compression/compression.go:75.31,77.2 1 15 -github.com/D3vl0per/crypt/compression/compression.go:83.52,88.16 4 12 -github.com/D3vl0per/crypt/compression/compression.go:88.16,90.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:92.2,92.36 1 12 -github.com/D3vl0per/crypt/compression/compression.go:95.66,98.16 2 24 -github.com/D3vl0per/crypt/compression/compression.go:98.16,100.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:101.2,102.16 2 24 -github.com/D3vl0per/crypt/compression/compression.go:102.16,105.3 2 0 -github.com/D3vl0per/crypt/compression/compression.go:106.2,106.20 1 24 -github.com/D3vl0per/crypt/compression/compression.go:109.54,114.16 4 12 -github.com/D3vl0per/crypt/compression/compression.go:114.16,116.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:118.2,118.38 1 12 -github.com/D3vl0per/crypt/compression/compression.go:121.68,123.16 2 25 -github.com/D3vl0per/crypt/compression/compression.go:123.16,126.3 2 0 -github.com/D3vl0per/crypt/compression/compression.go:127.2,129.12 3 25 -github.com/D3vl0per/crypt/compression/compression.go:132.31,134.2 1 12 -github.com/D3vl0per/crypt/compression/compression.go:140.53,145.16 4 15 -github.com/D3vl0per/crypt/compression/compression.go:145.16,147.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:149.2,149.36 1 15 -github.com/D3vl0per/crypt/compression/compression.go:152.67,155.16 2 30 -github.com/D3vl0per/crypt/compression/compression.go:155.16,157.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:158.2,159.16 2 30 -github.com/D3vl0per/crypt/compression/compression.go:159.16,162.3 2 0 -github.com/D3vl0per/crypt/compression/compression.go:163.2,163.20 1 30 -github.com/D3vl0per/crypt/compression/compression.go:166.55,171.16 4 15 -github.com/D3vl0per/crypt/compression/compression.go:171.16,173.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:175.2,175.38 1 15 -github.com/D3vl0per/crypt/compression/compression.go:178.69,183.2 4 30 -github.com/D3vl0per/crypt/compression/compression.go:185.32,187.2 1 15 -github.com/D3vl0per/crypt/compression/compression.go:193.53,198.16 4 15 -github.com/D3vl0per/crypt/compression/compression.go:198.16,200.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:202.2,202.36 1 15 
-github.com/D3vl0per/crypt/compression/compression.go:205.67,208.16 2 30 -github.com/D3vl0per/crypt/compression/compression.go:208.16,210.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:211.2,212.16 2 30 -github.com/D3vl0per/crypt/compression/compression.go:212.16,215.3 2 0 -github.com/D3vl0per/crypt/compression/compression.go:216.2,216.20 1 30 -github.com/D3vl0per/crypt/compression/compression.go:219.55,224.16 4 15 -github.com/D3vl0per/crypt/compression/compression.go:224.16,226.3 1 0 -github.com/D3vl0per/crypt/compression/compression.go:228.2,228.38 1 15 -github.com/D3vl0per/crypt/compression/compression.go:231.69,233.16 2 30 -github.com/D3vl0per/crypt/compression/compression.go:233.16,236.3 2 0 -github.com/D3vl0per/crypt/compression/compression.go:237.2,239.12 3 30 -github.com/D3vl0per/crypt/compression/compression.go:242.32,244.2 1 15 -github.com/D3vl0per/crypt/hash/fs.go:5.75,7.16 2 0 -github.com/D3vl0per/crypt/hash/fs.go:7.16,9.3 1 0 -github.com/D3vl0per/crypt/hash/fs.go:10.2,11.16 2 0 -github.com/D3vl0per/crypt/hash/fs.go:11.16,13.3 1 0 -github.com/D3vl0per/crypt/hash/fs.go:14.2,14.18 1 0 -github.com/D3vl0per/crypt/hash/hash.go:33.56,35.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:37.81,39.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:39.16,41.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:43.2,43.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:46.61,48.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:50.81,52.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:52.16,54.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:56.2,56.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:63.56,65.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:67.81,69.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:69.16,71.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:73.2,73.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:76.61,78.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:80.81,82.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:82.16,84.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:86.2,86.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:93.56,95.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:97.81,99.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:99.16,101.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:103.2,103.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:106.61,108.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:110.81,112.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:112.16,114.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:116.2,116.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:119.62,123.16 3 18 -github.com/D3vl0per/crypt/hash/hash.go:123.16,125.17 2 9 -github.com/D3vl0per/crypt/hash/hash.go:125.17,127.4 1 0 -github.com/D3vl0per/crypt/hash/hash.go:128.8,130.17 2 9 -github.com/D3vl0per/crypt/hash/hash.go:130.17,132.4 1 0 -github.com/D3vl0per/crypt/hash/hash.go:135.2,135.44 1 18 -github.com/D3vl0per/crypt/hash/hash.go:135.44,137.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:138.2,138.27 1 18 -github.com/D3vl0per/crypt/hash/hash.go:145.53,147.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:149.78,151.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:151.16,153.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:155.2,155.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:158.58,160.2 1 0 -github.com/D3vl0per/crypt/hash/hash.go:162.78,164.16 2 0 -github.com/D3vl0per/crypt/hash/hash.go:164.16,166.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:168.2,168.51 1 0 -github.com/D3vl0per/crypt/hash/hash.go:171.52,175.16 3 3 -github.com/D3vl0per/crypt/hash/hash.go:175.16,177.44 2 0 -github.com/D3vl0per/crypt/hash/hash.go:177.44,179.4 1 0 
-github.com/D3vl0per/crypt/hash/hash.go:180.8,182.17 2 3 -github.com/D3vl0per/crypt/hash/hash.go:182.17,184.4 1 0 -github.com/D3vl0per/crypt/hash/hash.go:187.2,187.44 1 3 -github.com/D3vl0per/crypt/hash/hash.go:187.44,189.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:191.2,191.27 1 3 -github.com/D3vl0per/crypt/hash/hash.go:198.53,200.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:202.78,204.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:204.16,206.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:208.2,208.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:211.58,213.2 1 0 -github.com/D3vl0per/crypt/hash/hash.go:215.78,217.16 2 0 -github.com/D3vl0per/crypt/hash/hash.go:217.16,219.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:221.2,221.51 1 0 -github.com/D3vl0per/crypt/hash/hash.go:224.52,228.16 3 3 -github.com/D3vl0per/crypt/hash/hash.go:228.16,230.44 2 0 -github.com/D3vl0per/crypt/hash/hash.go:230.44,232.4 1 0 -github.com/D3vl0per/crypt/hash/hash.go:233.8,235.17 2 3 -github.com/D3vl0per/crypt/hash/hash.go:235.17,237.4 1 0 -github.com/D3vl0per/crypt/hash/hash.go:240.2,240.44 1 3 -github.com/D3vl0per/crypt/hash/hash.go:240.44,242.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:244.2,244.27 1 3 -github.com/D3vl0per/crypt/hash/hash.go:251.53,253.2 1 1 -github.com/D3vl0per/crypt/hash/hash.go:255.78,257.16 2 2 -github.com/D3vl0per/crypt/hash/hash.go:257.16,259.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:261.2,261.51 1 2 -github.com/D3vl0per/crypt/hash/hash.go:264.58,266.2 1 0 -github.com/D3vl0per/crypt/hash/hash.go:268.78,270.16 2 0 -github.com/D3vl0per/crypt/hash/hash.go:270.16,272.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:274.2,274.51 1 0 -github.com/D3vl0per/crypt/hash/hash.go:277.52,281.16 3 3 -github.com/D3vl0per/crypt/hash/hash.go:281.16,283.44 2 0 -github.com/D3vl0per/crypt/hash/hash.go:283.44,285.4 1 0 -github.com/D3vl0per/crypt/hash/hash.go:286.8,288.17 2 3 -github.com/D3vl0per/crypt/hash/hash.go:288.17,290.4 1 0 -github.com/D3vl0per/crypt/hash/hash.go:293.2,293.44 1 3 -github.com/D3vl0per/crypt/hash/hash.go:293.44,295.3 1 0 -github.com/D3vl0per/crypt/hash/hash.go:297.2,297.27 1 3 -github.com/D3vl0per/crypt/hash/kdf.go:54.54,76.2 5 14 -github.com/D3vl0per/crypt/hash/kdf.go:78.54,79.19 1 7 -github.com/D3vl0per/crypt/hash/kdf.go:79.19,80.24 1 2 -github.com/D3vl0per/crypt/hash/kdf.go:80.24,82.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:83.8,86.17 3 5 -github.com/D3vl0per/crypt/hash/kdf.go:86.17,88.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:91.2,97.32 6 7 -github.com/D3vl0per/crypt/hash/kdf.go:100.76,102.16 2 7 -github.com/D3vl0per/crypt/hash/kdf.go:102.16,104.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:106.2,107.16 2 7 -github.com/D3vl0per/crypt/hash/kdf.go:107.16,109.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:111.2,112.16 2 7 -github.com/D3vl0per/crypt/hash/kdf.go:112.16,114.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:116.2,116.23 1 7 -github.com/D3vl0per/crypt/hash/kdf.go:116.23,118.17 2 0 -github.com/D3vl0per/crypt/hash/kdf.go:118.17,120.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:121.3,121.32 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:124.2,124.19 1 7 -github.com/D3vl0per/crypt/hash/kdf.go:124.19,126.17 2 0 -github.com/D3vl0per/crypt/hash/kdf.go:126.17,128.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:129.3,129.28 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:132.2,132.24 1 7 -github.com/D3vl0per/crypt/hash/kdf.go:132.24,134.17 2 0 -github.com/D3vl0per/crypt/hash/kdf.go:134.17,136.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:137.3,137.44 1 0 
-github.com/D3vl0per/crypt/hash/kdf.go:137.44,139.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:139.9,141.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:144.2,144.19 1 7 -github.com/D3vl0per/crypt/hash/kdf.go:144.19,146.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:148.2,150.56 2 7 -github.com/D3vl0per/crypt/hash/kdf.go:153.79,160.23 4 14 -github.com/D3vl0per/crypt/hash/kdf.go:160.23,162.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:164.2,174.80 2 14 -github.com/D3vl0per/crypt/hash/kdf.go:174.80,176.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:178.2,178.110 1 14 -github.com/D3vl0per/crypt/hash/kdf.go:178.110,180.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:182.2,182.34 1 14 -github.com/D3vl0per/crypt/hash/kdf.go:182.34,184.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:186.2,186.34 1 14 -github.com/D3vl0per/crypt/hash/kdf.go:186.34,188.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:190.2,190.24 1 14 -github.com/D3vl0per/crypt/hash/kdf.go:193.50,194.19 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:194.19,195.41 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:195.41,197.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:198.8,201.17 3 0 -github.com/D3vl0per/crypt/hash/kdf.go:201.17,203.4 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:206.2,206.23 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:206.23,208.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:210.2,214.49 3 0 -github.com/D3vl0per/crypt/hash/kdf.go:214.49,216.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:218.2,218.98 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:221.65,223.60 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:223.60,225.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:227.2,227.23 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:227.23,229.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:231.2,235.49 3 0 -github.com/D3vl0per/crypt/hash/kdf.go:235.49,237.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:240.2,241.16 2 0 -github.com/D3vl0per/crypt/hash/kdf.go:241.16,243.3 1 0 -github.com/D3vl0per/crypt/hash/kdf.go:245.2,245.47 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:24.67,28.16 3 16 -github.com/D3vl0per/crypt/aged/age_bind.go:28.16,30.3 1 1 -github.com/D3vl0per/crypt/aged/age_bind.go:32.2,34.45 2 15 -github.com/D3vl0per/crypt/aged/age_bind.go:34.45,35.14 1 31 -github.com/D3vl0per/crypt/aged/age_bind.go:35.14,36.12 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:38.3,38.41 1 31 -github.com/D3vl0per/crypt/aged/age_bind.go:38.41,40.18 2 30 -github.com/D3vl0per/crypt/aged/age_bind.go:40.18,42.5 1 1 -github.com/D3vl0per/crypt/aged/age_bind.go:43.4,43.64 1 29 -github.com/D3vl0per/crypt/aged/age_bind.go:47.2,47.33 1 14 -github.com/D3vl0per/crypt/aged/age_bind.go:47.33,49.3 1 14 -github.com/D3vl0per/crypt/aged/age_bind.go:51.2,51.22 1 14 -github.com/D3vl0per/crypt/aged/age_bind.go:54.48,56.16 2 20 -github.com/D3vl0per/crypt/aged/age_bind.go:56.16,58.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:59.2,59.22 1 20 -github.com/D3vl0per/crypt/aged/age_bind.go:70.57,73.16 2 12 -github.com/D3vl0per/crypt/aged/age_bind.go:73.16,75.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:77.2,79.16 3 12 -github.com/D3vl0per/crypt/aged/age_bind.go:79.16,81.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:83.2,83.42 1 12 -github.com/D3vl0per/crypt/aged/age_bind.go:83.42,85.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:86.2,86.34 1 12 -github.com/D3vl0per/crypt/aged/age_bind.go:86.34,88.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:90.2,90.35 1 12 -github.com/D3vl0per/crypt/aged/age_bind.go:93.57,95.16 2 34 -github.com/D3vl0per/crypt/aged/age_bind.go:95.16,97.3 1 0 
-github.com/D3vl0per/crypt/aged/age_bind.go:98.2,99.16 2 34 -github.com/D3vl0per/crypt/aged/age_bind.go:99.16,101.3 1 11 -github.com/D3vl0per/crypt/aged/age_bind.go:102.2,103.43 2 23 -github.com/D3vl0per/crypt/aged/age_bind.go:103.43,105.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:107.2,107.37 1 23 -github.com/D3vl0per/crypt/aged/age_bind.go:110.63,112.16 2 11 -github.com/D3vl0per/crypt/aged/age_bind.go:112.16,114.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:116.2,117.16 2 11 -github.com/D3vl0per/crypt/aged/age_bind.go:117.16,119.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:121.2,123.16 3 11 -github.com/D3vl0per/crypt/aged/age_bind.go:123.16,125.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:127.2,127.16 1 11 -github.com/D3vl0per/crypt/aged/age_bind.go:127.16,129.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:131.2,131.42 1 11 -github.com/D3vl0per/crypt/aged/age_bind.go:131.42,133.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:134.2,134.34 1 11 -github.com/D3vl0per/crypt/aged/age_bind.go:134.34,136.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:138.2,138.35 1 11 -github.com/D3vl0per/crypt/aged/age_bind.go:141.63,143.16 2 11 -github.com/D3vl0per/crypt/aged/age_bind.go:143.16,145.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:147.2,148.16 2 11 -github.com/D3vl0per/crypt/aged/age_bind.go:148.16,150.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:152.2,153.16 2 11 -github.com/D3vl0per/crypt/aged/age_bind.go:153.16,155.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:157.2,158.43 2 11 -github.com/D3vl0per/crypt/aged/age_bind.go:158.43,160.3 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:162.2,162.37 1 11 -github.com/D3vl0per/crypt/aged/age_bind.go:165.54,168.16 2 23 -github.com/D3vl0per/crypt/aged/age_bind.go:168.16,173.17 4 17 -github.com/D3vl0per/crypt/aged/age_bind.go:173.17,175.4 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:177.3,177.39 1 17 -github.com/D3vl0per/crypt/aged/age_bind.go:179.8,181.3 1 6 -github.com/D3vl0per/crypt/aged/age_bind.go:182.2,182.16 1 23 -github.com/D3vl0per/crypt/aged/age_bind.go:185.62,186.16 1 34 -github.com/D3vl0per/crypt/aged/age_bind.go:186.16,188.17 2 25 -github.com/D3vl0per/crypt/aged/age_bind.go:188.17,190.4 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:191.3,191.18 1 25 -github.com/D3vl0per/crypt/aged/age_bind.go:193.2,193.18 1 9 -github.com/D3vl0per/crypt/aged/age_bind.go:196.58,197.19 1 23 -github.com/D3vl0per/crypt/aged/age_bind.go:197.19,199.17 2 12 -github.com/D3vl0per/crypt/aged/age_bind.go:199.17,201.4 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:202.3,202.18 1 12 -github.com/D3vl0per/crypt/aged/age_bind.go:204.2,204.16 1 11 -github.com/D3vl0per/crypt/aged/age_bind.go:207.49,209.19 2 45 -github.com/D3vl0per/crypt/aged/age_bind.go:209.19,212.17 3 24 -github.com/D3vl0per/crypt/aged/age_bind.go:212.17,214.4 1 0 -github.com/D3vl0per/crypt/aged/age_bind.go:215.8,217.3 1 21 -github.com/D3vl0per/crypt/aged/age_bind.go:218.2,218.24 1 45 -github.com/D3vl0per/crypt/aged/age_bind.go:221.45,223.35 2 3 -github.com/D3vl0per/crypt/aged/age_bind.go:223.35,225.3 1 9 -github.com/D3vl0per/crypt/aged/age_bind.go:226.2,226.13 1 3 -github.com/D3vl0per/crypt/aged/age_bind.go:229.52,231.2 1 4 -github.com/D3vl0per/crypt/aged/obf.go:26.62,29.23 2 13 -github.com/D3vl0per/crypt/aged/obf.go:29.23,31.3 1 0 -github.com/D3vl0per/crypt/aged/obf.go:32.2,32.44 1 13 -github.com/D3vl0per/crypt/aged/obf.go:32.44,34.3 1 0 -github.com/D3vl0per/crypt/aged/obf.go:35.2,39.27 4 13 -github.com/D3vl0per/crypt/aged/obf.go:39.27,42.3 2 3337 
-github.com/D3vl0per/crypt/aged/obf.go:44.2,45.58 2 13
-github.com/D3vl0per/crypt/aged/obf.go:48.64,50.23 2 25
-github.com/D3vl0per/crypt/aged/obf.go:50.23,52.3 1 0
-github.com/D3vl0per/crypt/aged/obf.go:53.2,53.45 1 25
-github.com/D3vl0per/crypt/aged/obf.go:53.45,55.3 1 0
-github.com/D3vl0per/crypt/aged/obf.go:56.2,57.32 2 25
-github.com/D3vl0per/crypt/aged/obf.go:57.32,59.3 1 0
-github.com/D3vl0per/crypt/aged/obf.go:61.2,64.54 3 25
-github.com/D3vl0per/crypt/aged/obf.go:64.54,67.3 2 7693
-github.com/D3vl0per/crypt/aged/obf.go:69.2,69.52 1 25
-github.com/D3vl0per/crypt/aged/stream.go:37.60,38.20 1 16
-github.com/D3vl0per/crypt/aged/stream.go:38.20,40.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:42.2,43.16 2 16
-github.com/D3vl0per/crypt/aged/stream.go:43.16,45.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:46.2,49.8 1 16
-github.com/D3vl0per/crypt/aged/stream.go:52.46,53.23 1 620
-github.com/D3vl0per/crypt/aged/stream.go:53.23,57.3 3 604
-github.com/D3vl0per/crypt/aged/stream.go:58.2,58.18 1 16
-github.com/D3vl0per/crypt/aged/stream.go:58.18,60.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:61.2,61.17 1 16
-github.com/D3vl0per/crypt/aged/stream.go:61.17,63.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:65.2,66.16 2 16
-github.com/D3vl0per/crypt/aged/stream.go:66.16,69.3 2 0
-github.com/D3vl0per/crypt/aged/stream.go:71.2,74.10 3 16
-github.com/D3vl0per/crypt/aged/stream.go:74.10,79.56 1 12
-github.com/D3vl0per/crypt/aged/stream.go:79.56,81.4 1 0
-github.com/D3vl0per/crypt/aged/stream.go:81.9,81.27 1 12
-github.com/D3vl0per/crypt/aged/stream.go:81.27,83.4 1 0
-github.com/D3vl0per/crypt/aged/stream.go:83.9,85.4 1 12
-github.com/D3vl0per/crypt/aged/stream.go:88.2,88.15 1 16
-github.com/D3vl0per/crypt/aged/stream.go:94.53,95.24 1 16
-github.com/D3vl0per/crypt/aged/stream.go:95.24,96.70 1 0
-github.com/D3vl0per/crypt/aged/stream.go:99.2,101.9 3 16
-github.com/D3vl0per/crypt/aged/stream.go:102.30,104.36 1 0
-github.com/D3vl0per/crypt/aged/stream.go:105.43,108.52 1 8
-github.com/D3vl0per/crypt/aged/stream.go:108.52,110.4 1 0
-github.com/D3vl0per/crypt/aged/stream.go:111.3,113.29 3 8
-github.com/D3vl0per/crypt/aged/stream.go:114.18,115.20 1 0
-github.com/D3vl0per/crypt/aged/stream.go:118.2,120.25 3 16
-github.com/D3vl0per/crypt/aged/stream.go:120.25,125.3 3 4
-github.com/D3vl0per/crypt/aged/stream.go:126.2,126.16 1 16
-github.com/D3vl0per/crypt/aged/stream.go:126.16,128.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:130.2,132.18 3 16
-github.com/D3vl0per/crypt/aged/stream.go:135.57,136.39 1 36
-github.com/D3vl0per/crypt/aged/stream.go:136.39,138.20 2 36
-github.com/D3vl0per/crypt/aged/stream.go:138.20,139.9 1 36
-github.com/D3vl0per/crypt/aged/stream.go:140.9,140.20 1 0
-github.com/D3vl0per/crypt/aged/stream.go:140.20,142.49 1 0
-github.com/D3vl0per/crypt/aged/stream.go:147.65,149.2 1 28
-github.com/D3vl0per/crypt/aged/stream.go:151.65,153.2 1 8
-github.com/D3vl0per/crypt/aged/stream.go:164.60,165.20 1 16
-github.com/D3vl0per/crypt/aged/stream.go:165.20,167.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:168.2,169.16 2 16
-github.com/D3vl0per/crypt/aged/stream.go:169.16,171.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:172.2,177.15 3 16
-github.com/D3vl0per/crypt/aged/stream.go:180.53,183.18 1 1236
-github.com/D3vl0per/crypt/aged/stream.go:183.18,185.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:186.2,186.17 1 1236
-github.com/D3vl0per/crypt/aged/stream.go:186.17,188.3 1 618
-github.com/D3vl0per/crypt/aged/stream.go:190.2,191.17 2 618
-github.com/D3vl0per/crypt/aged/stream.go:191.17,197.50 5 622
-github.com/D3vl0per/crypt/aged/stream.go:197.50,198.53 1 4
-github.com/D3vl0per/crypt/aged/stream.go:198.53,201.5 2 0
-github.com/D3vl0per/crypt/aged/stream.go:204.2,204.19 1 618
-github.com/D3vl0per/crypt/aged/stream.go:208.32,209.18 1 16
-github.com/D3vl0per/crypt/aged/stream.go:209.18,211.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:213.2,214.18 2 16
-github.com/D3vl0per/crypt/aged/stream.go:214.18,216.3 1 0
-github.com/D3vl0per/crypt/aged/stream.go:218.2,219.12 2 16
-github.com/D3vl0per/crypt/aged/stream.go:227.46,228.44 1 20
-github.com/D3vl0per/crypt/aged/stream.go:228.44,229.67 1 0
-github.com/D3vl0per/crypt/aged/stream.go:232.2,232.10 1 20
-github.com/D3vl0per/crypt/aged/stream.go:232.10,234.3 1 16
-github.com/D3vl0per/crypt/aged/stream.go:235.2,239.12 5 20

From afa691d4a557c13da0e18751dc8b81b0c69fc857 Mon Sep 17 00:00:00 2001
From: D3v
Date: Wed, 22 Nov 2023 14:54:20 +0100
Subject: [PATCH 12/12] ECDH Curve25519 implementation, README modifications

---
 README.md                                 | 27 ++++---
 asymmetric/ecdh.go                        | 41 +++++++++++
 asymmetric/ecdh_test.go                   | 73 +++++++++++++++++++
 asymmetric/{asymmetric.go => ecdsa.go}    |  5 --
 .../{asymmetric_test.go => ecdsa_test.go} |  0
 go.sum                                    | 10 ---
 6 files changed, 131 insertions(+), 25 deletions(-)
 create mode 100644 asymmetric/ecdh.go
 create mode 100644 asymmetric/ecdh_test.go
 rename asymmetric/{asymmetric.go => ecdsa.go} (98%)
 rename asymmetric/{asymmetric_test.go => ecdsa_test.go} (100%)

diff --git a/README.md b/README.md
index 8fd2f00..d949cdd 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
 # Go-Crypt (!!! WIP !!!)
 
-High-level API binding to low-level crypto APIs in golang
+This project is a comprehensive toolkit for developers who need to implement various cryptographic operations in their Go applications.
 
 
-Crypto suite:
+## Crypto suite:
 - Generic
   - (Secure) Overwrite
   - (Secure) Delete
@@ -24,9 +24,11 @@ Crypto suite:
   - XOR
   - AES-GCM
 - Asymmetric
-  - ed25519
-  - ed448
-  - x25519 (pending)
+  - ECDSA
+    - ed25519
+    - ed448
+  - ECDH
+    - Curve25519
 - Hash
   - Blake2b-256
   - Blake2b-384
@@ -34,12 +36,12 @@ Crypto suite:
   - SHA3-256
   - SHA3-384
   - SHA3-512
-  - SHAKE-128 (pending)
-  - SHAKE-256 (pending)
-  - go_simhash (pending)
+  - SHAKE-128 (planned)
+  - SHAKE-256 (planned)
+  - go_simhash (planned)
   - Argon2id
-  - Scrypt (pending)
-  - HKDF (pending)
+  - Scrypt (planned)
+  - HKDF (planned)
 - Compression
   - flate
   - gzip
@@ -49,3 +51,8 @@ Crypto suite:
   - Age encryption suite
   - Age header obfuscation v1
 
+## Disclaimer
+
+This project includes cryptographic operations that have not been independently audited. While every effort has been made to ensure the correctness and security of these operations, they are provided "as is". The author cannot guarantee their security and cannot be held responsible for any consequences arising from their use. If you use these packages in your own projects, you do so at your own risk.
+
+It is strongly recommended that you seek an independent security review if you plan to use them in a production environment.
\ No newline at end of file
diff --git a/asymmetric/ecdh.go b/asymmetric/ecdh.go
new file mode 100644
index 0000000..cc50d9a
--- /dev/null
+++ b/asymmetric/ecdh.go
@@ -0,0 +1,41 @@
+package asymmetric
+
+import (
+	"github.com/D3vl0per/crypt/generic"
+	"golang.org/x/crypto/curve25519"
+)
+
+type DH interface {
+	GenerateKeypair() error
+	GenerateSharedSecret([]byte) ([]byte, error)
+}
+
+type Curve25519 struct {
+	PublicKey []byte
+	SecretKey []byte
+}
+
+func (c *Curve25519) GenerateKeypair() error {
+	secretKey, err := generic.CSPRNG(32)
+	if err != nil {
+		return err
+	}
+	c.SecretKey = secretKey
+
+	publicKey, err := curve25519.X25519(secretKey, curve25519.Basepoint)
+	if err != nil {
+		return err
+	}
+
+	c.PublicKey = publicKey
+	return nil
+}
+
+func (c *Curve25519) GenerateSharedSecret(recipientPublicKey []byte) ([]byte, error) {
+	sharedSecret, err := curve25519.X25519(c.SecretKey, recipientPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return sharedSecret, nil
+}
diff --git a/asymmetric/ecdh_test.go b/asymmetric/ecdh_test.go
new file mode 100644
index 0000000..e12d327
--- /dev/null
+++ b/asymmetric/ecdh_test.go
@@ -0,0 +1,73 @@
+package asymmetric_test
+
+import (
+	"testing"
+
+	"github.com/D3vl0per/crypt/asymmetric"
+	"github.com/D3vl0per/crypt/generic"
+	r "github.com/stretchr/testify/require"
+	"golang.org/x/crypto/curve25519"
+)
+
+func TestGenerateSharedSecret(t *testing.T) {
+	alice := asymmetric.Curve25519{}
+	hex := generic.Hex{}
+	err := alice.GenerateKeypair()
+	r.NoError(t, err)
+	t.Log("Secret Key (Alice):", hex.Encode(alice.SecretKey))
+	t.Log("Public Key (Alice):", hex.Encode(alice.PublicKey))
+	r.Len(t, alice.SecretKey, 32)
+	r.Len(t, alice.PublicKey, 32)
+
+	bob := asymmetric.Curve25519{}
+	err = bob.GenerateKeypair()
+	r.NoError(t, err)
+	t.Log("Secret Key (Bob):", hex.Encode(bob.SecretKey))
+	t.Log("Public Key (Bob):", hex.Encode(bob.PublicKey))
+	r.Len(t, bob.SecretKey, 32)
+	r.Len(t, bob.PublicKey, 32)
+
+	r.NotEqual(t, alice.PublicKey, bob.PublicKey)
+	r.NotEqual(t, alice.SecretKey, bob.SecretKey)
+
+	aliceSharedSecret, err := alice.GenerateSharedSecret(bob.PublicKey)
+	r.NoError(t, err)
+	t.Log("Shared Secret (Alice):", hex.Encode(aliceSharedSecret))
+	r.Len(t, aliceSharedSecret, 32)
+
+	bobSharedSecret, err := bob.GenerateSharedSecret(alice.PublicKey)
+	r.NoError(t, err)
+	t.Log("Shared Secret (Bob):", hex.Encode(bobSharedSecret))
+	r.Len(t, bobSharedSecret, 32)
+
+	r.Equal(t, aliceSharedSecret, bobSharedSecret)
+}
+
+func TestOriginalCurve25519(t *testing.T) {
+	hex := generic.Hex{}
+	aliceSecretKey, err := generic.CSPRNG(32)
+	r.NoError(t, err)
+	t.Log("Secret key (alice):", hex.Encode(aliceSecretKey))
+
+	alicePublicKey, err := curve25519.X25519(aliceSecretKey, curve25519.Basepoint)
+	r.NoError(t, err)
+	t.Log("Public key (alice):", hex.Encode(alicePublicKey))
+
+	bobSecretKey, err := generic.CSPRNG(32)
+	r.NoError(t, err)
+	t.Log("Secret key (bob):", hex.Encode(bobSecretKey))
+
+	bobPublicKey, err := curve25519.X25519(bobSecretKey, curve25519.Basepoint)
+	r.NoError(t, err)
+	t.Log("Public key (bob):", hex.Encode(bobPublicKey))
+
+	aliceSharedKey, err := curve25519.X25519(aliceSecretKey, bobPublicKey)
+	r.NoError(t, err)
+	t.Log("Shared key (alice):", hex.Encode(aliceSharedKey))
+
+	bobSharedKey, err := curve25519.X25519(bobSecretKey, alicePublicKey)
+	r.NoError(t, err)
+	t.Log("Shared key (bob):", hex.Encode(bobSharedKey))
+
+	r.Equal(t, aliceSharedKey, bobSharedKey)
+}
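For quick reference, the exchange introduced above can be driven entirely through the new `asymmetric.Curve25519` type. The sketch below mirrors `TestGenerateSharedSecret`; the `main` wrapper and `panic` error handling are illustrative only, not part of the patch:

```go
package main

import (
	"crypto/subtle"
	"fmt"

	"github.com/D3vl0per/crypt/asymmetric"
)

func main() {
	// Each party generates its own X25519 keypair.
	alice := asymmetric.Curve25519{}
	bob := asymmetric.Curve25519{}
	if err := alice.GenerateKeypair(); err != nil {
		panic(err)
	}
	if err := bob.GenerateKeypair(); err != nil {
		panic(err)
	}

	// Only public keys cross the wire; both sides derive the same 32-byte secret.
	aliceShared, err := alice.GenerateSharedSecret(bob.PublicKey)
	if err != nil {
		panic(err)
	}
	bobShared, err := bob.GenerateSharedSecret(alice.PublicKey)
	if err != nil {
		panic(err)
	}

	fmt.Println("secrets match:", subtle.ConstantTimeCompare(aliceShared, bobShared) == 1)
}
```

Both methods are thin wrappers over `curve25519.X25519`, which `TestOriginalCurve25519` exercises directly against the same primitive.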
diff --git a/asymmetric/asymmetric.go b/asymmetric/ecdsa.go
similarity index 98%
rename from asymmetric/asymmetric.go
rename to asymmetric/ecdsa.go
index 6077f3d..9b45005 100644
--- a/asymmetric/asymmetric.go
+++ b/asymmetric/ecdsa.go
@@ -14,11 +14,6 @@ import (
 /// Ed25519 Suite
 ///
 
-type Encryption interface {
-	Encrypt()
-	Decrypt()
-}
-
 type Signing interface {
 	Generate() error
 	GenerateFromSeed([]byte) error
diff --git a/asymmetric/asymmetric_test.go b/asymmetric/ecdsa_test.go
similarity index 100%
rename from asymmetric/asymmetric_test.go
rename to asymmetric/ecdsa_test.go
diff --git a/go.sum b/go.sum
index d51f52f..9f0ddaa 100644
--- a/go.sum
+++ b/go.sum
@@ -2,21 +2,12 @@ filippo.io/age v1.1.1 h1:pIpO7l151hCnQ4BdyBujnGP2YlUo0uj6sAVNHGBvXHg=
 filippo.io/age v1.1.1/go.mod h1:l03SrzDUrBkdBx8+IILdnn2KZysqQdbEBUQ4p3sqEQE=
 github.com/cloudflare/circl v1.3.6 h1:/xbKIqSHbZXHwkhbrhrt2YOHIwYJlXH94E3tI/gDlUg=
 github.com/cloudflare/circl v1.3.6/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
-github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
 github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
@@ -25,6 +16,5 @@ golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
 golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
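One usage note on the new ECDH code: the raw X25519 output is a Diffie-Hellman secret, not a uniformly random key, so standard practice is to feed it through a KDF before using it with the symmetric suite. HKDF is still marked as planned in the README; until it lands, a minimal sketch using the already-vendored `golang.org/x/crypto/hkdf` directly (the salt, the info string, and the 32-byte output size are illustrative assumptions, not part of this patch):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"github.com/D3vl0per/crypt/asymmetric"
	"golang.org/x/crypto/hkdf"
)

func main() {
	alice := asymmetric.Curve25519{}
	bob := asymmetric.Curve25519{}
	if err := alice.GenerateKeypair(); err != nil {
		panic(err)
	}
	if err := bob.GenerateKeypair(); err != nil {
		panic(err)
	}

	shared, err := alice.GenerateSharedSecret(bob.PublicKey)
	if err != nil {
		panic(err)
	}

	// Expand the DH secret into a 32-byte symmetric key. A real protocol
	// would bind the info parameter to its own context and transcript.
	kdf := hkdf.New(sha256.New, shared, nil, []byte("go-crypt ecdh example v1"))
	key := make([]byte, 32)
	if _, err := io.ReadFull(kdf, key); err != nil {
		panic(err)
	}
	fmt.Printf("derived key: %x\n", key)
}
```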