From 4df9aa832f94177c8fe0060e272f81fd81dc17cb Mon Sep 17 00:00:00 2001
From: Ti Chi Robot
Date: Thu, 12 Dec 2024 13:11:33 +0800
Subject: [PATCH] br: fix debug decode backupmeta (#56627) (#57891)

close pingcap/tidb#56296
---
 br/cmd/br/debug.go            |  15 ++-
 br/pkg/metautil/metafile.go   |   2 +-
 br/pkg/utils/BUILD.bazel      |   7 +-
 br/pkg/utils/debug.go         |  91 +++++++++++++
 br/pkg/utils/debug_test.go    | 183 ++++++++++++++++++++++++++
 br/pkg/utils/json.go          |  89 +++++++++++++
 br/pkg/utils/json_test.go     | 239 +++++++++++++++++++++++++++++++++-
 br/tests/br_debug_meta/run.sh |  29 +++--
 8 files changed, 643 insertions(+), 12 deletions(-)
 create mode 100644 br/pkg/utils/debug.go
 create mode 100644 br/pkg/utils/debug_test.go

diff --git a/br/cmd/br/debug.go b/br/cmd/br/debug.go
index abd723f66f9d7..ec763c2ea67e4 100644
--- a/br/cmd/br/debug.go
+++ b/br/cmd/br/debug.go
@@ -283,6 +283,16 @@ func decodeBackupMetaCommand() *cobra.Command {
 			fieldName, _ := cmd.Flags().GetString("field")
 			if fieldName == "" {
+				if err := utils.DecodeMetaFile(ctx, s, &cfg.CipherInfo, backupMeta.FileIndex); err != nil {
+					return errors.Trace(err)
+				}
+				if err := utils.DecodeMetaFile(ctx, s, &cfg.CipherInfo, backupMeta.RawRangeIndex); err != nil {
+					return errors.Trace(err)
+				}
+				if err := utils.DecodeMetaFile(ctx, s, &cfg.CipherInfo, backupMeta.SchemaIndex); err != nil {
+					return errors.Trace(err)
+				}
+
 				// No field flag, write backupmeta to external storage in JSON format.
 				backupMetaJSON, err := utils.MarshalBackupMeta(backupMeta)
 				if err != nil {
@@ -292,7 +302,7 @@ func decodeBackupMetaCommand() *cobra.Command {
 				if err != nil {
 					return errors.Trace(err)
 				}
-				cmd.Printf("backupmeta decoded at %s\n", path.Join(cfg.Storage, metautil.MetaJSONFile))
+				cmd.Printf("backupmeta decoded at %s\n", path.Join(s.URI(), metautil.MetaJSONFile))
 				return nil
 			}
@@ -351,6 +361,9 @@ func encodeBackupMetaCommand() *cobra.Command {
 			if err != nil {
 				return errors.Trace(err)
 			}
+			if backupMetaJSON.Version == metautil.MetaV2 {
+				return errors.Errorf("encoding backupmeta v2 is unimplemented")
+			}
 			backupMeta, err := proto.Marshal(backupMetaJSON)
 			if err != nil {
 				return errors.Trace(err)
 			}
diff --git a/br/pkg/metautil/metafile.go b/br/pkg/metautil/metafile.go
index b915da8fa10d6..4cbe1f8b30795 100644
--- a/br/pkg/metautil/metafile.go
+++ b/br/pkg/metautil/metafile.go
@@ -36,7 +36,7 @@ const (
 	// MetaFile represents file name
 	MetaFile = "backupmeta"
 	// MetaJSONFile represents backup meta json file name
-	MetaJSONFile = "backupmeta.json"
+	MetaJSONFile = "jsons/backupmeta.json"
 	// MaxBatchSize represents the internal channel buffer size of MetaWriter and MetaReader.
 	MaxBatchSize = 1024
diff --git a/br/pkg/utils/BUILD.bazel b/br/pkg/utils/BUILD.bazel
index 9b0169dfe06d8..d853e644db315 100644
--- a/br/pkg/utils/BUILD.bazel
+++ b/br/pkg/utils/BUILD.bazel
@@ -6,6 +6,7 @@ go_library(
         "backoff.go",
         "cdc.go",
         "db.go",
+        "debug.go",
        "dyn_pprof_other.go",
         "dyn_pprof_unix.go",
         "env.go",
@@ -31,6 +32,7 @@ go_library(
         "//br/pkg/errors",
         "//br/pkg/logutil",
         "//br/pkg/metautil",
+        "//br/pkg/storage",
         "//pkg/errno",
         "//pkg/kv",
         "//pkg/parser/model",
@@ -43,6 +45,7 @@ go_library(
         "//pkg/util/sqlexec",
         "@com_github_cheggaaa_pb_v3//:pb",
         "@com_github_docker_go_units//:go-units",
+        "@com_github_gogo_protobuf//proto",
         "@com_github_google_uuid//:uuid",
         "@com_github_pingcap_errors//:errors",
         "@com_github_pingcap_failpoint//:failpoint",
@@ -76,6 +79,7 @@ go_test(
         "backoff_test.go",
         "cdc_test.go",
         "db_test.go",
+        "debug_test.go",
         "env_test.go",
         "json_test.go",
         "key_test.go",
@@ -91,7 +95,7 @@ go_test(
     ],
     embed = [":utils"],
     flaky = True,
-    shard_count = 37,
+    shard_count = 39,
     deps = [
         "//br/pkg/errors",
         "//br/pkg/metautil",
@@ -107,6 +111,7 @@ go_test(
         "//pkg/types",
         "//pkg/util/chunk",
         "//pkg/util/sqlexec",
+        "@com_github_gogo_protobuf//proto",
         "@com_github_golang_protobuf//proto",
         "@com_github_pingcap_errors//:errors",
         "@com_github_pingcap_failpoint//:failpoint",
diff --git a/br/pkg/utils/debug.go b/br/pkg/utils/debug.go
new file mode 100644
index 0000000000000..c584172659191
--- /dev/null
+++ b/br/pkg/utils/debug.go
@@ -0,0 +1,91 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+    "bytes"
+    "context"
+    "crypto/sha256"
+    "fmt"
+
+    "github.com/gogo/protobuf/proto"
+    "github.com/pingcap/errors"
+    backuppb "github.com/pingcap/kvproto/pkg/brpb"
+    berrors "github.com/pingcap/tidb/br/pkg/errors"
+    "github.com/pingcap/tidb/br/pkg/metautil"
+    "github.com/pingcap/tidb/br/pkg/storage"
+    "golang.org/x/sync/errgroup"
+)
+
+const (
+    // JSONFileFormat represents the JSON file name format
+    JSONFileFormat = "jsons/%s.json"
+)
+
+// DecodeMetaFile decodes the meta file to JSON format. It is called by `br debug`.
+func DecodeMetaFile(
+    ctx context.Context,
+    s storage.ExternalStorage,
+    cipher *backuppb.CipherInfo,
+    metaIndex *backuppb.MetaFile,
+) error {
+    if metaIndex == nil {
+        return nil
+    }
+    eg, ectx := errgroup.WithContext(ctx)
+    workers := NewWorkerPool(8, "download files workers")
+    for _, node := range metaIndex.MetaFiles {
+        workers.ApplyOnErrorGroup(eg, func() error {
+            content, err := s.ReadFile(ectx, node.Name)
+            if err != nil {
+                return errors.Trace(err)
+            }
+
+            decryptContent, err := metautil.Decrypt(content, cipher, node.CipherIv)
+            if err != nil {
+                return errors.Trace(err)
+            }
+
+            checksum := sha256.Sum256(decryptContent)
+            if !bytes.Equal(node.Sha256, checksum[:]) {
+                return berrors.ErrInvalidMetaFile.GenWithStackByArgs(fmt.Sprintf(
+                    "checksum mismatch expect %x, got %x", node.Sha256, checksum[:]))
+            }
+
+            child := &backuppb.MetaFile{}
+            if err = proto.Unmarshal(decryptContent, child); err != nil {
+                return errors.Trace(err)
+            }
+
+            // The max depth of the root metafile is only 1.
+            // ASSERT: len(child.MetaFiles) == 0
+            if len(child.MetaFiles) > 0 {
+                return errors.Errorf("the metafile has unexpected level: %v", child)
+            }
+
+            jsonContent, err := MarshalMetaFile(child)
+            if err != nil {
+                return errors.Trace(err)
+            }
+
+            if err := s.WriteFile(ectx, fmt.Sprintf(JSONFileFormat, node.Name), jsonContent); err != nil {
+                return errors.Trace(err)
+            }
+
+            return nil
+        })
+    }
+    return eg.Wait()
+}
diff --git a/br/pkg/utils/debug_test.go b/br/pkg/utils/debug_test.go
new file mode 100644
index 0000000000000..c879081085bcd
--- /dev/null
+++ b/br/pkg/utils/debug_test.go
@@ -0,0 +1,183 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils_test
+
+import (
+    "context"
+    "crypto/sha256"
+    "fmt"
+    "math/rand"
+    "testing"
+
+    "github.com/gogo/protobuf/proto"
+    backuppb "github.com/pingcap/kvproto/pkg/brpb"
+    "github.com/pingcap/tidb/br/pkg/metautil"
+    "github.com/pingcap/tidb/br/pkg/storage"
+    "github.com/pingcap/tidb/br/pkg/utils"
+    "github.com/stretchr/testify/require"
+)
+
+func flushMetaFile(
+    ctx context.Context,
+    t *testing.T,
+    fname string,
+    metaFile *backuppb.MetaFile,
+    storage storage.ExternalStorage,
+    cipher *backuppb.CipherInfo,
+) *backuppb.File {
+    content, err := metaFile.Marshal()
+    require.NoError(t, err)
+
+    encryptedContent, iv, err := metautil.Encrypt(content, cipher)
+    require.NoError(t, err)
+
+    err = storage.WriteFile(ctx, fname, encryptedContent)
+    require.NoError(t, err)
+
+    checksum := sha256.Sum256(content)
+    file := &backuppb.File{
+        Name:     fname,
+        Sha256:   checksum[:],
+        Size_:    uint64(len(content)),
+        CipherIv: iv,
+    }
+
+    return file
+}
+
+func flushStatsFile(
+    ctx context.Context,
+    t *testing.T,
+    fname string,
+    statsFile *backuppb.StatsFile,
+    storage storage.ExternalStorage,
+    cipher *backuppb.CipherInfo,
+) *backuppb.StatsFileIndex {
+    content, err := proto.Marshal(statsFile)
+    require.NoError(t, err)
+
+    checksum := sha256.Sum256(content)
+    sizeOri := uint64(len(content))
+    encryptedContent, iv, err := metautil.Encrypt(content, cipher)
+    require.NoError(t, err)
+
+    err = storage.WriteFile(ctx, fname, encryptedContent)
+    require.NoError(t, err)
+
+    return &backuppb.StatsFileIndex{
+        Name:       fname,
+        Sha256:     checksum[:],
+        SizeEnc:    uint64(len(encryptedContent)),
+        SizeOri:    sizeOri,
+        CipherIv:   iv,
+        InlineData: []byte(fmt.Sprintf("%d", rand.Int())),
+    }
+}
+
+func TestDecodeMetaFile(t *testing.T) {
+    ctx := context.Background()
+    base := t.TempDir()
+    s, err := storage.NewLocalStorage(base)
+    require.NoError(t, err)
+    cipher := &backuppb.CipherInfo{CipherType: 1}
+    file1 := flushMetaFile(ctx, t, "data", &backuppb.MetaFile{
+        DataFiles: []*backuppb.File{
+            {
+                Name:       "1.sst",
+                Sha256:     []byte("1.sst"),
+                StartKey:   []byte("start"),
+                EndKey:     []byte("end"),
+                EndVersion: 1,
+                Crc64Xor:   1,
+                TotalKvs:   2,
+                TotalBytes: 3,
+                Cf:         "write",
+                CipherIv:   []byte("1.sst"),
+            },
+        },
+    }, s, cipher)
+    stats := flushStatsFile(ctx, t, "stats", &backuppb.StatsFile{Blocks: []*backuppb.StatsBlock{
+        {
+            PhysicalId: 1,
+            JsonTable:  []byte("1"),
+        },
+        {
+            PhysicalId: 2,
+            JsonTable:  []byte("2"),
+        },
+    }}, s, cipher)
+    metaFile2 := &backuppb.MetaFile{
+        Schemas: []*backuppb.Schema{
+            {
+                Db:              []byte(`{"db_name":{"L":"test","O":"test"},"id":1,"state":5}`),
+                Table:           []byte(`{"id":2,"state":5}`),
+                Crc64Xor:        1,
+                TotalKvs:        2,
+                TotalBytes:      3,
+                TiflashReplicas: 4,
+                Stats:           []byte(`{"a":1}`),
+                StatsIndex:      []*backuppb.StatsFileIndex{stats},
+            },
+        },
+    }
+    file2 := flushMetaFile(ctx, t, "schema", metaFile2, s, cipher)
+
+    {
+        err = utils.DecodeMetaFile(ctx, s, cipher, &backuppb.MetaFile{MetaFiles: []*backuppb.File{file1}})
+        require.NoError(t, err)
+        content, err := s.ReadFile(ctx, "jsons/data.json")
+        require.NoError(t, err)
+        metaFile, err := utils.UnmarshalMetaFile(content)
+        require.NoError(t, err)
+        require.Equal(t, 1, len(metaFile.DataFiles))
+        require.Equal(t, "1.sst", metaFile.DataFiles[0].Name)
+        require.Equal(t, []byte("1.sst"), metaFile.DataFiles[0].Sha256)
+        require.Equal(t, []byte("start"), metaFile.DataFiles[0].StartKey)
+        require.Equal(t, []byte("end"), metaFile.DataFiles[0].EndKey)
+        require.Equal(t, uint64(1), metaFile.DataFiles[0].EndVersion)
+        require.Equal(t, uint64(1), metaFile.DataFiles[0].Crc64Xor)
+        require.Equal(t, uint64(2), metaFile.DataFiles[0].TotalKvs)
+        require.Equal(t, uint64(3), metaFile.DataFiles[0].TotalBytes)
+        require.Equal(t, "write", metaFile.DataFiles[0].Cf)
+        require.Equal(t, []byte("1.sst"), metaFile.DataFiles[0].CipherIv)
+    }
+
+    {
+        err = utils.DecodeMetaFile(ctx, s, cipher, &backuppb.MetaFile{MetaFiles: []*backuppb.File{file2}})
+        require.NoError(t, err)
+        {
+            content, err := s.ReadFile(ctx, "jsons/schema.json")
+            require.NoError(t, err)
+            metaFile, err := utils.UnmarshalMetaFile(content)
+            require.NoError(t, err)
+            require.Equal(t, 1, len(metaFile.Schemas))
+            require.Equal(t, metaFile2.Schemas[0].Db, metaFile.Schemas[0].Db)
+            require.Equal(t, metaFile2.Schemas[0].Table, metaFile.Schemas[0].Table)
+            require.Equal(t, uint64(1), metaFile.Schemas[0].Crc64Xor)
+            require.Equal(t, uint64(2), metaFile.Schemas[0].TotalKvs)
+            require.Equal(t, uint64(3), metaFile.Schemas[0].TotalBytes)
+            require.Equal(t, uint32(4), metaFile.Schemas[0].TiflashReplicas)
+            require.Equal(t, metaFile2.Schemas[0].Stats, metaFile.Schemas[0].Stats)
+            statsIndex := metaFile.Schemas[0].StatsIndex
+            require.Equal(t, 1, len(statsIndex))
+            require.Equal(t, stats.Name, statsIndex[0].Name)
+            require.Equal(t, stats.Sha256, statsIndex[0].Sha256)
+            require.Equal(t, stats.SizeEnc, statsIndex[0].SizeEnc)
+            require.Equal(t, stats.SizeOri, statsIndex[0].SizeOri)
+            require.Equal(t, stats.CipherIv, statsIndex[0].CipherIv)
+            require.Equal(t, stats.InlineData, statsIndex[0].InlineData)
+        }
+    }
+}
diff --git a/br/pkg/utils/json.go b/br/pkg/utils/json.go
index a29725db6b1b3..c866acd4998d8 100644
--- a/br/pkg/utils/json.go
+++ b/br/pkg/utils/json.go
@@ -28,6 +28,22 @@ func UnmarshalBackupMeta(data []byte) (*backuppb.BackupMeta, error) {
 	return fromJSONBackupMeta(jMeta)
 }
 
+func MarshalMetaFile(meta *backuppb.MetaFile) ([]byte, error) {
+    result, err := makeJSONMetaFile(meta)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+    return json.Marshal(result)
+}
+
+func UnmarshalMetaFile(data []byte) (*backuppb.MetaFile, error) {
+    jMeta := &jsonMetaFile{}
+    if err := json.Unmarshal(data, jMeta); err != nil {
+        return nil, errors.Trace(err)
+    }
+    return fromJSONMetaFile(jMeta)
+}
+
 type jsonValue interface{}
 
 type jsonFile struct {
@@ -195,3 +211,76 @@ func fromJSONBackupMeta(jMeta *jsonBackupMeta) (*backuppb.BackupMeta, error) {
 	}
 	return meta, nil
 }
+
+type jsonMetaFile struct {
+    DataFiles []*jsonFile     `json:"data_files,omitempty"`
+    Schemas   []*jsonSchema   `json:"schemas,omitempty"`
+    RawRanges []*jsonRawRange `json:"raw_ranges,omitempty"`
+    DDLs      []jsonValue     `json:"ddls,omitempty"`
+
+    *backuppb.MetaFile
+}
+
+func makeJSONMetaFile(meta *backuppb.MetaFile) (*jsonMetaFile, error) {
+    result := &jsonMetaFile{
+        MetaFile: meta,
+    }
+    for _, file := range meta.DataFiles {
+        result.DataFiles = append(result.DataFiles, makeJSONFile(file))
+    }
+    for _, rawRange := range meta.RawRanges {
+        result.RawRanges = append(result.RawRanges, makeJSONRawRange(rawRange))
+    }
+    for _, schema := range meta.Schemas {
+        s, err := makeJSONSchema(schema)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        result.Schemas = append(result.Schemas, s)
+    }
+    for _, ddl := range meta.Ddls {
+        var d jsonValue
+        if err := json.Unmarshal(ddl, &d); err != nil {
+            return nil, errors.Trace(err)
+        }
+        result.DDLs = append(result.DDLs, d)
+    }
+    return result, nil
+}
+
+func fromJSONMetaFile(jMeta *jsonMetaFile) (*backuppb.MetaFile, error) {
+    meta := jMeta.MetaFile
+    if meta == nil {
+        meta = &backuppb.MetaFile{}
+    }
+
+    for _, schema := range jMeta.Schemas {
+        s, err := fromJSONSchema(schema)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.Schemas = append(meta.Schemas, s)
+    }
+    for _, file := range jMeta.DataFiles {
+        f, err := fromJSONFile(file)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.DataFiles = append(meta.DataFiles, f)
+    }
+    for _, rawRange := range jMeta.RawRanges {
+        rng, err := fromJSONRawRange(rawRange)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.RawRanges = append(meta.RawRanges, rng)
+    }
+    for _, ddl := range jMeta.DDLs {
+        d, err := json.Marshal(ddl)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        meta.Ddls = append(meta.Ddls, d)
+    }
+    return meta, nil
+}
diff --git a/br/pkg/utils/json_test.go b/br/pkg/utils/json_test.go
index 3f03f287d92f1..a2a279a381dd1 100644
--- a/br/pkg/utils/json_test.go
+++ b/br/pkg/utils/json_test.go
@@ -244,7 +244,7 @@ var testMetaJSONs = [][]byte{
 	}`),
 }
 
-func TestEncodeAndDecode(t *testing.T) {
+func TestEncodeAndDecodeForBackupMeta(t *testing.T) {
 	for _, testMetaJSON := range testMetaJSONs {
 		meta, err := UnmarshalBackupMeta(testMetaJSON)
 		require.NoError(t, err)
@@ -253,3 +253,240 @@
 		require.JSONEq(t, string(testMetaJSON), string(metaJSON))
 	}
 }
+
+var testMetaFileJSONs = [][]byte{
+    []byte(`{
+    "data_files": [
+        {
+            "sha256": "aa5cefba077644dbb2aa1d7fae2a0f879b56411195ad62d18caaf4ec76fae48f",
+            "start_key": "7480000000000000365f720000000000000000",
+            "end_key": "7480000000000000365f72ffffffffffffffff00",
+            "name": "1_2_29_6e97c3b17c657c4413724f614a619f5b665b990187b159e7d2b92552076144b6_1617351201040_write.sst",
+            "end_version": 423978913229963260,
+            "crc64xor": 8093018294706077000,
+            "total_kvs": 1,
+            "total_bytes": 27,
+            "cf": "write",
+            "size": 1423
+        }
+    ],
+    "schemas": [
+        {
+            "table": {
+                "Lock": null,
+                "ShardRowIDBits": 0,
+                "auto_id_cache": 0,
+                "auto_inc_id": 0,
+                "auto_rand_id": 0,
+                "auto_random_bits": 0,
+                "charset": "utf8mb4",
+                "collate": "utf8mb4_bin",
+                "cols": [
+                    {
+                        "change_state_info": null,
+                        "comment": "",
+                        "default": null,
+                        "default_bit": null,
+                        "default_is_expr": false,
+                        "dependences": null,
+                        "generated_expr_string": "",
+                        "generated_stored": false,
+                        "hidden": false,
+                        "id": 1,
+                        "name": {
+                            "L": "pk",
+                            "O": "pk"
+                        },
+                        "offset": 0,
+                        "origin_default": null,
+                        "origin_default_bit": null,
+                        "state": 5,
+                        "type": {
+                            "charset": "utf8mb4",
+                            "collate": "utf8mb4_bin",
+                            "decimal": 0,
+                            "elems": null,
+                            "flag": 4099,
+                            "flen": 256,
+                            "tp": 15
+                        },
+                        "version": 2
+                    }
+                ],
+                "comment": "",
+                "common_handle_version": 1,
+                "compression": "",
+                "constraint_info": null,
+                "fk_info": null,
+                "id": 54,
+                "index_info": [
+                    {
+                        "comment": "",
+                        "id": 1,
+                        "idx_cols": [
+                            {
+                                "length": -1,
+                                "name": {
+                                    "L": "pk",
+                                    "O": "pk"
+                                },
+                                "offset": 0
+                            }
+                        ],
+                        "idx_name": {
+                            "L": "primary",
+                            "O": "PRIMARY"
+                        },
+                        "index_type": 1,
+                        "is_global": false,
+                        "is_invisible": false,
+                        "is_primary": true,
+                        "is_unique": true,
+                        "state": 5,
+                        "tbl_name": {
+                            "L": "",
+                            "O": ""
+                        }
+                    }
+                ],
+                "is_columnar": false,
+                "is_common_handle": true,
+                "max_col_id": 1,
+                "max_cst_id": 0,
+                "max_idx_id": 1,
+                "max_shard_row_id_bits": 0,
+                "name": {
+                    "L": "test",
+                    "O": "test"
+                },
+                "partition": null,
+                "pk_is_handle": false,
+                "pre_split_regions": 0,
+                "sequence": null,
+                "state": 5,
+                "tiflash_replica": null,
+                "update_timestamp": 423978913176223740,
+                "version": 4,
+                "view": null
+            },
+            "db": {
+                "charset": "utf8mb4",
+                "collate": "utf8mb4_bin",
+                "db_name": {
+                    "L": "test",
"test", + "O": "test" + }, + "id": 1, + "state": 5 + }, + "crc64xor": 8093018294706077000, + "total_kvs": 1, + "total_bytes": 27 + } + ], + "ddls": ["ddl1","ddl2"], + "backup_ranges": [{"start_key":"MTIz"}] +} +`), + []byte(`{ + "data_files": [ + { + "sha256": "5759c4c73789d6ecbd771b374d42e72a309245d31911efc8553423303c95f22c", + "end_key": "7480000000000000ff0500000000000000f8", + "name": "1_4_2_default.sst", + "total_kvs": 153, + "total_bytes": 824218, + "cf": "default", + "size": 44931 + }, + { + "sha256": "87597535ce0edbc9a9ef124777ad1d23388467e60c0409309ad33af505c1ea5b", + "start_key": "7480000000000000ff0f00000000000000f8", + "end_key": "7480000000000000ff1100000000000000f8", + "name": "1_16_8_58be9b5dfa92efb6a7de2127c196e03c5ddc3dd8ff3a9b3e7cd4c4aa7c969747_1617689203876_default.sst", + "total_kvs": 1, + "total_bytes": 396, + "cf": "default", + "size": 1350 + }, + { + "sha256": "97bd1b07f9cc218df089c70d454e23c694113fae63a226ae0433165a9c3d75d9", + "start_key": "7480000000000000ff1700000000000000f8", + "end_key": "7480000000000000ff1900000000000000f8", + "name": "1_24_12_72fa67937dd58d654197abadeb9e633d92ebccc5fd993a8e54819a1bd7f81a8c_1617689203853_default.sst", + "total_kvs": 35, + "total_bytes": 761167, + "cf": "default", + "size": 244471 + }, + { + "sha256": "6dcb6ba2ff11f4e7db349effc98210ba372bebbf2470e6cd600ed5f2294330e7", + "start_key": "7480000000000000ff3100000000000000f8", + "end_key": "7480000000000000ff3300000000000000f8", + "name": "1_50_25_2f1abd76c185ec355039f5b4a64f04637af91f80e6cb05099601ec6b9b1910e8_1617689203867_default.sst", + "total_kvs": 22, + "total_bytes": 1438283, + "cf": "default", + "size": 1284851 + }, + { + "sha256": "ba603af7ecb2e21c8f145d995ae85eea3625480cd8186d4cffb53ab1974d8679", + "start_key": "7480000000000000ff385f72ffffffffffffffffff0000000000fb", + "name": "1_2_33_07b745c3d5a614ed6cc1cf21723b161fcb3e8e7d537546839afd82a4f392988c_1617689203895_default.sst", + "total_kvs": 260000, + "total_bytes": 114425025, + "cf": "default", + "size": 66048845 + } + ], + "raw_ranges": [ + { + "cf": "default" + } + ], + "backup_ranges": [{"start_key":"MTIz"}] +}`), + []byte(`{ + "data_files": [ + { + "sha256": "3ae857ef9b379d498ae913434f1d47c3e90a55f3a4cd9074950bfbd163d5e5fc", + "start_key": "7480000000000000115f720000000000000000", + "end_key": "7480000000000000115f72ffffffffffffffff00", + "name": "1_20_9_36adb8cedcd7af34708edff520499e712e2cfdcb202f5707dc9305a031d55a98_1675066275424_write.sst", + "end_version": 439108573623222300, + "crc64xor": 16261462091570213000, + "total_kvs": 15, + "total_bytes": 1679, + "cf": "write", + "size": 2514, + "cipher_iv": "56MTbxA4CaNILpirKnBxUw==" + } + ], + "schemas": [ + { + "db": { + "charset": "utf8mb4", + "collate": "utf8mb4_bin", + "db_name": { + "L": "test", + "O": "test" + }, + "id": 1, + "policy_ref_info": null, + "state": 5 + } + } + ], + "backup_ranges": [{"start_key":"MTIz"}] + }`), +} + +func TestEncodeAndDecodeForMetaFile(t *testing.T) { + for _, testMetaFileJSON := range testMetaFileJSONs { + meta, err := UnmarshalMetaFile(testMetaFileJSON) + require.NoError(t, err) + metaJSON, err := MarshalMetaFile(meta) + require.NoError(t, err) + require.JSONEq(t, string(testMetaFileJSON), string(metaJSON)) + } +} diff --git a/br/tests/br_debug_meta/run.sh b/br/tests/br_debug_meta/run.sh index 9fc05b12cbaf3..4e93a54b1d709 100644 --- a/br/tests/br_debug_meta/run.sh +++ b/br/tests/br_debug_meta/run.sh @@ -32,36 +32,49 @@ run_sql "$table_region_sql" row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') 
-# backup table
+# backup table with backupmetav2
 echo "backup start..."
 run_br --pd $PD_ADDR backup table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB"
 
-run_sql "DROP DATABASE $DB;"
-
 # Test validate decode
 run_br validate decode -s "local://$TEST_DIR/$DB"
 
 # should generate backupmeta.json
-if [ ! -f "$TEST_DIR/$DB/backupmeta.json" ]; then
+if [ ! -f "$TEST_DIR/$DB/jsons/backupmeta.json" ]; then
+    echo "TEST: [$TEST_NAME] decode failed!"
+    exit 1
+fi
+
+# backup table with backupmetav1
+echo "backup start..."
+run_br --pd $PD_ADDR backup table --db $DB --table $TABLE -s "local://$TEST_DIR/${DB}_2" --use-backupmeta-v2=false
+
+
+# Test validate decode
+run_br validate decode -s "local://$TEST_DIR/${DB}_2"
+
+# should generate backupmeta.json
+if [ ! -f "$TEST_DIR/${DB}_2/jsons/backupmeta.json" ]; then
     echo "TEST: [$TEST_NAME] decode failed!"
     exit 1
 fi
 
 # Test validate encode
-run_br validate encode -s "local://$TEST_DIR/$DB"
+run_br validate encode -s "local://$TEST_DIR/${DB}_2"
 
 # should generate backupmeta_from_json
-if [ ! -f "$TEST_DIR/$DB/backupmeta_from_json" ]; then
+if [ ! -f "$TEST_DIR/${DB}_2/backupmeta_from_json" ]; then
     echo "TEST: [$TEST_NAME] encode failed!"
     exit 1
 fi
 
 # replace backupmeta
-mv "$TEST_DIR/$DB/backupmeta_from_json" "$TEST_DIR/$DB/backupmeta"
+mv "$TEST_DIR/${DB}_2/backupmeta_from_json" "$TEST_DIR/${DB}_2/backupmeta"
 
 # restore table
 echo "restore start..."
-run_br --pd $PD_ADDR restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB"
+run_sql "DROP DATABASE $DB;"
+run_br --pd $PD_ADDR restore table --db $DB --table $TABLE -s "local://$TEST_DIR/${DB}_2"
 
 row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
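-- 
A minimal usage sketch of the decode/encode round trip this patch touches
(illustrative only, not part of the change; the backup path /tmp/backup is
hypothetical, and the commands mirror br/tests/br_debug_meta/run.sh):

  # Decode: with this patch, the root backupmeta and every metafile it
  # references (FileIndex, RawRangeIndex, SchemaIndex) are written as JSON
  # under jsons/ in the backup storage.
  br validate decode -s "local:///tmp/backup"
  ls /tmp/backup/jsons/backupmeta.json

  # Encode: rebuild a protobuf backupmeta from the decoded JSON. Note that
  # the patch rejects backupmeta v2 here, so encoding only works for v1.
  br validate encode -s "local:///tmp/backup"
  ls /tmp/backup/backupmeta_from_json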