diff --git a/internal/data/app.go b/internal/data/app.go index 90cac6e..1a54a84 100644 --- a/internal/data/app.go +++ b/internal/data/app.go @@ -18,13 +18,13 @@ type appRepoImpl struct { curd biz.CURD } -func NewAppRepoImpl(data *Data,curd biz.CURD, local *LayeredCache, cfg *config.Config) biz.AppRepo { - return &appRepoImpl{data: data,curd: curd, local: local, cfg: cfg} +func NewAppRepoImpl(data *Data, curd biz.CURD, local *LayeredCache, cfg *config.Config) biz.AppRepo { + return &appRepoImpl{data: data, curd: curd, local: local, cfg: cfg} } func (r *appRepoImpl) Add(ctx context.Context, apps *api.Apps) error { - if err := r.curd.Add(ctx, apps,false); err != nil { + if err := r.curd.Add(ctx, apps, false); err != nil { return fmt.Errorf("add app failed: %w", err) } key := r.cfg.GetAPPAccessKey(apps.AccessKey) @@ -35,8 +35,8 @@ func (r *appRepoImpl) Add(ctx context.Context, apps *api.Apps) error { func (r *appRepoImpl) Get(ctx context.Context, key string) (*api.Apps, error) { app := &api.Apps{} - err := r.curd.Get(ctx, app,false, "access_key = ? or appid=?", key, key) - if err != nil||app.Appid=="" { + err := r.curd.Get(ctx, app, false, "access_key = ? or appid=?", key, key) + if err != nil || app.Appid == "" { return nil, fmt.Errorf("get app failed: %w", err) } return app, err @@ -53,38 +53,71 @@ func (r *appRepoImpl) Del(ctx context.Context, key string) error { return err } _ = r.local.Del(ctx, r.cfg.GetAPPAccessKey(app.AccessKey)) - return r.curd.Del(ctx, app,false) + return r.curd.Del(ctx, app, false) } func (r *appRepoImpl) Patch(ctx context.Context, model *api.Apps) error { - return r.curd.Update(ctx, model,false) + return r.curd.Update(ctx, model, false) } func (r *appRepoImpl) List(ctx context.Context, tags []string, status []api.APPStatus, page, pageSize int32) ([]*api.Apps, error) { apps := make([]*api.Apps, 0) - query:="" - conds:=make([]interface{},0) - if len(tags)>0{ - query="tags in (?)" - conds=append(conds,tags) + query := "" + conds := make([]interface{}, 0) + if len(tags) > 0 { + query = "tags in (?)" + conds = append(conds, tags) } - if len(status)>0{ - if query!=""{ - query+=" and " + if len(status) > 0 { + if query != "" { + query += " and " } - query+="status in (?)" - conds=append(conds,status) + query += "status in (?)" + conds = append(conds, status) } - pagination:= &tiga.Pagination{ - Page: page, + pagination := &tiga.Pagination{ + Page: page, PageSize: pageSize, - Query: query, - Args: conds, + Query: query, + Args: conds, } - err := r.curd.List(ctx,&apps, pagination) + err := r.curd.List(ctx, &apps, pagination) if err != nil { return nil, fmt.Errorf("list app failed: %w", err) - + } return apps, nil } + +func (a *appRepoImpl) GetSecret(ctx context.Context, accessKey string) (string, error) { + cacheKey := a.cfg.GetAPPAccessKey(accessKey) + secretBytes, err := a.local.Get(ctx, cacheKey) + secret := string(secretBytes) + if err != nil { + apps, err := a.Get(ctx, accessKey) + if err != nil { + return "", err + } + secret = apps.Secret + + // _ = a.rdb.Set(ctx, cacheKey, secret, time.Hour*24*3) + _ = a.local.Set(ctx, cacheKey, []byte(secret), time.Hour*24*3) + } + return secret, nil +} +func (a *appRepoImpl) GetAppid(ctx context.Context, accessKey string) (string, error) { + cacheKey := a.cfg.GetAppidKey(accessKey) + secretBytes, err := a.local.Get(ctx, cacheKey) + appid := string(secretBytes) + if err != nil { + apps, err := a.Get(ctx, accessKey) + if err != nil { + return "", err + } + appid = apps.Appid + + // _ = a.rdb.Set(ctx, cacheKey, secret, time.Hour*24*3) 
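+		// On a cache miss, the appid was just loaded from the database via a.Get above;
+		// backfill the local layered cache so subsequent lookups skip the DB for three days.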
+ _ = a.local.Set(ctx, cacheKey, []byte(appid), time.Hour*24*3) + } + return appid, nil +} diff --git a/internal/data/app_test.go b/internal/data/app_test.go new file mode 100644 index 0000000..3af2229 --- /dev/null +++ b/internal/data/app_test.go @@ -0,0 +1,186 @@ +package data + +import ( + "context" + "crypto/rand" + "fmt" + "testing" + "time" + + "github.com/agiledragon/gomonkey/v2" + "github.com/begonia-org/begonia" + cfg "github.com/begonia-org/begonia/config" + "github.com/begonia-org/begonia/internal/pkg/config" + "github.com/begonia-org/begonia/internal/pkg/logger" + api "github.com/begonia-org/go-sdk/api/app/v1" + "github.com/cockroachdb/errors" + c "github.com/smartystreets/goconvey/convey" + "github.com/spark-lence/tiga" + "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var appid = "" +var accessKey = "" +var secret = "" +var appName = "" + +func generateRandomString(n int) (string, error) { + const lettersAndDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + b := make([]byte, n) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("Failed to generate random string: %w", err) + } + + for i := 0; i < n; i++ { + // 将随机字节转换为lettersAndDigits中的一个有效字符 + b[i] = lettersAndDigits[b[i]%byte(len(lettersAndDigits))] + } + + return string(b), nil +} +func addTest(t *testing.T) { + c.Convey("test app add success", t, func() { + t.Log("add test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAppRepo(cfg.ReadConfig(env), logger.Log) + snk, _ := tiga.NewSnowflake(1) + access, _ := generateRandomString(32) + accessKey = access + secret, _ = generateRandomString(62) + appid = snk.GenerateIDString() + appName = fmt.Sprintf("app-%s", time.Now().Format("20060102150405")) + err := repo.Add(context.TODO(), &api.Apps{ + Appid: appid, + AccessKey: access, + Secret: secret, + Status: api.APPStatus_APP_ENABLED, + IsDeleted: false, + Name: appName, + Description: "test", + CreatedAt: timestamppb.New(time.Now()), + UpdatedAt: timestamppb.New(time.Now()), + }) + + c.So(err, c.ShouldBeNil) + cfg := config.NewConfig(cfg.ReadConfig(env)) + cacheKey := cfg.GetAPPAccessKey(access) + + value, err := layered.Get(context.Background(), cacheKey) + c.So(err, c.ShouldBeNil) + c.So(string(value), c.ShouldEqual, secret) + + patch := gomonkey.ApplyFuncReturn((*LayeredCache).Get, nil, errors.New("error")) + + defer patch.Reset() + val, err := repo.GetSecret(context.Background(), access) + c.So(err, c.ShouldBeNil) + c.So(val, c.ShouldEqual, secret) + + }) +} +func getTest(t *testing.T) { + c.Convey("test app get success", t, func() { + t.Log("get test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAppRepo(cfg.ReadConfig(env), logger.Log) + app, err := repo.Get(context.TODO(), appid) + c.So(err, c.ShouldBeNil) + c.So(app.Appid, c.ShouldEqual, appid) + app, err = repo.Get(context.TODO(), accessKey) + c.So(err, c.ShouldBeNil) + c.So(app.AccessKey, c.ShouldEqual, accessKey) + _, err = repo.Get(context.TODO(), "123") + c.So(err, c.ShouldNotBeNil) + + }) +} + +func duplicateNameTest(t *testing.T) { + c.Convey("test app add duplicate name", t, func() { + t.Log("add test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAppRepo(cfg.ReadConfig(env), logger.Log) + // snk, _ := tiga.NewSnowflake(1) + access, _ := generateRandomString(32) + accessKey = access + secret, _ = generateRandomString(62) + // appid = snk.GenerateIDString() + err := 
repo.Add(context.TODO(), &api.Apps{ + Appid: appid, + AccessKey: access, + Secret: secret, + Status: api.APPStatus_APP_ENABLED, + IsDeleted: false, + Name: fmt.Sprintf("app-%s", time.Now().Format("20060102150405")), + Description: "test", + CreatedAt: timestamppb.New(time.Now()), + UpdatedAt: timestamppb.New(time.Now()), + }) + + c.So(err, c.ShouldNotBeNil) + c.So(err.Error(), c.ShouldContainSubstring, "Duplicate entry") + }) +} +func patchTest(t *testing.T) { + c.Convey("test app patch success", t, func() { + t.Log("patch test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAppRepo(cfg.ReadConfig(env), logger.Log) + err := repo.Patch(context.TODO(), &api.Apps{ + Appid: appid, + AccessKey: accessKey, + Secret: secret, + Status: api.APPStatus_APP_DISABLED, + Name: fmt.Sprintf("app-%s", time.Now().Format("20060102150405")), + Description: "test UPDATE", + CreatedAt: timestamppb.New(time.Now()), + UpdatedAt: timestamppb.New(time.Now()), + UpdateMask: &fieldmaskpb.FieldMask{ + Paths: []string{"status", "description"}, + }, + }) + + c.So(err, c.ShouldBeNil) + t.Log("get test", appid) + updated,err:=repo.Get(context.Background(),appid) + c.So(err,c.ShouldBeNil) + c.So(updated.Status,c.ShouldEqual,api.APPStatus_APP_DISABLED) + c.So(updated.Description,c.ShouldEqual,"test UPDATE") + c.So(updated.Name,c.ShouldEqual,appName) + }) + +} +func delTest(t *testing.T) { + c.Convey("test app delete success", t, func() { + t.Log("delete test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAppRepo(cfg.ReadConfig(env), logger.Log) + err := repo.Del(context.TODO(), appid) + c.So(err, c.ShouldBeNil) + _, err = repo.Get(context.TODO(), appid) + c.So(err, c.ShouldNotBeNil) + }) +} +func TestApp(t *testing.T) { + t.Run("add app", addTest) + t.Run("get app", getTest) + t.Run("add duplicate name", duplicateNameTest) + t.Run("patch app", patchTest) + t.Run("del app", delTest) +} diff --git a/internal/data/authz.go b/internal/data/authz.go index 8fb5a8c..b891e57 100644 --- a/internal/data/authz.go +++ b/internal/data/authz.go @@ -5,7 +5,6 @@ import ( "time" "github.com/begonia-org/begonia/internal/biz" - api "github.com/begonia-org/go-sdk/api/user/v1" "github.com/begonia-org/go-sdk/logger" ) @@ -15,41 +14,27 @@ type authzRepo struct { local *LayeredCache } -func NewAuthzRepo(data *Data, log logger.Logger, local *LayeredCache) biz.AuthzRepo { +func NewAuthzRepoImpl(data *Data, log logger.Logger, local *LayeredCache) biz.AuthzRepo { return &authzRepo{data: data, log: log, local: local} } -func (r *authzRepo) ListUsers(ctx context.Context, page, pageSize int32, conds ...interface{}) ([]*api.Users, error) { - users := make([]*api.Users, 0) - if err := r.data.List(&api.Users{}, &users, page, pageSize, conds...); err != nil { - return nil, err - } - return users, nil - -} -func (r *authzRepo) GetUser(ctx context.Context, conds ...interface{}) (*api.Users, error) { - user := &api.Users{} - err := r.data.Get(user, user, conds...) 
- if err != nil { - return nil, err - } - return user, nil -} -func (r *authzRepo) CreateUsers(ctx context.Context, users []*api.Users) error { - sources := NewSourceTypeArray(users) - return r.data.CreateInBatches(sources) -} -func (t *authzRepo) UpdateUsers(ctx context.Context, models []*api.Users) error { - sources := NewSourceTypeArray(models) - - return t.data.BatchUpdates(ctx,sources) -} +// func (r *authzRepo) ListUsers(ctx context.Context, page, pageSize int32, conds ...interface{}) ([]*api.Users, error) { +// users := make([]*api.Users, 0) +// if err := r.data.List(&api.Users{}, &users, page, pageSize, conds...); err != nil { +// return nil, err +// } +// return users, nil -func (t *authzRepo) DeleteUsers(ctx context.Context, models []*api.Users) error { - sources := NewSourceTypeArray(models) +// } +// func (r *authzRepo) GetUser(ctx context.Context, conds ...interface{}) (*api.Users, error) { +// user := &api.Users{} +// err := r.data.Get(user, user, conds...) +// if err != nil { +// return nil, err +// } +// return user, nil +// } - return t.data.BatchDelete(sources) -} func (t *authzRepo) CacheToken(ctx context.Context, key, token string, exp time.Duration) error { return t.local.Set(ctx, key, []byte(token), exp) } @@ -63,7 +48,6 @@ func (t *authzRepo) GetToken(ctx context.Context, key string) string { } func (t *authzRepo) DelToken(ctx context.Context, key string) error { err := t.local.Del(ctx, key) - // err := t.data.DelCache(ctx, key) return err } func (t *authzRepo) CheckInBlackList(ctx context.Context, token string) (bool, error) { @@ -76,4 +60,3 @@ func (t *authzRepo) PutBlackList(ctx context.Context, token string) error { return t.local.AddToFilter(ctx, key, []byte(token)) } - diff --git a/internal/data/authz_test.go b/internal/data/authz_test.go new file mode 100644 index 0000000..cc0dd2c --- /dev/null +++ b/internal/data/authz_test.go @@ -0,0 +1,104 @@ +package data + +import ( + "context" + "testing" + "time" + + "github.com/begonia-org/begonia" + cfg "github.com/begonia-org/begonia/config" + "github.com/begonia-org/begonia/internal/pkg/logger" + c "github.com/smartystreets/goconvey/convey" + "github.com/spark-lence/tiga" +) +var token="" +func testCacheToken(t *testing.T) { + + c.Convey("test cache token", t, func() { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAuthzRepo(cfg.ReadConfig(env), logger.Log) + snk, _ := tiga.NewSnowflake(1) + token = snk.GenerateIDString() + err := repo.CacheToken(context.TODO(), "test:token",token, 5 *time.Second) + c.So(err, c.ShouldBeNil) + }) +} +func testGetToken(t *testing.T) { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAuthzRepo(cfg.ReadConfig(env), logger.Log) + c.Convey("test get token", t, func() { + + tk:=repo.GetToken(context.TODO(), "test:token") + c.So(tk, c.ShouldNotBeEmpty) + c.So(tk,c.ShouldEqual,token) + }) + c.Convey("test token expiration",t,func () { + time.Sleep(7*time.Second) + tk:=repo.GetToken(context.TODO(), "test:token") + c.So(tk, c.ShouldBeEmpty) + }) +} + +func deleteToken(t *testing.T) { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAuthzRepo(cfg.ReadConfig(env), logger.Log) + c.Convey("test delete exp token", t, func() { + err := repo.DelToken(context.TODO(), "test:token") + c.So(err, c.ShouldBeNil) + }) + c.Convey("test token delete", t, func() { + snk, _ := tiga.NewSnowflake(1) + token = snk.GenerateIDString() + err := repo.CacheToken(context.TODO(), "test:token2",token, 5 *time.Second) + c.So(err, 
c.ShouldBeNil) + err = repo.DelToken(context.TODO(), "test:token2") + c.So(err, c.ShouldBeNil) + tk:=repo.GetToken(context.TODO(), "test:token2") + c.So(tk, c.ShouldBeEmpty) + }) +} +func testPutBlacklist(t *testing.T) { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAuthzRepo(cfg.ReadConfig(env), logger.Log) + c.Convey("test put blacklist", t, func() { + err := repo.PutBlackList(context.TODO(), token) + c.So(err, c.ShouldBeNil) + }) +} +func testCheckInBlackList(t *testing.T){ + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewAuthzRepo(cfg.ReadConfig(env), logger.Log) + c.Convey("test check in blacklist", t, func() { + + b,err:=repo.CheckInBlackList(context.TODO(), token) + c.So(err, c.ShouldBeNil) + c.So(b,c.ShouldBeTrue) + snk, _ := tiga.NewSnowflake(1) + + b,err=repo.CheckInBlackList(context.TODO(), snk.GenerateIDString()) + c.So(err, c.ShouldBeNil) + c.So(b,c.ShouldBeFalse) + }) +} +func TestAuthzRepo(t *testing.T) { +t.Run("testCacheToken",testCacheToken) +t.Run("testGetToken",testGetToken) +t.Run("deleteToken",deleteToken) +t.Run("testPutBlacklist",testPutBlacklist) +t.Run("testCheckInBlackList",testCheckInBlackList) +} diff --git a/internal/data/cache.go b/internal/data/cache.go index 514aca7..0b35e3e 100644 --- a/internal/data/cache.go +++ b/internal/data/cache.go @@ -21,41 +21,44 @@ type LayeredCache struct { filters glc.LayeredCuckooFilter onceOnStart sync.Once } - -func NewLayeredCache(ctx context.Context, data *Data, config *config.Config, log logger.Logger) *LayeredCache { - - kvWatcher := source.NewWatchOptions([]interface{}{config.GetKeyValuePubsubKey()}) - strategy := glc.CacheReadStrategy(config.GetMultiCacheReadStrategy()) - KvOptions := glc.LayeredBuildOptions{ - RDB: data.rdb.GetClient(), - Strategy: glc.CacheReadStrategy(strategy), - Watcher: kvWatcher, - Channel: config.GetKeyValuePubsubKey(), - Log: log.Logurs(), - KeyPrefix: config.GetKeyValuePrefix(), - } - kv, err := glc.NewKeyValueCache(ctx, KvOptions, 5*100*100) - if err != nil { - panic(err) - - } - filterWatcher := source.NewWatchOptions([]interface{}{config.GetFilterPubsubKey()}) - filterOptions := glc.LayeredBuildOptions{ - RDB: data.rdb.GetClient(), - Strategy: glc.LocalOnly, - Watcher: filterWatcher, - Channel: config.GetFilterPubsubKey(), - Log: log.Logurs(), - KeyPrefix: config.GetFilterPrefix(), - } - filter := glc.NewLayeredCuckoo(&filterOptions, gocuckoo.CuckooBuildOptions{ - Entries: 100000, - BucketSize: 4, - MaxIterations: 20, - Expansion: 2, +var layered *LayeredCache + +func NewLayeredCache(data *Data, config *config.Config, log logger.Logger) *LayeredCache { + onceLayered.Do(func() { + kvWatcher := source.NewWatchOptions([]interface{}{config.GetKeyValuePubsubKey()}) + strategy := glc.CacheReadStrategy(config.GetMultiCacheReadStrategy()) + KvOptions := glc.LayeredBuildOptions{ + RDB: data.rdb.GetClient(), + Strategy: glc.CacheReadStrategy(strategy), + Watcher: kvWatcher, + Channel: config.GetKeyValuePubsubKey(), + Log: log.Logurs(), + KeyPrefix: config.GetKeyValuePrefix(), + } + kv, err := glc.NewKeyValueCache(context.Background(), KvOptions, 5*100*100) + if err != nil { + panic(err) + + } + filterWatcher := source.NewWatchOptions([]interface{}{config.GetFilterPubsubKey()}) + filterOptions := glc.LayeredBuildOptions{ + RDB: data.rdb.GetClient(), + Strategy: glc.LocalOnly, + Watcher: filterWatcher, + Channel: config.GetFilterPubsubKey(), + Log: log.Logurs(), + KeyPrefix: config.GetFilterPrefix(), + } + filter := 
glc.NewLayeredCuckoo(&filterOptions, gocuckoo.CuckooBuildOptions{ + Entries: 100000, + BucketSize: 4, + MaxIterations: 20, + Expansion: 2, + }) + layered= &LayeredCache{kv: kv, data: data, config: config, log: log, mux: sync.Mutex{}, onceOnStart: sync.Once{}, filters: filter} }) - local := &LayeredCache{kv: kv, data: data, config: config, log: log, mux: sync.Mutex{}, onceOnStart: sync.Once{}, filters: filter} - return local + return layered + } func (l *LayeredCache) OnStart() { l.onceOnStart.Do(func() { diff --git a/internal/data/curd.go b/internal/data/curd.go index 434a4c5..b7e2537 100644 --- a/internal/data/curd.go +++ b/internal/data/curd.go @@ -67,7 +67,7 @@ func (c *curdImpl) Get(ctx context.Context, model interface{},needDecrypt bool, query = fmt.Sprintf("(%s) and is_deleted=0", query) } - if err := c.db.Find(ctx,model, query, args...); err != nil { + if err := c.db.First(ctx,model, query, args...); err != nil { return fmt.Errorf("get model failed: %w", err) } if needDecrypt { @@ -103,6 +103,7 @@ func (c *curdImpl) Update(ctx context.Context, model biz.Model, needEncrypt bool } } + err = c.db.UpdateSelectColumns(ctx,fmt.Sprintf("%s=%s", key, val), model, paths...) if err != nil { return fmt.Errorf("update model for %s=%v failed: %w", key, val, err) diff --git a/internal/data/data.go b/internal/data/data.go index 7ad9d62..1ba7ddf 100644 --- a/internal/data/data.go +++ b/internal/data/data.go @@ -27,6 +27,7 @@ func GetRDBClient(rdb *tiga.RedisDao) *redis.Client { var onceRDB sync.Once var onceMySQL sync.Once var onceEtcd sync.Once +var onceLayered sync.Once var rdb *tiga.RedisDao var mysql *tiga.MySQLDao var etcd *tiga.EtcdDao @@ -61,9 +62,9 @@ var ProviderSet = wire.NewSet(NewMySQL, NewCurdImpl, NewLayeredCache, - NewAuthzRepo, + NewAuthzRepoImpl, NewUserRepoImpl, - NewFileRepoImpl, + // NewFileRepoImpl, NewEndpointRepoImpl, NewAppRepoImpl, NewDataOperatorRepo) diff --git a/internal/data/endpoint.go b/internal/data/endpoint.go index fc428d7..73587b5 100644 --- a/internal/data/endpoint.go +++ b/internal/data/endpoint.go @@ -11,6 +11,7 @@ import ( "github.com/begonia-org/begonia/internal/pkg/config" api "github.com/begonia-org/go-sdk/api/endpoint/v1" clientv3 "go.etcd.io/etcd/client/v3" + "golang.org/x/exp/slices" ) type endpointRepoImpl struct { @@ -22,10 +23,6 @@ func NewEndpointRepoImpl(data *Data, cfg *config.Config) gateway.EndpointRepo { return &endpointRepoImpl{data: data, cfg: cfg} } -func (r *endpointRepoImpl) AddEndpoint(ctx context.Context, endpoints []*api.Endpoints) error { - sources := NewSourceTypeArray(endpoints) - return r.data.CreateInBatches(sources) -} func (r *endpointRepoImpl) Get(ctx context.Context, key string) (string, error) { return r.data.etcd.GetString(ctx, key) @@ -33,8 +30,6 @@ func (r *endpointRepoImpl) Get(ctx context.Context, key string) (string, error) func (e *endpointRepoImpl) Del(ctx context.Context, id string) error { srvKey := e.cfg.GetServiceKey(id) ops := make([]clientv3.Op, 0) - // details := getDetailsKey(e.cfg, id) - // ops = append(ops, clientv3.OpDelete(details)) ops = append(ops, clientv3.OpDelete(srvKey)) kvs, err := e.data.etcd.GetWithPrefix(ctx, filepath.Join(e.cfg.GetEndpointsPrefix(), "tags")) if err != nil { @@ -67,7 +62,7 @@ func (e *endpointRepoImpl) Put(ctx context.Context, endpoint *api.Endpoints) err ops = append(ops, clientv3.OpPut(srvKey, string(details))) ok, err := e.data.PutEtcdWithTxn(ctx, ops) if err != nil { - log.Printf("put endpoint fail: %v", err) + log.Printf("put endpoint fail: %s", err.Error()) return 
fmt.Errorf("put endpoint fail: %w", err) } if !ok { @@ -121,7 +116,40 @@ func (e *endpointRepoImpl) List(ctx context.Context, keys []string) ([]*api.Endp } return endpoints, nil } +func (e *endpointRepoImpl) patchTags(oldTags []interface{}, newTags []interface{}, id string) []clientv3.Op { + ops := make([]clientv3.Op, 0) + for _, tag := range oldTags { + if val, ok := tag.(string); ok { + // Del old tags if not in new tags + if !slices.Contains(newTags, tag) { + tagKey := e.cfg.GetTagsKey(val, id) + ops = append(ops, clientv3.OpDelete(tagKey)) + } + } + } + for _, tag := range newTags { + if val, ok := tag.(string); ok { + tagKey := e.cfg.GetTagsKey(val, id) + ops = append(ops, clientv3.OpPut(tagKey, e.cfg.GetServiceKey(id))) + } + + } + return ops +} +func (e *endpointRepoImpl) getTags(v interface{}) ([]interface{}, error) { + tags := make([]interface{}, 0) + if val, ok := v.([]interface{}); ok { + tags = val + } else if val, ok := v.([]string); ok { + for _, tag := range val { + tags = append(tags, tag) + } + } else { + return nil, fmt.Errorf("tags type error") + } + return tags, nil +} func (e *endpointRepoImpl) Patch(ctx context.Context, id string, patch map[string]interface{}) error { origin, err := e.Get(ctx, e.cfg.GetServiceKey(id)) if err != nil { @@ -137,20 +165,19 @@ func (e *endpointRepoImpl) Patch(ctx context.Context, id string, patch map[strin } ops := make([]clientv3.Op, 0) // 更新tags - if tags, ok := patch["tags"]; ok { - // 先删除原有tags - if originTags := originConfig["tags"]; originTags != nil { - for _, tag := range originTags.([]string) { - tagKey := e.cfg.GetTagsKey(tag, id) - ops = append(ops, clientv3.OpDelete(tagKey)) - } + if tags, ok := patch["tags"]; ok && tags != nil { + oldTags,err := e.getTags(originConfig["tags"]) + if err!=nil{ + return fmt.Errorf("get old tags error: %w", err) } - // 添加新tags - for _, tag := range tags.([]string) { - tagKey := e.cfg.GetTagsKey(tag, id) - ops = append(ops, clientv3.OpPut(tagKey, e.cfg.GetServiceKey(id))) + newTags,err := e.getTags(tags) + if err!=nil{ + return fmt.Errorf("get new tags error: %w", err) } + + ops = append(ops, e.patchTags(oldTags, newTags, id)...) + } for k, v := range patch { originConfig[k] = v @@ -185,26 +212,24 @@ func (e *endpointRepoImpl) PutTags(ctx context.Context, id string, tags []string } ops := make([]clientv3.Op, 0) - filters := make(map[string]bool) - - // 先删除原有tags + srvKey := e.cfg.GetServiceKey(id) + oldTags := make([]interface{}, 0) + newTags := make([]interface{}, 0) for _, tag := range endpoint.Tags { - if _, ok := filters[tag]; ok { - continue - } - filters[tag] = true - tagKey := e.cfg.GetTagsKey(tag, id) - ops = append(ops, clientv3.OpDelete(tagKey)) + oldTags = append(oldTags, tag) } - srvKey := e.cfg.GetServiceKey(id) for _, tag := range tags { - if _, ok := filters[tag]; ok { - continue - } - filters[tag] = true - tagKey := e.cfg.GetTagsKey(tag, id) - ops = append(ops, clientv3.OpPut(tagKey, srvKey)) + newTags = append(newTags, tag) + } + ops = append(ops, e.patchTags(oldTags, newTags, id)...) 
+ endpoint.Tags = tags + updated, err := json.Marshal(endpoint) + if err != nil { + return fmt.Errorf("marshal endpoint fail when update tags: %w", err) + } + ops = append(ops, clientv3.OpPut(srvKey, string(updated))) + ok, err := e.data.PutEtcdWithTxn(ctx, ops) if err != nil { return fmt.Errorf("put tags fail: %w", err) diff --git a/internal/data/endpoint_test.go b/internal/data/endpoint_test.go new file mode 100644 index 0000000..11fecc5 --- /dev/null +++ b/internal/data/endpoint_test.go @@ -0,0 +1,216 @@ +package data + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/begonia-org/begonia" + cfg "github.com/begonia-org/begonia/config" + "github.com/begonia-org/begonia/internal/pkg/config" + "github.com/begonia-org/begonia/internal/pkg/logger" + goloadbalancer "github.com/begonia-org/go-loadbalancer" + api "github.com/begonia-org/go-sdk/api/endpoint/v1" + c "github.com/smartystreets/goconvey/convey" + "github.com/spark-lence/tiga" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var endpointId = "" +var tag = fmt.Sprintf("test-%s", time.Now().Format("20060102150405")) +var tag3 = fmt.Sprintf("test3-%s", time.Now().Format("20060102150405")) +func putTest(t *testing.T) { + c.Convey("test app add success", t, func() { + t.Log("add test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + _, filename, _, _ := runtime.Caller(0) + pbFile := filepath.Join(filepath.Dir(filepath.Dir(filename)), "integration", "testdata", "helloworld.pb") + pb, _ := os.ReadFile(pbFile) + conf := cfg.ReadConfig(env) + repo := NewEndpointRepo(conf, logger.Log) + snk, _ := tiga.NewSnowflake(1) + endpointId = snk.GenerateIDString() + err := repo.Put(context.Background(), &api.Endpoints{ + Key: endpointId, + DescriptorSet: pb, + Name: "test", + ServiceName: "test", + Description: "test", + Balance: string(goloadbalancer.RRBalanceType), + Endpoints: []*api.EndpointMeta{ + { + Addr: "127.0.0.1:21213", + Weight: 0, + }, + { + Addr: "127.0.0.1:21214", + Weight: 0, + }, + { + Addr: "127.0.0.1:21215", + Weight: 0, + }, + }, + Tags: []string{tag,tag3}, + Version: fmt.Sprintf("%d", time.Now().UnixMilli()), + CreatedAt: timestamppb.New(time.Now()).AsTime().Format(time.RFC3339), + UpdatedAt: timestamppb.New(time.Now()).AsTime().Format(time.RFC3339), + }) + c.So(err, c.ShouldBeNil) + }) +} +func getEndpointTest(t *testing.T) { + c.Convey("test endpoint get success", t, func() { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + conf := cfg.ReadConfig(env) + repo := NewEndpointRepo(conf, logger.Log) + cnf := config.NewConfig(conf) + endpointKey := cnf.GetServiceKey(endpointId) + data, err := repo.Get(context.Background(), endpointKey) + c.So(err, c.ShouldBeNil) + c.So(data, c.ShouldNotBeEmpty) + added := &api.Endpoints{} + err = json.Unmarshal([]byte(data), added) + c.So(err, c.ShouldBeNil) + c.So(added.Key, c.ShouldEqual, endpointId) + + }) +} + +func getKeysByTagsTest(t *testing.T) { + c.Convey("test get keys by tags success", t, func() { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + conf := cfg.ReadConfig(env) + repo := NewEndpointRepo(conf, logger.Log) + keys, err := repo.GetKeysByTags(context.Background(), []string{tag}) + c.So(err, c.ShouldBeNil) + c.So(keys, c.ShouldNotBeEmpty) + cnf := config.NewConfig(conf) + endpointKey := cnf.GetServiceKey(endpointId) + c.So(keys, c.ShouldContain, endpointKey) + }) +} +func patchEndpointTest(t *testing.T) { + c.Convey("test endpoint patch success", t, 
func() { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + conf := cfg.ReadConfig(env) + repo := NewEndpointRepo(conf, logger.Log) + cnf := config.NewConfig(conf) + endpointKey := cnf.GetServiceKey(endpointId) + tag1:=fmt.Sprintf("test-patch-%s", time.Now().Format("20060102150405")) + err := repo.Patch(context.Background(), endpointId, map[string]interface{}{ + "description": "test description", + "balance": string(goloadbalancer.WRRBalanceType), + "tags": []string{tag1,tag3}, + }) + c.So(err, c.ShouldBeNil) + + data, err := repo.Get(context.Background(), endpointKey) + c.So(err, c.ShouldBeNil) + c.So(data, c.ShouldNotBeEmpty) + updated := &api.Endpoints{} + err = json.Unmarshal([]byte(data), updated) + c.So(err, c.ShouldBeNil) + c.So(updated.Description, c.ShouldEqual, "test description") + c.So(updated.Balance, c.ShouldEqual, string(goloadbalancer.WRRBalanceType)) + c.So(updated.Name, c.ShouldEqual, "test") + // Tags should be updated + keys, err := repo.GetKeysByTags(context.Background(), []string{tag}) + c.So(err, c.ShouldBeNil) + c.So(keys, c.ShouldBeEmpty) + keys, err = repo.GetKeysByTags(context.Background(), []string{tag1}) + c.So(err, c.ShouldBeNil) + c.So(keys, c.ShouldContain, endpointKey) + + }) +} +func delEndpointTest(t *testing.T){ + c.Convey("test endpoint delete", t, func() { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + conf := cfg.ReadConfig(env) + repo := NewEndpointRepo(conf, logger.Log) + cnf := config.NewConfig(conf) + endpointKey := cnf.GetServiceKey(endpointId) + err := repo.Del(context.Background(), endpointId) + c.So(err, c.ShouldBeNil) + data, err := repo.Get(context.Background(), endpointKey) + // t.Logf("err:%v", err) + c.So(err, c.ShouldBeNil) + c.So(data, c.ShouldBeEmpty) + + // Tags should be del + keys, err := repo.GetKeysByTags(context.Background(), []string{tag3}) + c.So(err, c.ShouldBeNil) + c.So(keys, c.ShouldBeEmpty) + // c.So(keys, c.ShouldContain, endpointKey) + + }) +} +func putTagsTest(t *testing.T){ + c.Convey("test endpoint add tags", t, func() { + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + conf := cfg.ReadConfig(env) + repo := NewEndpointRepo(conf, logger.Log) + cnf := config.NewConfig(conf) + endpointKey := cnf.GetServiceKey(endpointId) + tag1:=fmt.Sprintf("test1-%s", time.Now().Format("20060102150405")) + tag2:=fmt.Sprintf("test2-%s", time.Now().Format("20060102150405")) + err := repo.PutTags(context.Background(), endpointId, []string{tag1,tag2,tag3}) + c.So(err, c.ShouldBeNil) + + data, err := repo.Get(context.Background(), endpointKey) + c.So(err, c.ShouldBeNil) + c.So(data, c.ShouldNotBeEmpty) + updated := &api.Endpoints{} + err = json.Unmarshal([]byte(data), updated) + c.So(err, c.ShouldBeNil) + + // Tags should be updated + c.So(updated.Tags, c.ShouldContain, tag1) + c.So(updated.Tags, c.ShouldContain, tag2) + c.So(updated.Tags, c.ShouldContain, tag3) + c.So(updated.Tags, c.ShouldNotContain, tag) + + // Tags will be covered + keys, err := repo.GetKeysByTags(context.Background(), []string{tag}) + c.So(err, c.ShouldBeNil) + c.So(keys, c.ShouldBeEmpty) + keys, err = repo.GetKeysByTags(context.Background(), []string{tag1}) + c.So(err, c.ShouldBeNil) + c.So(keys, c.ShouldContain, endpointKey) + + }) +} + + +func TestEndpoint(t *testing.T) { + t.Run("put", putTest) + t.Run("get", getEndpointTest) + t.Run("getKeysByTags", getKeysByTagsTest) + t.Run("patch", patchEndpointTest) + t.Run("putTags", putTagsTest) + t.Run("del", delEndpointTest) +} diff --git a/internal/data/file.go 
b/internal/data/file.go index 956b875..632d637 100644 --- a/internal/data/file.go +++ b/internal/data/file.go @@ -1,42 +1,42 @@ package data -import ( - "context" - - "github.com/begonia-org/begonia/internal/biz/file" - common "github.com/begonia-org/go-sdk/common/api/v1" -) - -type fileRepoImpl struct { - data *Data -} - -func NewFileRepoImpl(data *Data) file.FileRepo { - return &fileRepoImpl{data: data} -} - -// mysql -// AddFile(ctx context.Context, files []*common.Files) error -// DeleteFile(ctx context.Context, files []*common.Files) error -// UpdateFile(ctx context.Context, files []*common.Files) error -// GetFile(ctx context.Context, uri string) (*common.Files, error) -// ListFile(ctx context.Context, name []string) ([]*common.Files, error) -func (r *fileRepoImpl) UploadFile(ctx context.Context, files []*common.Files) error { - return nil -} - -func (r *fileRepoImpl) DeleteFile(ctx context.Context, files []*common.Files) error { - return nil -} - -func (r *fileRepoImpl) UpdateFile(ctx context.Context, files []*common.Files) error { - return nil -} - -func (r *fileRepoImpl) GetFile(ctx context.Context, uri string) (*common.Files, error) { - return nil, nil -} - -func (r *fileRepoImpl) ListFile(ctx context.Context, name []string) ([]*common.Files, error) { - return nil, nil -} +// import ( +// "context" + +// "github.com/begonia-org/begonia/internal/biz/file" +// common "github.com/begonia-org/go-sdk/common/api/v1" +// ) + +// type fileRepoImpl struct { +// data *Data +// } + +// func NewFileRepoImpl(data *Data) file.FileRepo { +// return &fileRepoImpl{data: data} +// } + +// // mysql +// // AddFile(ctx context.Context, files []*common.Files) error +// // DeleteFile(ctx context.Context, files []*common.Files) error +// // UpdateFile(ctx context.Context, files []*common.Files) error +// // GetFile(ctx context.Context, uri string) (*common.Files, error) +// // ListFile(ctx context.Context, name []string) ([]*common.Files, error) +// func (r *fileRepoImpl) UploadFile(ctx context.Context, files []*common.Files) error { +// return nil +// } + +// func (r *fileRepoImpl) DeleteFile(ctx context.Context, files []*common.Files) error { +// return nil +// } + +// func (r *fileRepoImpl) UpdateFile(ctx context.Context, files []*common.Files) error { +// return nil +// } + +// func (r *fileRepoImpl) GetFile(ctx context.Context, uri string) (*common.Files, error) { +// return nil, nil +// } + +// func (r *fileRepoImpl) ListFile(ctx context.Context, name []string) ([]*common.Files, error) { +// return nil, nil +// } diff --git a/internal/data/operator.go b/internal/data/operator.go index 64566a4..baaccc8 100644 --- a/internal/data/operator.go +++ b/internal/data/operator.go @@ -38,10 +38,6 @@ func (r *dataOperatorRepo) Lock(ctx context.Context, key string, exp time.Durati return NewDataLock(r.data.rdb.GetClient(), key, exp, 3), nil } -// DistributedUnlock 释放锁 -// func (r *dataOperatorRepo) DistributedUnlock(ctx context.Context, lock *redislock.Lock) error { -// return lock.Release(ctx) -// } // GetAllForbiddenUsers 获取所有被禁用的用户 func (r *dataOperatorRepo) GetAllForbiddenUsers(ctx context.Context) ([]*api.Users, error) { @@ -50,7 +46,6 @@ func (r *dataOperatorRepo) GetAllForbiddenUsers(ctx context.Context) ([]*api.Use page := int32(1) for { user, err := r.user.List(ctx, nil, []api.USER_STATUS{api.USER_STATUS_LOCKED, api.USER_STATUS_DELETED}, page, 100) - // users, err := r.user.ListUsers(ctx, "status in (?,?)", api.USER_STATUS_LOCKED, api.USER_STATUS_DELETED) if err != nil { if 
strings.Contains(err.Error(), gorm.ErrRecordNotFound.Error()) { break @@ -164,9 +159,6 @@ func (r *dataOperatorRepo) PullBloom(ctx context.Context, key string) []byte { return r.data.rdb.GetBytes(ctx, key) } -// func (d *dataOperatorRepo) LoadLocalBloom(ctx context.Context, keys []*golayeredbloom.BloomConfig) error { -// return d.local.filters.LoadFrom(ctx, keys) -// } func (d *dataOperatorRepo) LastUpdated(ctx context.Context, key string) (time.Time, error) { // return d.data.rdb.SetBytes(ctx, keys, exp) diff --git a/internal/data/operator_test.go b/internal/data/operator_test.go new file mode 100644 index 0000000..cc817ea --- /dev/null +++ b/internal/data/operator_test.go @@ -0,0 +1,3 @@ +package data + + diff --git a/internal/data/user.go b/internal/data/user.go index bb6467b..fe8d9ce 100644 --- a/internal/data/user.go +++ b/internal/data/user.go @@ -31,7 +31,7 @@ func (r *userRepoImpl) Add(ctx context.Context, user *api.Users) error { func (r *userRepoImpl) Get(ctx context.Context, key string) (*api.Users, error) { app := &api.Users{} - err := r.curd.Get(ctx, app, true, "uid = ?", key) + err := r.curd.Get(ctx, app, true, "uid= ? or phone=? or email=? or name=?", key,key,key,key) if err != nil||app.Uid=="" { return nil, fmt.Errorf("get user failed: %w", err) } diff --git a/internal/data/user_test.go b/internal/data/user_test.go new file mode 100644 index 0000000..cd832c6 --- /dev/null +++ b/internal/data/user_test.go @@ -0,0 +1,211 @@ +package data + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/begonia-org/begonia" + cfg "github.com/begonia-org/begonia/config" + "github.com/begonia-org/begonia/internal/pkg/logger" + api "github.com/begonia-org/go-sdk/api/user/v1" + c "github.com/smartystreets/goconvey/convey" + "github.com/spark-lence/tiga" + "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var uid = "" +var uid2 = "" +var uid3 = "" +var user1 = fmt.Sprintf("user1-%s", time.Now().Format("20060102150405")) +var user2 = fmt.Sprintf("user2-%s", time.Now().Format("20060102150405")) + +// var user3 = fmt.Sprintf("user3-%s", time.Now().Format("20060102150405")) +func testAddUser(t *testing.T) { + c.Convey("test user add success", t, func() { + t.Log("add test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewUserRepo(cfg.ReadConfig(env), logger.Log) + snk, _ := tiga.NewSnowflake(1) + uid = snk.GenerateIDString() + err := repo.Add(context.TODO(), &api.Users{ + Uid: uid, + Name: user1, + Dept: "dev", + Email: fmt.Sprintf("%s%s@example.com", uid, time.Now().Format("20060102150405")), + Phone: time.Now().Format("20060102150405"), + Role: api.Role_ADMIN, + Avatar: "https://www.example.com/avatar.jpg", + Owner: "test-user-01", + CreatedAt: timestamppb.Now(), + UpdatedAt: timestamppb.Now(), + Status: api.USER_STATUS_ACTIVE, + }) + c.So(err, c.ShouldBeNil) + time.Sleep(2 * time.Second) + uid2 = snk.GenerateIDString() + err = repo.Add(context.TODO(), &api.Users{ + Uid: uid2, + Name: user2, + Dept: "dev", + Email: fmt.Sprintf("user2%s@example.com", time.Now().Format("20060102150405")), + Phone: time.Now().Format("20060102150405"), + Role: api.Role_ADMIN, + Avatar: "https://www.example.com/avatar.jpg", + Owner: "test-user-01", + CreatedAt: timestamppb.Now(), + UpdatedAt: timestamppb.Now(), + Status: api.USER_STATUS_ACTIVE, + }) + c.So(err, c.ShouldBeNil) + uid3 = snk.GenerateIDString() + err = repo.Add(context.TODO(), &api.Users{ + Uid: uid3, + Name: user2, + Dept: "dev", + 
Email: fmt.Sprintf("user2%s@example.com", time.Now().Format("20060102150405")), + Phone: time.Now().Format("20060102150405"), + Role: api.Role_ADMIN, + Avatar: "https://www.example.com/avatar.jpg", + Owner: "test-user-01", + CreatedAt: timestamppb.Now(), + UpdatedAt: timestamppb.Now(), + Status: api.USER_STATUS_ACTIVE, + }) + c.So(err, c.ShouldNotBeNil) + c.So(err.Error(), c.ShouldContainSubstring, "Duplicate entry") + + }) +} +func testGetUser(t *testing.T) { + c.Convey("test user get success", t, func() { + t.Log("get test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewUserRepo(cfg.ReadConfig(env), logger.Log) + user, err := repo.Get(context.TODO(), uid) + t.Logf("user phone:%v", user.Phone) + c.So(err, c.ShouldBeNil) + c.So(user, c.ShouldNotBeNil) + c.So(user.Uid, c.ShouldEqual, uid) + c.So(user.Name, c.ShouldEqual, user1) + }) +} + +func testUpdateUser(t *testing.T) { + c.Convey("test user update success", t, func() { + t.Log("update test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewUserRepo(cfg.ReadConfig(env), logger.Log) + user, err := repo.Get(context.TODO(), uid) + c.So(err, c.ShouldBeNil) + c.So(user, c.ShouldNotBeNil) + oldPhone := user.Phone + oldName := user.Name + lastedUpdateAt := user.UpdatedAt + createdAt := user.CreatedAt + user.Name = fmt.Sprintf("user-update-%s", time.Now().Format("20060102150405")) + time.Sleep(1 * time.Second) + user.Phone = time.Now().Format("20060102150405") + user.UpdateMask = &fieldmaskpb.FieldMask{Paths: []string{"name", "phone"}} + err = repo.Patch(context.TODO(), user) + + c.So(err, c.ShouldBeNil) + updated, err := repo.Get(context.TODO(), uid) + c.So(err, c.ShouldBeNil) + c.So(updated, c.ShouldNotBeNil) + c.So(updated.Uid, c.ShouldEqual, uid) + c.So(updated.Name, c.ShouldNotEqual, oldName) + c.So(updated.Phone, c.ShouldNotEqual, oldPhone) + c.So(updated.UpdatedAt.Seconds, c.ShouldBeGreaterThanOrEqualTo, lastedUpdateAt.Seconds) + c.So(updated.CreatedAt.Seconds, c.ShouldEqual, createdAt.Seconds) + updated.Uid = uid2 + updated.ID = 0 + updated.Name = fmt.Sprintf("user4-update-%s", time.Now().Format("20060102150405")) + updated.UpdateMask = &fieldmaskpb.FieldMask{Paths: []string{"name", "phone", "email"}} + err = repo.Patch(context.TODO(), updated) + c.So(err, c.ShouldNotBeNil) + + c.So(err.Error(), c.ShouldContainSubstring, "Duplicate entry") + + }) +} + +func testDelUser(t *testing.T) { + c.Convey("test user delete success", t, func() { + t.Log("delete test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewUserRepo(cfg.ReadConfig(env), logger.Log) + err := repo.Del(context.TODO(), uid) + c.So(err, c.ShouldBeNil) + _, err = repo.Get(context.TODO(), uid) + c.So(err, c.ShouldNotBeNil) + c.So(err.Error(), c.ShouldContainSubstring, "record not found") + }) +} + +func testListUser(t *testing.T) { + c.Convey("test user list success", t, func() { + t.Log("list test") + env := "dev" + if begonia.Env != "" { + env = begonia.Env + } + repo := NewUserRepo(cfg.ReadConfig(env), logger.Log) + // users, err := repo.List(context.TODO(), &api.ListUserReq{ + // Name: user2, + // }) + snk, _ := tiga.NewSnowflake(1) + rand.Seed(time.Now().UnixNano()) + status := []api.USER_STATUS{api.USER_STATUS_ACTIVE, api.USER_STATUS_INACTIVE, api.USER_STATUS_LOCKED} + depts:=[3]string{"dev","test","prd"} + for i := 0; i < 20; i++ { + err:=repo.Add(context.TODO(), &api.Users{ + Uid: snk.GenerateIDString(), + Name: fmt.Sprintf("user-%d-%s@example.com", i, 
time.Now().Format("20060102150405")), + Dept: depts[rand.Intn(len(depts))], + Email: fmt.Sprintf("user-%d-%s@example.com", i, time.Now().Format("20060102150405")), + Phone: fmt.Sprintf("%d%s", i, time.Now().Format("20060102150405")), + Role: api.Role_ADMIN, + Avatar: "https://www.example.com/avatar.jpg", + Owner: "test-user-01", + CreatedAt: timestamppb.Now(), + UpdatedAt: timestamppb.Now(), + Status: status[rand.Intn(len(status))], + }) + if err!=nil{ + t.Errorf("add user error:%v",err) + } + } + users1, err := repo.List(context.TODO(), []string{"dev","test"},[]api.USER_STATUS{api.USER_STATUS_ACTIVE,api.USER_STATUS_INACTIVE}, 1, 5) + c.So(err, c.ShouldBeNil) + c.So(users1, c.ShouldNotBeEmpty) + users2, err := repo.List(context.TODO(), []string{"dev","test"},[]api.USER_STATUS{api.USER_STATUS_ACTIVE,api.USER_STATUS_INACTIVE}, 2, 5) + c.So(err, c.ShouldBeNil) + c.So(users2, c.ShouldNotBeEmpty) + c.So(users1[0].Uid, c.ShouldNotEqual, users2[0].Uid) + c.So(users1[4].Uid, c.ShouldBeLessThan, users2[0].Uid) + + }) +} +func TestUser(t *testing.T) { + t.Run("testAddUser", testAddUser) + t.Run("testGetUser", testGetUser) + t.Run("testUpdateUser", testUpdateUser) + t.Run("testDelUser", testDelUser) + t.Run("testListUser", testListUser) +} diff --git a/internal/data/wire.go b/internal/data/wire.go new file mode 100644 index 0000000..f24a05c --- /dev/null +++ b/internal/data/wire.go @@ -0,0 +1,38 @@ +//go:build wireinject +// +build wireinject + +package data + +import ( + "github.com/begonia-org/begonia/internal/biz" + "github.com/begonia-org/begonia/internal/biz/gateway" + "github.com/begonia-org/begonia/internal/pkg/config" + "github.com/begonia-org/go-sdk/logger" + "github.com/google/wire" + "github.com/spark-lence/tiga" +) + +func NewAppRepo(cfg *tiga.Configuration,log logger.Logger) biz.AppRepo { + panic(wire.Build(ProviderSet, config.NewConfig)) + // return &appRepoImpl{data: data, curd: curd, local: local, cfg: cfg} +} +// func NewFileRepo(cfg *tiga.Configuration,log logger.Logger) file.FileRepo { +// panic(wire.Build(ProviderSet)) +// } +func NewEndpointRepo(cfg *tiga.Configuration,log logger.Logger) gateway.EndpointRepo { + panic(wire.Build(ProviderSet, config.NewConfig)) +} +func NewAuthzRepo(cfg *tiga.Configuration,log logger.Logger) biz.AuthzRepo { + panic(wire.Build(ProviderSet, config.NewConfig)) +} +func NewUserRepo(cfg *tiga.Configuration,log logger.Logger) biz.UserRepo { + panic(wire.Build(ProviderSet,config.NewConfig)) +} + +func NewLayered(cfg *tiga.Configuration,log logger.Logger) *LayeredCache { + panic(wire.Build(ProviderSet,config.NewConfig)) +} + +func NewOperator(cfg *tiga.Configuration,log logger.Logger) *LayeredCache { + panic(wire.Build(ProviderSet,config.NewConfig)) +} \ No newline at end of file diff --git a/internal/data/wire_gen.go b/internal/data/wire_gen.go new file mode 100644 index 0000000..881884f --- /dev/null +++ b/internal/data/wire_gen.go @@ -0,0 +1,85 @@ +// Code generated by Wire. DO NOT EDIT. 
+ +//go:generate go run github.com/google/wire/cmd/wire +//go:build !wireinject +// +build !wireinject + +package data + +import ( + "github.com/begonia-org/begonia/internal/biz" + "github.com/begonia-org/begonia/internal/biz/gateway" + "github.com/begonia-org/begonia/internal/pkg/config" + "github.com/begonia-org/go-sdk/logger" + "github.com/spark-lence/tiga" +) + +// Injectors from wire.go: + +func NewAppRepo(cfg *tiga.Configuration, log logger.Logger) biz.AppRepo { + mySQLDao := NewMySQL(cfg) + redisDao := NewRDB(cfg) + etcdDao := NewEtcd(cfg) + data := NewData(mySQLDao, redisDao, etcdDao) + configConfig := config.NewConfig(cfg) + curd := NewCurdImpl(mySQLDao, configConfig) + layeredCache := NewLayeredCache(data, configConfig, log) + appRepo := NewAppRepoImpl(data, curd, layeredCache, configConfig) + return appRepo +} + +// func NewFileRepo(cfg *tiga.Configuration,log logger.Logger) file.FileRepo { +// panic(wire.Build(ProviderSet)) +// } +func NewEndpointRepo(cfg *tiga.Configuration, log logger.Logger) gateway.EndpointRepo { + mySQLDao := NewMySQL(cfg) + redisDao := NewRDB(cfg) + etcdDao := NewEtcd(cfg) + data := NewData(mySQLDao, redisDao, etcdDao) + configConfig := config.NewConfig(cfg) + endpointRepo := NewEndpointRepoImpl(data, configConfig) + return endpointRepo +} + +func NewAuthzRepo(cfg *tiga.Configuration, log logger.Logger) biz.AuthzRepo { + mySQLDao := NewMySQL(cfg) + redisDao := NewRDB(cfg) + etcdDao := NewEtcd(cfg) + data := NewData(mySQLDao, redisDao, etcdDao) + configConfig := config.NewConfig(cfg) + layeredCache := NewLayeredCache(data, configConfig, log) + bizAuthzRepo := NewAuthzRepoImpl(data, log, layeredCache) + return bizAuthzRepo +} + +func NewUserRepo(cfg *tiga.Configuration, log logger.Logger) biz.UserRepo { + mySQLDao := NewMySQL(cfg) + redisDao := NewRDB(cfg) + etcdDao := NewEtcd(cfg) + data := NewData(mySQLDao, redisDao, etcdDao) + configConfig := config.NewConfig(cfg) + layeredCache := NewLayeredCache(data, configConfig, log) + curd := NewCurdImpl(mySQLDao, configConfig) + userRepo := NewUserRepoImpl(data, layeredCache, curd, configConfig) + return userRepo +} + +func NewLayered(cfg *tiga.Configuration, log logger.Logger) *LayeredCache { + mySQLDao := NewMySQL(cfg) + redisDao := NewRDB(cfg) + etcdDao := NewEtcd(cfg) + data := NewData(mySQLDao, redisDao, etcdDao) + configConfig := config.NewConfig(cfg) + layeredCache := NewLayeredCache(data, configConfig, log) + return layeredCache +} + +func NewOperator(cfg *tiga.Configuration, log logger.Logger) *LayeredCache { + mySQLDao := NewMySQL(cfg) + redisDao := NewRDB(cfg) + etcdDao := NewEtcd(cfg) + data := NewData(mySQLDao, redisDao, etcdDao) + configConfig := config.NewConfig(cfg) + layeredCache := NewLayeredCache(data, configConfig, log) + return layeredCache +}