diff --git a/cmd/main.go b/cmd/main.go
index 5405d44..0f9950a 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -82,7 +82,7 @@ func prepareLogger(level string, json bool) *logrus.Entry {
 	return log
 }
 
-func assignAddress(c context.Context, log *logrus.Entry, client kubernetes.Interface, assigner address.Assigner, node *types.Node, cfg *config.Config) error {
+func assignAddress(c context.Context, log *logrus.Entry, client kubernetes.Interface, assigner address.Assigner, node *types.Node, cfg *config.Config) (string, error) {
 	ctx, cancel := context.WithCancel(c)
 	defer cancel()
 
@@ -101,22 +101,23 @@ func assignAddress(c context.Context, log *logrus.Entry, client kubernetes.Inter
 			"retry-counter":  retryCounter,
 			"retry-attempts": cfg.RetryAttempts,
 		}).Debug("assigning static public IP address to node")
-		err := func(ctx context.Context) error {
+		assignedAddress, err := func(ctx context.Context) (string, error) {
 			if err := lock.Lock(ctx); err != nil {
-				return errors.Wrap(err, "failed to acquire lock")
+				return "", errors.Wrap(err, "failed to acquire lock")
 			}
 			log.Debug("lock acquired")
 			defer func() {
 				lock.Unlock(ctx) //nolint:errcheck
 				log.Debug("lock released")
 			}()
-			if err := assigner.Assign(ctx, node.Instance, node.Zone, cfg.Filter, cfg.OrderBy); err != nil {
-				return err //nolint:wrapcheck
+			assignedAddress, err := assigner.Assign(ctx, node.Instance, node.Zone, cfg.Filter, cfg.OrderBy)
+			if err != nil {
+				return "", err //nolint:wrapcheck
 			}
-			return nil
+			return assignedAddress, nil
 		}(c)
 		if err == nil || errors.Is(err, address.ErrStaticIPAlreadyAssigned) {
-			return nil
+			return assignedAddress, nil
 		}
 
 		log.WithError(err).WithFields(logrus.Fields{
@@ -130,7 +131,64 @@ func assignAddress(c context.Context, log *logrus.Entry, client kubernetes.Inter
 			continue
 		case <-ctx.Done():
 			// If the context is done, return an error indicating that the operation was cancelled
-			return errors.Wrap(ctx.Err(), "context cancelled while assigning addresses")
+			return "", errors.Wrap(ctx.Err(), "context cancelled while assigning addresses")
+		}
+	}
+	return "", errors.New("reached maximum number of retries")
+}
+
+func waitForAddressToBeReported(c context.Context, log *logrus.Entry, explorer nd.Explorer, node *types.Node, assignedAddress string, cfg *config.Config) error {
+	ctx, cancel := context.WithCancel(c)
+	defer cancel()
+
+	// ticker for retry interval
+	ticker := time.NewTicker(cfg.RetryInterval)
+	defer ticker.Stop()
+
+	for retryCounter := 0; retryCounter <= cfg.RetryAttempts; retryCounter++ {
+		log.WithFields(logrus.Fields{
+			"node":           node.Name,
+			"instance":       node.Instance,
+			"address":        assignedAddress,
+			"retry-counter":  retryCounter,
+			"retry-attempts": cfg.RetryAttempts,
+		}).Debug("Waiting for node to report assigned address")
+
+		nodeInfo, err := explorer.GetNode(ctx, node.Name)
+		if err == nil {
+			for _, ip := range nodeInfo.ExternalIPs {
+				if ip.String() == assignedAddress {
+					log.WithFields(logrus.Fields{
+						"node":           node.Name,
+						"instance":       node.Instance,
+						"address":        assignedAddress,
+						"retry-counter":  retryCounter,
+						"retry-attempts": cfg.RetryAttempts,
+					}).Info("Node is reporting assigned address")
+					return nil
+				}
+			}
+			log.WithError(err).WithFields(logrus.Fields{
+				"node":     node.Name,
+				"instance": node.Instance,
+				"address":  assignedAddress,
+			}).Warn("Node is not yet reporting the assigned address")
+		} else {
+			log.WithError(err).WithFields(logrus.Fields{
+				"node":     node.Name,
+				"instance": node.Instance,
+				"address":  assignedAddress,
+			}).Error("failed to check if node is reporting the assigned address")
address") + } + + log.Infof("retrying after %v", cfg.RetryInterval) + + select { + case <-ticker.C: + continue + case <-ctx.Done(): + // If the context is done, return an error indicating that the operation was cancelled + return errors.Wrap(ctx.Err(), "context cancelled while waiting for node to report assigned address") } } return errors.New("reached maximum number of retries") @@ -169,12 +227,16 @@ func run(c context.Context, log *logrus.Entry, cfg *config.Config) error { return errors.Wrap(err, "initializing assigner") } - err = assignAddress(ctx, log, clientset, assigner, n, cfg) + assignedAddress, err := assignAddress(ctx, log, clientset, assigner, n, cfg) if err != nil { return errors.Wrap(err, "assigning static public IP address") } if cfg.TaintKey != "" { + if err := waitForAddressToBeReported(ctx, log, explorer, n, assignedAddress, cfg); err != nil { + return errors.Wrap(err, "waiting for node to report assigned address") + } + logger := log.WithField("taint-key", cfg.TaintKey) tainter := nd.NewTainter(clientset) diff --git a/cmd/main_test.go b/cmd/main_test.go index 68439eb..b948dc7 100644 --- a/cmd/main_test.go +++ b/cmd/main_test.go @@ -2,13 +2,16 @@ package main import ( "context" + "net" "testing" "time" "github.com/doitintl/kubeip/internal/address" "github.com/doitintl/kubeip/internal/config" + "github.com/doitintl/kubeip/internal/node" "github.com/doitintl/kubeip/internal/types" mocks "github.com/doitintl/kubeip/mocks/address" + nodeMocks "github.com/doitintl/kubeip/mocks/node" "github.com/pkg/errors" tmock "github.com/stretchr/testify/mock" "k8s.io/client-go/kubernetes/fake" @@ -24,15 +27,17 @@ func Test_assignAddress(t *testing.T) { tests := []struct { name string args args + address string wantErr bool }{ { - name: "assign address successfully", + name: "assign address successfully", + address: "1.1.1.1", args: args{ c: context.Background(), assignerFn: func(t *testing.T) address.Assigner { mock := mocks.NewAssigner(t) - mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return(nil) + mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return("1.1.1.1", nil) return mock }, node: &types.Node{ @@ -51,14 +56,15 @@ func Test_assignAddress(t *testing.T) { }, }, { - name: "assign address after a few retries", + name: "assign address after a few retries", + address: "1.1.1.1", args: args{ c: context.Background(), assignerFn: func(t *testing.T) address.Assigner { mock := mocks.NewAssigner(t) - mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return(errors.New("first error")).Once() - mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return(errors.New("second error")).Once() - mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return(nil).Once() + mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return("", errors.New("first error")).Once() + mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return("", errors.New("second error")).Once() + mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return("1.1.1.1", nil).Once() return mock }, node: &types.Node{ @@ -82,7 +88,7 @@ func Test_assignAddress(t *testing.T) { c: 
 				c:          context.Background(),
 				assignerFn: func(t *testing.T) address.Assigner {
 					mock := mocks.NewAssigner(t)
-					mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return(errors.New("error")).Times(4)
+					mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return("", errors.New("error")).Times(4)
 					return mock
 				},
 				node: &types.Node{
@@ -115,7 +121,7 @@ func Test_assignAddress(t *testing.T) {
 				}(),
 				assignerFn: func(t *testing.T) address.Assigner {
 					mock := mocks.NewAssigner(t)
-					mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return(errors.New("error")).Maybe()
+					mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return("", errors.New("error")).Maybe()
 					return mock
 				},
 				node: &types.Node{
@@ -143,7 +149,7 @@ func Test_assignAddress(t *testing.T) {
 				}(),
 				assignerFn: func(t *testing.T) address.Assigner {
 					mock := mocks.NewAssigner(t)
-					mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return(errors.New("error")).Maybe()
+					mock.EXPECT().Assign(tmock.Anything, "test-instance", "test-zone", []string{"test-filter"}, "test-order-by").Return("", errors.New("error")).Maybe()
 					return mock
 				},
 				node: &types.Node{
@@ -168,8 +174,208 @@ func Test_assignAddress(t *testing.T) {
 			log := prepareLogger("debug", false)
 			assigner := tt.args.assignerFn(t)
 			client := fake.NewSimpleClientset()
-			if err := assignAddress(tt.args.c, log, client, assigner, tt.args.node, tt.args.cfg); (err != nil) != tt.wantErr {
+			assignedAddress, err := assignAddress(tt.args.c, log, client, assigner, tt.args.node, tt.args.cfg)
+			if err != nil != tt.wantErr {
 				t.Errorf("assignAddress() error = %v, wantErr %v", err, tt.wantErr)
+			} else if assignedAddress != tt.address {
+				t.Fatalf("assignAddress() = %v, want %v", assignedAddress, tt.address)
+			}
+		})
+	}
+}
+
+func Test_waitForAddressToBeReported(t *testing.T) {
+	type args struct {
+		c          context.Context
+		explorerFn func(t *testing.T) node.Explorer
+		node       *types.Node
+		address    string
+		cfg        *config.Config
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "address reported with no retries",
+			args: args{
+				c:       context.Background(),
+				address: "1.1.1.1",
+				explorerFn: func(t *testing.T) node.Explorer {
+					mock := nodeMocks.NewExplorer(t)
+					mock.EXPECT().GetNode(tmock.Anything, "test-node").Return(
+						&types.Node{
+							Name:        "test-node",
+							Instance:    "test-instance",
+							Region:      "test-region",
+							Zone:        "test-zone",
+							ExternalIPs: []net.IP{net.IPv4(1, 1, 1, 1)},
+						},
+						nil,
+					)
+					return mock
+				},
+				node: &types.Node{
+					Name:     "test-node",
+					Instance: "test-instance",
+					Region:   "test-region",
+					Zone:     "test-zone",
+				},
+				cfg: &config.Config{
+					Filter:        []string{"test-filter"},
+					OrderBy:       "test-order-by",
+					RetryAttempts: 3,
+					RetryInterval: time.Millisecond,
+					LeaseDuration: 1,
+				},
+			},
+		},
+		{
+			name: "address reported after a few retries",
+			args: args{
+				c:       context.Background(),
+				address: "1.1.1.1",
+				explorerFn: func(t *testing.T) node.Explorer {
+					mock := nodeMocks.NewExplorer(t)
+					mock.EXPECT().GetNode(tmock.Anything, "test-node").Return(&types.Node{
+						Name:        "test-node",
+						Instance:    "test-instance",
+						Region:      "test-region",
+						Zone:        "test-zone",
+						ExternalIPs: []net.IP{net.IPv4(9, 9, 9, 9)},
+					}, nil).Times(3)
+					mock.EXPECT().GetNode(tmock.Anything, "test-node").Return(&types.Node{
+						Name:        "test-node",
+						Instance:    "test-instance",
+						Region:      "test-region",
+						Zone:        "test-zone",
+						ExternalIPs: []net.IP{net.IPv4(1, 1, 1, 1)},
+					}, nil).Once()
+					return mock
+				},
+				node: &types.Node{
+					Name:     "test-node",
+					Instance: "test-instance",
+					Region:   "test-region",
+					Zone:     "test-zone",
+				},
+				cfg: &config.Config{
+					Filter:        []string{"test-filter"},
+					OrderBy:       "test-order-by",
+					RetryAttempts: 3,
+					RetryInterval: time.Millisecond,
+					LeaseDuration: 1,
+				},
+			},
+		},
+		{
+			name: "error after a few retries and reached maximum number of retries",
+			args: args{
+				c: context.Background(),
+				explorerFn: func(t *testing.T) node.Explorer {
+					mock := nodeMocks.NewExplorer(t)
+					mock.EXPECT().GetNode(tmock.Anything, "test-node").Return(&types.Node{
+						Name:        "test-node",
+						Instance:    "test-instance",
+						Region:      "test-region",
+						Zone:        "test-zone",
+						ExternalIPs: []net.IP{net.IPv4(9, 9, 9, 9)},
+					}, nil).Times(4)
+					mock.EXPECT().GetNode(tmock.Anything, "test-node").Return(&types.Node{
+						Name:        "test-node",
+						Instance:    "test-instance",
+						Region:      "test-region",
+						Zone:        "test-zone",
+						ExternalIPs: []net.IP{net.IPv4(1, 1, 1, 1)},
+					}, nil).Times(0)
+					return mock
+				},
+				node: &types.Node{
+					Name:     "test-node",
+					Instance: "test-instance",
+					Region:   "test-region",
+					Zone:     "test-zone",
+				},
+				cfg: &config.Config{
+					Filter:        []string{"test-filter"},
+					OrderBy:       "test-order-by",
+					RetryAttempts: 3,
+					RetryInterval: time.Millisecond,
+					LeaseDuration: 1,
+				},
+			},
+			wantErr: true,
+		},
+		{
+			name: "context cancelled while waiting for address to be reported",
+			args: args{
+				c: func() context.Context {
+					ctx, cancel := context.WithCancel(context.Background())
+					go func() {
+						// Simulate a shutdown signal being received after a short delay
+						time.Sleep(20 * time.Millisecond)
+						cancel()
+					}()
+					return ctx
+				}(),
+				explorerFn: func(t *testing.T) node.Explorer {
+					mock := nodeMocks.NewExplorer(t)
+					mock.EXPECT().GetNode(tmock.Anything, "test-node").Return(nil, errors.New("error")).Maybe()
+					return mock
+				},
+				node: &types.Node{
+					Name:     "test-node",
+					Instance: "test-instance",
+					Region:   "test-region",
+					Zone:     "test-zone",
+				},
+				cfg: &config.Config{
+					Filter:        []string{"test-filter"},
+					OrderBy:       "test-order-by",
+					RetryAttempts: 10,
+					RetryInterval: 5 * time.Millisecond,
+					LeaseDuration: 1,
+				},
+			},
+			wantErr: true,
+		},
+		{
+			name: "error after a few retries and context is done",
+			args: args{
+				c: func() context.Context {
+					ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) //nolint:govet
+					return ctx
+				}(),
+				explorerFn: func(t *testing.T) node.Explorer {
+					mock := nodeMocks.NewExplorer(t)
+					mock.EXPECT().GetNode(tmock.Anything, "test-node").Return(nil, errors.New("error")).Maybe()
+					return mock
+				},
+				node: &types.Node{
+					Name:     "test-node",
+					Instance: "test-instance",
+					Region:   "test-region",
+					Zone:     "test-zone",
+				},
+				cfg: &config.Config{
+					Filter:        []string{"test-filter"},
+					OrderBy:       "test-order-by",
+					RetryAttempts: 3,
+					RetryInterval: 15 * time.Millisecond,
+					LeaseDuration: 1,
+				},
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			log := prepareLogger("debug", false)
+			explorer := tt.args.explorerFn(t)
+			err := waitForAddressToBeReported(tt.args.c, log, explorer, tt.args.node, tt.args.address, tt.args.cfg)
+			if err != nil != tt.wantErr {
+				t.Errorf("waitForAddressToBeReported() error = %v, wantErr %v", err, tt.wantErr)
 			}
 		})
 	}
diff --git a/internal/address/assigner.go b/internal/address/assigner.go
index d94cfb7..773111d 100644
--- a/internal/address/assigner.go
+++ b/internal/address/assigner.go
@@ -16,7 +16,7 @@ var (
 )
 
 type Assigner interface {
-	Assign(ctx context.Context, instanceID, zone string, filter []string, orderBy string) error
+	Assign(ctx context.Context, instanceID, zone string, filter []string, orderBy string) (string, error)
 	Unassign(ctx context.Context, instanceID, zone string) error
 }
 
diff --git a/internal/address/aws.go b/internal/address/aws.go
index 910abda..0e4098d 100644
--- a/internal/address/aws.go
+++ b/internal/address/aws.go
@@ -165,32 +165,33 @@ func (a *awsAssigner) forceCheckAddressAssigned(ctx context.Context, allocationI
 	return false, nil
 }
 
-func (a *awsAssigner) Assign(ctx context.Context, instanceID, _ string, filter []string, orderBy string) error {
+func (a *awsAssigner) Assign(ctx context.Context, instanceID, _ string, filter []string, orderBy string) (string, error) {
 	// get elastic IP attached to the instance
 	err := a.checkElasticIPAssigned(ctx, instanceID)
 	if err != nil {
-		return errors.Wrapf(err, "check if elastic IP is already assigned to instance %s", instanceID)
+		return "", errors.Wrapf(err, "check if elastic IP is already assigned to instance %s", instanceID)
 	}
 
 	// get available elastic IPs based on filter and orderBy
 	addresses, err := a.getAvailableElasticIPs(ctx, filter, orderBy)
 	if err != nil {
-		return errors.Wrap(err, "failed to get available elastic IPs")
+		return "", errors.Wrap(err, "failed to get available elastic IPs")
 	}
 
 	// get EC2 instance
 	instance, err := a.instanceGetter.Get(ctx, instanceID, a.region)
 	if err != nil {
-		return errors.Wrapf(err, "failed to get instance %s", instanceID)
+		return "", errors.Wrapf(err, "failed to get instance %s", instanceID)
 	}
 
 	// get primary network interface ID with public IP address (DeviceIndex == 0)
 	networkInterfaceID, err := a.getNetworkInterfaceID(instance)
 	if err != nil {
-		return errors.Wrapf(err, "failed to get network interface ID for instance %s", instanceID)
+		return "", errors.Wrapf(err, "failed to get network interface ID for instance %s", instanceID)
 	}
 
 	// try to assign available addresses until succeeds
 	// due to concurrency, it is possible that another kubeip instance will assign the same address
+	var assignedAddress string
 	for i := range addresses {
 		a.logger.WithFields(logrus.Fields{
 			"instance": instanceID,
@@ -208,13 +209,14 @@ func (a *awsAssigner) Assign(ctx context.Context, instanceID, _ string, filter [
 				"address":       *addresses[i].PublicIp,
 				"allocation_id": *addresses[i].AllocationId,
 			}).Info("elastic IP assigned to the instance")
+			assignedAddress = *addresses[i].PublicIp
 			break // break if address assigned successfully
 		}
 	}
 	if err != nil {
-		return errors.Wrap(err, "failed to assign elastic IP address")
+		return "", errors.Wrap(err, "failed to assign elastic IP address")
 	}
-	return nil
+	return assignedAddress, nil
 }
 
 func (a *awsAssigner) tryAssignAddress(ctx context.Context, address *types.Address, networkInterfaceID, instanceID string) error {
diff --git a/internal/address/aws_test.go b/internal/address/aws_test.go
index ba2ecb4..1fcbd26 100644
--- a/internal/address/aws_test.go
+++ b/internal/address/aws_test.go
@@ -431,6 +431,7 @@ func Test_awsAssigner_Assign(t *testing.T) {
 	type fields struct {
 		region           string
 		logger           *logrus.Entry
+		address          string
 		instanceGetterFn func(t *testing.T, args *args) cloud.Ec2InstanceGetter
 		eipListerFn      func(t *testing.T, args *args) cloud.EipLister
 		eipAssignerFn    func(t *testing.T, args *args) cloud.EipAssigner
@@ -444,8 +445,9 @@
 		{
 			name: "assign EIP to instance",
 			fields: fields{
-				region: "us-east-1",
-				logger: logrus.NewEntry(logrus.New()),
+				region:  "us-east-1",
+				logger:  logrus.NewEntry(logrus.New()),
+				address: "100.0.0.1",
 				instanceGetterFn: func(t *testing.T, args *args) cloud.Ec2InstanceGetter {
 					mock := mocks.NewEc2InstanceGetter(t)
 					mock.EXPECT().Get(args.ctx, args.instanceID, "us-east-1").Return(&types.Instance{
@@ -584,8 +586,11 @@ func Test_awsAssigner_Assign(t *testing.T) {
 				eipLister:      tt.fields.eipListerFn(t, &tt.args),
 				eipAssigner:    tt.fields.eipAssignerFn(t, &tt.args),
 			}
-			if err := a.Assign(tt.args.ctx, tt.args.instanceID, "", tt.args.filter, tt.args.orderBy); (err != nil) != tt.wantErr {
+			address, err := a.Assign(tt.args.ctx, tt.args.instanceID, "", tt.args.filter, tt.args.orderBy)
+			if err != nil != tt.wantErr {
 				t.Errorf("Assign() error = %v, wantErr %v", err, tt.wantErr)
+			} else if address != tt.fields.address {
+				t.Fatalf("Assign() = %v, want %v", address, tt.fields.address)
 			}
 		})
 	}
diff --git a/internal/address/azure.go b/internal/address/azure.go
index 79d870f..6f8889d 100644
--- a/internal/address/azure.go
+++ b/internal/address/azure.go
@@ -5,8 +5,8 @@ import "context"
 type azureAssigner struct {
 }
 
-func (a *azureAssigner) Assign(_ context.Context, _, _ string, _ []string, _ string) error {
-	return nil
+func (a *azureAssigner) Assign(_ context.Context, _, _ string, _ []string, _ string) (string, error) {
+	return "", nil
 }
 
 func (a *azureAssigner) Unassign(_ context.Context, _, _ string) error {
diff --git a/internal/address/gcp.go b/internal/address/gcp.go
index 8927ee1..7fdc244 100644
--- a/internal/address/gcp.go
+++ b/internal/address/gcp.go
@@ -214,20 +214,23 @@ func (a *gcpAssigner) CheckAddressAssigned(region, addressName string) (bool, er
 	return address.Status == inUseStatus, nil
 }
 
-func (a *gcpAssigner) Assign(ctx context.Context, instanceID, zone string, filter []string, orderBy string) error {
+func (a *gcpAssigner) Assign(ctx context.Context, instanceID, zone string, filter []string, orderBy string) (string, error) {
 	// check if instance already has a public static IP address assigned
-	instance, err := a.checkStaticIPAssigned(zone, instanceID)
+	instance, address, err := a.checkStaticIPAssigned(zone, instanceID)
 	if err != nil {
-		return errors.Wrapf(err, "check if static public IP is already assigned to instance %s", instanceID)
+		if errors.Is(err, ErrStaticIPAlreadyAssigned) {
+			return address, nil
+		}
+		return "", errors.Wrapf(err, "check if static public IP is already assigned to instance %s", instanceID)
 	}
 
 	// get available reserved public IP addresses
 	addresses, err := a.listAddresses(filter, orderBy, reservedStatus)
 	if err != nil {
-		return errors.Wrap(err, "failed to list available addresses")
+		return "", errors.Wrap(err, "failed to list available addresses")
 	}
 	if len(addresses) == 0 {
-		return errors.Errorf("no available addresses")
+		return "", errors.Errorf("no available addresses")
 	}
 	// log available addresses IPs
 	ips := make([]string, 0, len(addresses))
@@ -238,51 +241,53 @@ func (a *gcpAssigner) Assign(ctx context.Context, instanceID, zone string, filte
 
 	// delete current ephemeral public IP address
 	if err = a.DeleteInstanceAddress(ctx, instance, zone); err != nil && !errors.Is(err, ErrNoPublicIPAssigned) {
-		return errors.Wrap(err, "failed to delete current public IP address")
+		return "", errors.Wrap(err, "failed to delete current public IP address")
 	}
 
 	// get instance details again to refresh the network interface fingerprint (required for adding a new ipv6 address)
 	instance, err = a.instanceGetter.Get(a.project, zone, instanceID)
 	if err != nil {
-		return errors.Wrapf(err, "failed refresh network interface fingerprint for instance %s", instanceID)
+		return "", errors.Wrapf(err, "failed refresh network interface fingerprint for instance %s", instanceID)
 	}
 
 	// try to assign all available addresses until one succeeds
 	// due to concurrency, it is possible that another kubeip instance will assign the same address
+	var assignedAddress string
 	for _, address := range addresses {
 		// check if context is done before trying to assign an address
 		if ctx.Err() != nil {
-			return errors.Wrap(ctx.Err(), "context cancelled while assigning addresses")
+			return "", errors.Wrap(ctx.Err(), "context cancelled while assigning addresses")
 		}
 		if err = tryAssignAddress(ctx, a, instance, a.region, zone, address); err != nil {
 			a.logger.WithError(err).WithField("address", address.Address).Error("failed to assign static public IP address")
 			continue
 		}
+		assignedAddress = address.Address
 		// break the loop after successfully assigning an address
 		break
 	}
 	if err != nil {
-		return errors.Wrap(err, "failed to assign static public IP address")
+		return "", errors.Wrap(err, "failed to assign static public IP address")
 	}
-	return nil
+	return assignedAddress, nil
 }
 
-func (a *gcpAssigner) checkStaticIPAssigned(zone, instanceID string) (*compute.Instance, error) {
+func (a *gcpAssigner) checkStaticIPAssigned(zone, instanceID string) (*compute.Instance, string, error) {
 	instance, err := a.instanceGetter.Get(a.project, zone, instanceID)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get instance %s", instanceID)
+		return nil, "", errors.Wrapf(err, "failed to get instance %s", instanceID)
 	}
 
 	assigned, err := a.listAddresses(nil, "", inUseStatus)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to list assigned addresses")
+		return nil, "", errors.Wrap(err, "failed to list assigned addresses")
 	}
 	// create a map of users for quick lookup
 	users := a.createUserMap(assigned)
 	// check if the instance's self link is in the list of users
-	if _, ok := users[instance.SelfLink]; ok {
-		return nil, ErrStaticIPAlreadyAssigned
+	if address, ok := users[instance.SelfLink]; ok {
+		return nil, address, ErrStaticIPAlreadyAssigned
 	}
-	return instance, nil
+	return instance, "", nil
 }
 
 func (a *gcpAssigner) listAddresses(filter []string, orderBy, status string) ([]*compute.Address, error) {
@@ -396,11 +401,11 @@ func tryAssignAddress(ctx context.Context, as internalAssigner, instance *comput
 	return nil
 }
 
-func (a *gcpAssigner) createUserMap(assigned []*compute.Address) map[string]struct{} {
-	users := make(map[string]struct{})
+func (a *gcpAssigner) createUserMap(assigned []*compute.Address) map[string]string {
+	users := make(map[string]string)
 	for _, address := range assigned {
 		for _, user := range address.Users {
-			users[user] = struct{}{}
+			users[user] = address.Address
 		}
 	}
 	return users
diff --git a/internal/address/gcp_test.go b/internal/address/gcp_test.go
index 424b3d4..b96c752 100644
--- a/internal/address/gcp_test.go
+++ b/internal/address/gcp_test.go
@@ -329,6 +329,7 @@ func Test_gcpAssigner_Assign(t *testing.T) {
 		instanceGetterFn func(t *testing.T) cloud.InstanceGetter
 		project          string
 		region           string
+		address          string
 	}
 	type args struct {
 		ctx context.Context
@@ -348,6 +349,7 @@ func Test_gcpAssigner_Assign(t *testing.T) {
 			fields: fields{
 				project: "test-project",
 				region:  "test-region",
+				address: "100.0.0.3",
 				listerFn: func(t *testing.T) cloud.Lister {
 					mock := mocks.NewLister(t)
 					mockCall := mocks.NewListCall(t)
@@ -407,6 +409,55 @@ func Test_gcpAssigner_Assign(t *testing.T) {
 				orderBy:    "test-order-by",
 			},
 		},
+		{
+			name: "assign when static IP address already allocated",
+			fields: fields{
+				project: "test-project",
+				region:  "test-region",
+				address: "100.0.0.2",
+				listerFn: func(t *testing.T) cloud.Lister {
+					mock := mocks.NewLister(t)
+					mockCall := mocks.NewListCall(t)
+					mock.EXPECT().List("test-project", "test-region").Return(mockCall)
+					mockCall.EXPECT().Filter("(status=IN_USE) (addressType=EXTERNAL) (ipVersion!=IPV6)").Return(mockCall).Once()
+					mockCall.EXPECT().Do().Return(&compute.AddressList{
+						Items: []*compute.Address{
+							{Name: "test-address-1", Status: inUseStatus, Address: "100.0.0.1", NetworkTier: defaultNetworkTier, AddressType: "EXTERNAL", Users: []string{"self-link-test-instance-1"}},
+							{Name: "test-address-2", Status: inUseStatus, Address: "100.0.0.2", NetworkTier: defaultNetworkTier, AddressType: "EXTERNAL", Users: []string{"self-link-test-instance-2"}},
+						},
+					}, nil).Once()
+					return mock
+				},
+				instanceGetterFn: func(t *testing.T) cloud.InstanceGetter {
+					mock := mocks.NewInstanceGetter(t)
+					mock.EXPECT().Get("test-project", "test-zone", "test-instance-0").Return(&compute.Instance{
+						Name:     "test-instance-0",
+						Zone:     "test-zone",
+						SelfLink: "self-link-test-instance-2",
+						NetworkInterfaces: []*compute.NetworkInterface{
+							{
+								Name: "test-network-interface",
+								AccessConfigs: []*compute.AccessConfig{
+									{Name: "test-access-config", NatIP: "200.0.0.1", Type: defaultAccessConfigType, Kind: accessConfigKind},
+								},
+								Fingerprint: "test-fingerprint",
+							},
+						},
+					}, nil)
+					return mock
+				},
+				addressManagerFn: func(t *testing.T) cloud.AddressManager {
+					return mocks.NewAddressManager(t)
+				},
+			},
+			args: args{
+				ctx:        context.TODO(),
+				instanceID: "test-instance-0",
+				zone:       "test-zone",
+				filter:     []string{"test-filter-1", "test-filter-2"},
+				orderBy:    "test-order-by",
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -419,8 +470,11 @@ func Test_gcpAssigner_Assign(t *testing.T) {
 				region:  tt.fields.region,
 				logger:  logger,
 			}
-			if err := a.Assign(tt.args.ctx, tt.args.instanceID, tt.args.zone, tt.args.filter, tt.args.orderBy); (err != nil) != tt.wantErr {
+			address, err := a.Assign(tt.args.ctx, tt.args.instanceID, tt.args.zone, tt.args.filter, tt.args.orderBy)
+			if err != nil != tt.wantErr {
 				t.Errorf("Assign() error = %v, wantErr %v", err, tt.wantErr)
+			} else if address != tt.fields.address {
+				t.Fatalf("Assign() = %v, want %v", address, tt.fields.address)
 			}
 		})
 	}
diff --git a/mocks/address/Assigner.go b/mocks/address/Assigner.go
index 15140f7..ec25015 100644
--- a/mocks/address/Assigner.go
+++ b/mocks/address/Assigner.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.30.16. DO NOT EDIT.
+// Code generated by mockery v2.35.2. DO NOT EDIT.
 
 package mocks
 
@@ -22,17 +22,27 @@ func (_m *Assigner) EXPECT() *Assigner_Expecter {
 }
 
 // Assign provides a mock function with given fields: ctx, instanceID, zone, filter, orderBy
-func (_m *Assigner) Assign(ctx context.Context, instanceID string, zone string, filter []string, orderBy string) error {
+func (_m *Assigner) Assign(ctx context.Context, instanceID string, zone string, filter []string, orderBy string) (string, error) {
 	ret := _m.Called(ctx, instanceID, zone, filter, orderBy)
 
-	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, string, string, []string, string) error); ok {
+	var r0 string
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, []string, string) (string, error)); ok {
+		return rf(ctx, instanceID, zone, filter, orderBy)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, []string, string) string); ok {
 		r0 = rf(ctx, instanceID, zone, filter, orderBy)
 	} else {
-		r0 = ret.Error(0)
+		r0 = ret.Get(0).(string)
 	}
 
-	return r0
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, []string, string) error); ok {
+		r1 = rf(ctx, instanceID, zone, filter, orderBy)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
 }
 
 // Assigner_Assign_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Assign'
@@ -57,12 +67,12 @@ func (_c *Assigner_Assign_Call) Run(run func(ctx context.Context, instanceID str
 	return _c
 }
 
-func (_c *Assigner_Assign_Call) Return(_a0 error) *Assigner_Assign_Call {
-	_c.Call.Return(_a0)
+func (_c *Assigner_Assign_Call) Return(_a0 string, _a1 error) *Assigner_Assign_Call {
+	_c.Call.Return(_a0, _a1)
 	return _c
 }
 
-func (_c *Assigner_Assign_Call) RunAndReturn(run func(context.Context, string, string, []string, string) error) *Assigner_Assign_Call {
+func (_c *Assigner_Assign_Call) RunAndReturn(run func(context.Context, string, string, []string, string) (string, error)) *Assigner_Assign_Call {
 	_c.Call.Return(run)
 	return _c
 }