main.go
//go:generate ./hack/update-codegen.sh
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/go-logr/zapr"
	"github.com/rancher/k3k/cli/cmds"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/buildinfo"
	"github.com/rancher/k3k/pkg/controller/cluster"
	"github.com/rancher/k3k/pkg/controller/clusterset"
	"github.com/rancher/k3k/pkg/log"
	"github.com/urfave/cli"
	"go.uber.org/zap"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)
var (
	scheme           = runtime.NewScheme()
	clusterCIDR      string
	sharedAgentImage string
	kubeconfig       string
	debug            bool
	logger           *log.Logger

	flags = []cli.Flag{
		cli.StringFlag{
			Name:        "kubeconfig",
			EnvVar:      "KUBECONFIG",
			Usage:       "Kubeconfig path",
			Destination: &kubeconfig,
		},
		cli.StringFlag{
			Name:        "cluster-cidr",
			EnvVar:      "CLUSTER_CIDR",
			Usage:       "Cluster CIDR to be added to the networkpolicy of the clustersets",
			Destination: &clusterCIDR,
		},
		cli.StringFlag{
			Name:        "shared-agent-image",
			EnvVar:      "SHARED_AGENT_IMAGE",
			Usage:       "K3K Virtual Kubelet image",
			Value:       "rancher/k3k:k3k-kubelet-dev",
			Destination: &sharedAgentImage,
		},
		cli.BoolFlag{
			Name:        "debug",
			EnvVar:      "DEBUG",
			Usage:       "Debug level logging",
			Destination: &debug,
		},
	}
)
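// init registers the client-go built-in types and the k3k v1alpha1 API
// types into the shared scheme used by the controller-runtime manager.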
func init() {
	_ = clientgoscheme.AddToScheme(scheme)
	_ = v1alpha1.AddToScheme(scheme)
}
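// main wires the CLI flags into the app, initializes the logger once flags
// are parsed (via Before), and hands control to run.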
func main() {
	app := cmds.NewApp()
	app.Flags = flags
	app.Action = run
	app.Version = buildinfo.Version
	app.Before = func(clx *cli.Context) error {
		logger = log.New(debug)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		// Before only runs after flag parsing succeeds, so logger can still
		// be nil here (e.g. on an invalid flag); fall back to a default one.
		if logger == nil {
			logger = log.New(debug)
		}
		logger.Fatalw("failed to run k3k controller", zap.Error(err))
	}
}
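// run builds a REST config from the given kubeconfig, creates a
// controller-runtime manager, registers the cluster, etcd pod, and
// clusterset controllers, and then starts the manager.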
func run(clx *cli.Context) error {
	ctx := context.Background()

	logger.Info("Starting k3k - Version: " + buildinfo.Version)

	restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return fmt.Errorf("failed to create config from kubeconfig file: %v", err)
	}

	mgr, err := ctrl.NewManager(restConfig, manager.Options{
		Scheme: scheme,
	})
	if err != nil {
		return fmt.Errorf("failed to create new controller runtime manager: %v", err)
	}

	// Route controller-runtime's internal logging through our zap logger.
	ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))

	logger.Info("adding cluster controller")
	if err := cluster.Add(ctx, mgr, sharedAgentImage, logger); err != nil {
		return fmt.Errorf("failed to add the new cluster controller: %v", err)
	}

	logger.Info("adding etcd pod controller")
	if err := cluster.AddPodController(ctx, mgr, logger); err != nil {
		return fmt.Errorf("failed to add the new etcd pod controller: %v", err)
	}

	logger.Info("adding clusterset controller")
	if err := clusterset.Add(ctx, mgr, clusterCIDR, logger); err != nil {
		return fmt.Errorf("failed to add the clusterset controller: %v", err)
	}

	// Without an explicit cluster CIDR, watch nodes to derive the CIDRs used
	// in the clusterset network policies.
	if clusterCIDR == "" {
		logger.Info("adding networkpolicy node controller")
		if err := clusterset.AddNodeController(ctx, mgr, logger); err != nil {
			return fmt.Errorf("failed to add the clusterset node controller: %v", err)
		}
	}

	if err := mgr.Start(ctx); err != nil {
		return fmt.Errorf("failed to start the manager: %v", err)
	}

	return nil
}
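// Example invocation (a sketch; the binary name `k3k-controller` and the
// CIDR value are assumptions, not taken from this file):
//
//	k3k-controller --kubeconfig ~/.kube/config --cluster-cidr 10.42.0.0/16 --debug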