diff --git a/packages/aws/rancher-custom-cluster.yaml b/packages/aws/rancher-custom-cluster.yaml index 2e88dd3..23987f6 100644 --- a/packages/aws/rancher-custom-cluster.yaml +++ b/packages/aws/rancher-custom-cluster.yaml @@ -9,5 +9,8 @@ templates: - rancher-custom-cluster variables: airgap_setup: + - false - true + proxy_setup: - false + - true \ No newline at end of file diff --git a/packages/aws/rancher-proxy.yaml b/packages/aws/rancher-proxy.yaml new file mode 100644 index 0000000..64ec62a --- /dev/null +++ b/packages/aws/rancher-proxy.yaml @@ -0,0 +1,21 @@ +manifest: + name: rancher-proxy + description: rancher-proxy + variables: + server_count: + default: 1 +templates: + - aws/registry_nodes + - aws/cluster_nodes + - proxy-standalone + - rke2 + - rancher-proxy +variables: + cni: + - calico + proxy_setup: + - true + docker_compose_version: + - 2.15.1 + cert_manager_version: + - 1.15.0 \ No newline at end of file diff --git a/templates/aws/cluster_nodes/manifest.yaml b/templates/aws/cluster_nodes/manifest.yaml index 5c2d661..f5b5c7a 100644 --- a/templates/aws/cluster_nodes/manifest.yaml +++ b/templates/aws/cluster_nodes/manifest.yaml @@ -61,6 +61,10 @@ variables: type: boolean description: "Boolean that when set, will create rke setup rather regular cluster nodes." default: false + proxy_setup: + type: boolean + description: "Boolean that when set, will create proxied nodes rather regular cluster nodes." 
+ default: false instance_type: type: string optional: false diff --git a/templates/aws/cluster_nodes/terraform/pools/corral.tf b/templates/aws/cluster_nodes/terraform/pools/corral.tf index a540841..185a504 100644 --- a/templates/aws/cluster_nodes/terraform/pools/corral.tf +++ b/templates/aws/cluster_nodes/terraform/pools/corral.tf @@ -23,3 +23,4 @@ variable "server_count" {} variable "agent_count" {} variable "airgap_setup" {} variable "rke_setup" {} +variable "proxy_setup" {} \ No newline at end of file diff --git a/templates/aws/cluster_nodes/terraform/pools/main.tf b/templates/aws/cluster_nodes/terraform/pools/main.tf index 5df3383..0b85aeb 100644 --- a/templates/aws/cluster_nodes/terraform/pools/main.tf +++ b/templates/aws/cluster_nodes/terraform/pools/main.tf @@ -36,7 +36,7 @@ resource "aws_instance" "server" { key_name = aws_key_pair.corral_key.key_name vpc_security_group_ids = [var.aws_security_group] subnet_id = var.aws_subnet - associate_public_ip_address = var.airgap_setup ? false : true + associate_public_ip_address = var.airgap_setup || var.proxy_setup ? false : true ebs_block_device { device_name = "/dev/sda1" @@ -47,7 +47,7 @@ resource "aws_instance" "server" { } provisioner "remote-exec" { - inline = var.airgap_setup || var.rke_setup ? [ + inline = var.airgap_setup || var.rke_setup || var.proxy_setup ? [ "sudo su < /root/.ssh/authorized_keys", "echo \"${var.corral_private_key}\"", @@ -62,12 +62,12 @@ resource "aws_instance" "server" { } connection { type = "ssh" - host = var.airgap_setup ? self.private_ip : self.public_ip + host = var.airgap_setup || var.proxy_setup ? self.private_ip : self.public_ip user = var.aws_ssh_user private_key = var.corral_private_key timeout = "4m" - bastion_host = var.airgap_setup ? var.registry_ip : null - bastion_user = var.airgap_setup ? var.aws_ssh_user : null + bastion_host = var.airgap_setup || var.proxy_setup ? var.registry_ip : null + bastion_user = var.airgap_setup || var.proxy_setup ? 
var.aws_ssh_user : null } tags = { @@ -82,7 +82,7 @@ resource "aws_instance" "agent" { key_name = aws_key_pair.corral_key.key_name vpc_security_group_ids = [var.aws_security_group] subnet_id = var.aws_subnet - associate_public_ip_address = var.airgap_setup ? false : true + associate_public_ip_address = var.airgap_setup || var.proxy_setup ? false : true ebs_block_device { device_name = "/dev/sda1" @@ -93,7 +93,7 @@ resource "aws_instance" "agent" { } provisioner "remote-exec" { - inline = var.airgap_setup ? [ + inline = var.airgap_setup || var.proxy_setup ? [ "sudo su < /root/.ssh/authorized_keys", "echo \"${var.corral_private_key}\"", @@ -108,12 +108,12 @@ resource "aws_instance" "agent" { } connection { type = "ssh" - host = var.airgap_setup ? self.private_ip : self.public_ip + host = var.airgap_setup || var.proxy_setup ? self.private_ip : self.public_ip user = var.aws_ssh_user private_key = var.corral_private_key timeout = "4m" - bastion_host = var.airgap_setup ? var.registry_ip : null - bastion_user = var.airgap_setup ? var.aws_ssh_user : null + bastion_host = var.airgap_setup || var.proxy_setup ? var.registry_ip : null + bastion_user = var.airgap_setup || var.proxy_setup ? var.aws_ssh_user : null } tags = { @@ -150,91 +150,91 @@ resource "aws_lb_target_group_attachment" "aws_tg_attachment_6443_server" { } resource "aws_lb_target_group_attachment" "aws_internal_tg_attachment_80_server" { - count = var.airgap_setup ? var.server_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.server_count : 0 target_group_arn = aws_lb_target_group.aws_internal_tg_80[0].arn target_id = aws_instance.server[count.index].id port = 80 } resource "aws_lb_target_group_attachment" "aws_internal_tg_attachment_443_server" { - count = var.airgap_setup ? var.server_count : 0 + count = var.airgap_setup || var.proxy_setup ? 
var.server_count : 0 target_group_arn = aws_lb_target_group.aws_internal_tg_443[0].arn target_id = aws_instance.server[count.index].id port = 443 } resource "aws_lb_target_group_attachment" "aws_internal_tg_attachment_6443_server" { - count = var.airgap_setup ? var.server_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.server_count : 0 target_group_arn = aws_lb_target_group.aws_internal_tg_6443[0].arn target_id = aws_instance.server[count.index].id port = 6443 } resource "aws_lb_target_group_attachment" "aws_internal_tg_attachment_9345_server" { - count = var.airgap_setup ? var.server_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.server_count : 0 target_group_arn = aws_lb_target_group.aws_internal_tg_9345[0].arn target_id = aws_instance.server[count.index].id port = 9345 } resource "aws_lb_target_group_attachment" "aws_tg_attachment_80" { - count = var.airgap_setup ? var.agent_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.agent_count : 0 target_group_arn = aws_lb_target_group.aws_tg_80.arn target_id = aws_instance.agent[count.index].id port = 80 } resource "aws_lb_target_group_attachment" "aws_tg_attachment_443" { - count = var.airgap_setup ? var.agent_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.agent_count : 0 target_group_arn = aws_lb_target_group.aws_tg_443.arn target_id = aws_instance.agent[count.index].id port = 443 } resource "aws_lb_target_group_attachment" "aws_tg_attachment_6443" { - count = var.airgap_setup ? var.agent_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.agent_count : 0 target_group_arn = aws_lb_target_group.aws_tg_6443.arn target_id = aws_instance.agent[count.index].id port = 6443 } resource "aws_lb_target_group_attachment" "aws_tg_attachment_9345" { - count = var.airgap_setup ? var.agent_count : 0 + count = var.airgap_setup || var.proxy_setup ? 
var.agent_count : 0 target_group_arn = aws_lb_target_group.aws_tg_9345.arn target_id = aws_instance.agent[count.index].id port = 9345 } resource "aws_lb_target_group_attachment" "aws_internal_tg_attachment_80" { - count = var.airgap_setup ? var.agent_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.agent_count : 0 target_group_arn = aws_lb_target_group.aws_internal_tg_80[0].arn target_id = aws_instance.agent[count.index].id port = 80 } resource "aws_lb_target_group_attachment" "aws_internal_tg_attachment_443" { - count = var.airgap_setup ? var.agent_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.agent_count : 0 target_group_arn = aws_lb_target_group.aws_internal_tg_443[0].arn target_id = aws_instance.agent[count.index].id port = 443 } resource "aws_lb_target_group_attachment" "aws_internal_tg_attachment_6443" { - count = var.airgap_setup ? var.agent_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.agent_count : 0 target_group_arn = aws_lb_target_group.aws_internal_tg_6443[0].arn target_id = aws_instance.agent[count.index].id port = 6443 } resource "aws_lb_target_group_attachment" "aws_internal_tg_attachment_9345" { - count = var.airgap_setup ? var.agent_count : 0 + count = var.airgap_setup || var.proxy_setup ? var.agent_count : 0 target_group_arn = aws_lb_target_group.aws_internal_tg_9345[0].arn target_id = aws_instance.agent[count.index].id port = 9345 } resource "aws_lb" "aws_internal_nlb" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 internal = true load_balancer_type = "network" subnets = [var.aws_subnet] @@ -317,7 +317,7 @@ resource "aws_lb_target_group" "aws_tg_9345" { } resource "aws_lb_target_group" "aws_internal_tg_80" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 
1 : 0 port = 80 protocol = "TCP" vpc_id = var.aws_vpc @@ -335,7 +335,7 @@ resource "aws_lb_target_group" "aws_internal_tg_80" { } resource "aws_lb_target_group" "aws_internal_tg_443" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 port = 443 protocol = "TCP" vpc_id = var.aws_vpc @@ -353,7 +353,7 @@ resource "aws_lb_target_group" "aws_internal_tg_443" { } resource "aws_lb_target_group" "aws_internal_tg_6443" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 port = 6443 protocol = "TCP" vpc_id = var.aws_vpc @@ -371,7 +371,7 @@ resource "aws_lb_target_group" "aws_internal_tg_6443" { } resource "aws_lb_target_group" "aws_internal_tg_9345" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 port = 9345 protocol = "TCP" vpc_id = var.aws_vpc @@ -429,7 +429,7 @@ resource "aws_lb_listener" "aws_nlb_listener_9345" { } resource "aws_lb_listener" "aws_internal_nlb_listener_80" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 load_balancer_arn = aws_lb.aws_internal_nlb[0].arn port = "80" protocol = "TCP" @@ -440,7 +440,7 @@ resource "aws_lb_listener" "aws_internal_nlb_listener_80" { } resource "aws_lb_listener" "aws_internal_nlb_listener_443" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 load_balancer_arn = aws_lb.aws_internal_nlb[0].arn port = "443" protocol = "TCP" @@ -451,7 +451,7 @@ resource "aws_lb_listener" "aws_internal_nlb_listener_443" { } resource "aws_lb_listener" "aws_internal_nlb_listener_6443" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 load_balancer_arn = aws_lb.aws_internal_nlb[0].arn port = "6443" protocol = "TCP" @@ -462,7 +462,7 @@ resource "aws_lb_listener" "aws_internal_nlb_listener_6443" { } resource "aws_lb_listener" "aws_internal_nlb_listener_9345" { - count = var.airgap_setup ? 
1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 load_balancer_arn = aws_lb.aws_internal_nlb[0].arn port = "9345" protocol = "TCP" @@ -481,7 +481,7 @@ resource "aws_route53_record" "aws_route53" { } resource "aws_route53_record" "aws_route53_internal" { - count = var.airgap_setup ? 1 : 0 + count = var.airgap_setup || var.proxy_setup ? 1 : 0 zone_id = data.aws_route53_zone.selected.zone_id name = "${var.aws_hostname_prefix}-internal" type = "CNAME" diff --git a/templates/aws/cluster_nodes/terraform/pools/outputs.tf b/templates/aws/cluster_nodes/terraform/pools/outputs.tf index 1651979..0c5cc88 100644 --- a/templates/aws/cluster_nodes/terraform/pools/outputs.tf +++ b/templates/aws/cluster_nodes/terraform/pools/outputs.tf @@ -7,38 +7,42 @@ output "internal_fqdn" { } output "kube_api_host" { - value = var.airgap_setup ? aws_instance.server[0].private_ip : aws_instance.server[0].public_ip + value = var.airgap_setup || var.proxy_setup ? aws_instance.server[0].private_ip : aws_instance.server[0].public_ip } output "airgap_setup" { value = var.airgap_setup } +output "proxy_setup" { + value = var.proxy_setup +} + output "corral_node_pools" { value = { bastion = [for instance in [aws_instance.server[0]] : { name = instance.tags.Name // unique name of node user = "root" // ssh username ssh_user = var.aws_ssh_user - address = var.airgap_setup ? instance.private_ip : instance.public_ip // address of ssh host + address = var.airgap_setup || var.proxy_setup ? instance.private_ip : instance.public_ip // address of ssh host internal_address = instance.private_ip - bastion_address = var.airgap_setup ? var.registry_ip : "" + bastion_address = var.airgap_setup || var.proxy_setup ? var.registry_ip : "" }] server = [for instance in slice(aws_instance.server, 1, var.server_count) : { name = instance.tags.Name // unique name of node user = "root" // ssh username ssh_user = var.aws_ssh_user - address = var.airgap_setup ? 
instance.private_ip : instance.public_ip // address of ssh host + address = var.airgap_setup || var.proxy_setup ? instance.private_ip : instance.public_ip // address of ssh host internal_address = instance.private_ip - bastion_address = var.airgap_setup ? var.registry_ip : "" + bastion_address = var.airgap_setup || var.proxy_setup ? var.registry_ip : "" }] agent = [for instance in aws_instance.agent : { name = instance.tags.Name // unique name of node user = "root" // ssh username ssh_user = var.aws_ssh_user - address = var.airgap_setup ? instance.private_ip : instance.public_ip // address of ssh host + address = var.airgap_setup || var.proxy_setup ? instance.private_ip : instance.public_ip // address of ssh host internal_address= instance.private_ip - bastion_address = var.airgap_setup ? var.registry_ip : "" + bastion_address = var.airgap_setup || var.proxy_setup ? var.registry_ip : "" }] } } \ No newline at end of file diff --git a/templates/aws/nodes/manifest.yaml b/templates/aws/nodes/manifest.yaml index 7ea6713..3f01be8 100644 --- a/templates/aws/nodes/manifest.yaml +++ b/templates/aws/nodes/manifest.yaml @@ -64,10 +64,18 @@ variables: type: string optional: true description: "Public IP address of the bastion node" + bastion_private_ip: + type: string + optional: true + description: "Private IP address of the bastion node" airgap_setup: type: boolean default: false description: "Boolean that when set, will create airgap nodes rather regular cluster nodes." + proxy_setup: + type: boolean + default: false + description: "Boolean that when set, will create proxied nodes rather regular cluster nodes." 
commands: - module: pools - command: "echo \"$CORRAL_corral_user_public_key\" >> /$(whoami)/.ssh/authorized_keys" diff --git a/templates/aws/nodes/terraform/pools/corral.tf b/templates/aws/nodes/terraform/pools/corral.tf index 0462730..4f21e40 100644 --- a/templates/aws/nodes/terraform/pools/corral.tf +++ b/templates/aws/nodes/terraform/pools/corral.tf @@ -20,3 +20,5 @@ variable "instance_type" {} variable "node_count" {} variable "airgap_setup" {} variable "bastion_ip" {} +variable "bastion_private_ip" {} +variable "proxy_setup" {} \ No newline at end of file diff --git a/templates/aws/nodes/terraform/pools/main.tf b/templates/aws/nodes/terraform/pools/main.tf index 25c6ac1..6f1d27a 100644 --- a/templates/aws/nodes/terraform/pools/main.tf +++ b/templates/aws/nodes/terraform/pools/main.tf @@ -31,7 +31,7 @@ resource "aws_instance" "node" { key_name = aws_key_pair.corral_key.key_name vpc_security_group_ids = [var.aws_security_group] subnet_id = var.aws_subnet - associate_public_ip_address = var.airgap_setup ? false : true + associate_public_ip_address = var.airgap_setup || var.proxy_setup ? false : true ebs_block_device { device_name = "/dev/sda1" @@ -42,8 +42,14 @@ resource "aws_instance" "node" { delete_on_termination = true } + user_data = <> /home/${var.aws_ssh_user}/.ssh/authorized_keys +EOF + provisioner "remote-exec" { - inline = var.airgap_setup ? [ + inline = var.airgap_setup || var.proxy_setup ? [ "sudo su < /root/.ssh/authorized_keys", "echo \"${var.corral_private_key}\" > /root/.ssh/id_rsa", @@ -57,12 +63,12 @@ resource "aws_instance" "node" { } connection { type = "ssh" - host = var.airgap_setup ? self.private_ip : self.public_ip + host = var.airgap_setup || var.proxy_setup ? self.private_ip : self.public_ip user = var.aws_ssh_user private_key = var.corral_private_key timeout = "4m" - bastion_host = var.airgap_setup ? var.bastion_ip : null - bastion_user = var.airgap_setup ? "root" : null + bastion_host = var.airgap_setup || var.proxy_setup ? 
var.bastion_ip : null + bastion_user = var.airgap_setup || var.proxy_setup ? "root" : null } tags = { diff --git a/templates/aws/nodes/terraform/pools/outputs.tf b/templates/aws/nodes/terraform/pools/outputs.tf index d16fd30..6611aaf 100644 --- a/templates/aws/nodes/terraform/pools/outputs.tf +++ b/templates/aws/nodes/terraform/pools/outputs.tf @@ -6,9 +6,10 @@ output "corral_node_pools" { value = { node = [for instance in aws_instance.node : { name = instance.tags.Name // unique name of node - user = "root" // ssh username - address = var.airgap_setup ? instance.private_ip : instance.public_ip // address of ssh host - bastion_address = var.airgap_setup ? var.bastion_ip : "" + user = var.proxy_setup ? "root" : var.aws_ssh_user // ssh username + address = var.airgap_setup || var.proxy_setup ? instance.private_ip : instance.public_ip // address of ssh host + bastion_address = var.airgap_setup || var.proxy_setup ? var.bastion_ip : "" + bastion_internal_address = var.airgap_setup || var.proxy_setup ? var.bastion_private_ip : "" }] } } \ No newline at end of file diff --git a/templates/aws/registry_nodes/manifest.yaml b/templates/aws/registry_nodes/manifest.yaml index b17742d..e0561b7 100644 --- a/templates/aws/registry_nodes/manifest.yaml +++ b/templates/aws/registry_nodes/manifest.yaml @@ -40,6 +40,10 @@ variables: type: string optional: false description: "The subnet where the ec2 instance and the load balancer is created" + proxy_setup: + type: boolean + description: "Boolean that when set to true, will utilize registry_nodes as the bastion node for a proxied setup, ranther than a registry." + default: false airgap_setup: type: boolean description: "Boolean that when set to true, will set the registry_fqdn to the private IP address, rather than the public IP address." 
diff --git a/templates/aws/registry_nodes/terraform/pools/corral.tf b/templates/aws/registry_nodes/terraform/pools/corral.tf index ed6bdbe..11f1a75 100644 --- a/templates/aws/registry_nodes/terraform/pools/corral.tf +++ b/templates/aws/registry_nodes/terraform/pools/corral.tf @@ -18,3 +18,4 @@ variable "aws_volume_size" {} variable "install_docker" {} variable "instance_type" {} variable "airgap_setup" {} +variable "proxy_setup" {} \ No newline at end of file diff --git a/templates/aws/registry_nodes/terraform/pools/main.tf b/templates/aws/registry_nodes/terraform/pools/main.tf index 9a28a45..11dbd31 100644 --- a/templates/aws/registry_nodes/terraform/pools/main.tf +++ b/templates/aws/registry_nodes/terraform/pools/main.tf @@ -51,7 +51,7 @@ resource "aws_instance" "registry" { } provisioner "remote-exec" { - inline = var.airgap_setup ? [ + inline = var.airgap_setup || var.proxy_setup ? [ "sudo su < /root/.ssh/authorized_keys", "echo \"${var.corral_private_key}\"", @@ -73,7 +73,7 @@ resource "aws_instance" "registry" { } tags = { - Name = "${var.corral_user_id}-${random_id.cluster_id.hex}-registry" + Name = "${var.corral_user_id}-${random_id.cluster_id.hex}-${var.proxy_setup ? "proxy-bastion" : "registry"}" } } diff --git a/templates/aws/registry_nodes/terraform/pools/outputs.tf b/templates/aws/registry_nodes/terraform/pools/outputs.tf index c58b762..3bfb383 100644 --- a/templates/aws/registry_nodes/terraform/pools/outputs.tf +++ b/templates/aws/registry_nodes/terraform/pools/outputs.tf @@ -1,5 +1,5 @@ output "registry_fqdn" { - value = aws_route53_record.aws_route53.fqdn + value = var.proxy_setup ? 
null : aws_route53_record.aws_route53.fqdn } output "registry_ip" { @@ -10,6 +10,14 @@ output "registry_private_ip" { value = aws_instance.registry.private_ip } +output "bastion_ip" { + value = aws_instance.registry.public_ip +} + +output "bastion_private_ip" { + value = aws_instance.registry.private_ip +} + output "corral_node_pools" { value = { registry = [for node in [aws_instance.registry] : { diff --git a/templates/proxy-standalone/manifest.yaml b/templates/proxy-standalone/manifest.yaml new file mode 100644 index 0000000..a9c0c65 --- /dev/null +++ b/templates/proxy-standalone/manifest.yaml @@ -0,0 +1,26 @@ +name: proxy +description: | + A docker squid proxy +variables: + bastion_ip: + type: string + readOnly: true + description: "Public IP address of the proxy bastion node" + bastion_private_ip: + type: string + readOnly: true + description: "Private IP address of the proxy bastion node" + rancher_version: + type: string + description: "The rancher version to download the images for" + cert_manager_version: + type: string + description: "The cert-manager version for HA rancher install" + proxy_setup: + type: boolean + description: "Boolean to set a registry_node as a proxy bastion node" + default: true +commands: + - command: /opt/corral/proxy/proxy-install.sh + node_pools: + - registry diff --git a/templates/proxy-standalone/overlay/opt/basic-proxy/squid/squid.conf b/templates/proxy-standalone/overlay/opt/basic-proxy/squid/squid.conf new file mode 100644 index 0000000..6b14300 --- /dev/null +++ b/templates/proxy-standalone/overlay/opt/basic-proxy/squid/squid.conf @@ -0,0 +1,79 @@ +# +# Recommended minimum configuration: +# + +# Example rule allowing access from your local networks. 
+# Adapt to list your (internal) IP networks from where browsing +# should be allowed +acl localnet src 10.0.0.0/8 # RFC1918 possible internal network +acl localnet src 172.0.0.0/8 # RFC1918 possible internal network +acl localnet src 192.168.0.0/16 # RFC1918 possible internal network +acl localnet src fc00::/7 # RFC 4193 local private network range +acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines + +acl Safe_ports port 22 # ssh +acl Safe_ports port 2376 # docker port +acl Safe_ports port 2379 # docker port +acl Safe_ports port 8443 # keycloak +acl SSL_ports port 22 +acl SSL_ports port 2376 +acl SSL_ports port 2379 + +acl SSL_ports port 443 +acl SSL_ports port 6443 +acl SSL_ports port 8443 # keycloak +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 1025-65535 # unregistered ports +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +acl Safe_ports port 6443 # k8s +acl CONNECT method CONNECT + +# Recommended minimum Access Permission configuration: +# Deny requests to certain unsafe ports +http_access deny !Safe_ports + +# Deny CONNECT to other than secure SSL ports +http_access deny CONNECT !SSL_ports + +# Only allow cachemgr access from localhost +http_access allow localhost manager +http_access deny manager + +# We strongly recommend the following be uncommented to protect innocent +# web applications running on the proxy server who think the only +# one who can access services on "localhost" is a local user +#http_access deny to_localhost + +# Example rule allowing access from your local networks. 
+# Adapt localnet in the ACL section to list your (internal) IP networks +# from where browsing should be allowed +http_access allow localnet +http_access allow localhost +http_access allow all + +# Squid normally listens to port 3128 +http_port 3219 + +# Uncomment and adjust the following to add a disk cache directory. +#cache_dir ufs /var/cache/squid 100 16 256 + +# Leave coredumps in the first cache dir +coredump_dir /var/cache/squid + +# Add any of your own refresh_pattern entries above these. +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern ^gopher: 1440 0% 1440 +refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 +refresh_pattern . 0 20% 4320 + + + + + diff --git a/templates/proxy-standalone/overlay/opt/corral/proxy/proxy-install.sh b/templates/proxy-standalone/overlay/opt/corral/proxy/proxy-install.sh new file mode 100755 index 0000000..772d458 --- /dev/null +++ b/templates/proxy-standalone/overlay/opt/corral/proxy/proxy-install.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -ex + +function corral_set() { + echo "corral_set $1=$2" +} + +function corral_log() { + echo "corral_log $1" +} + +echo "$CORRAL_corral_user_public_key" >> "$HOME"/.ssh/authorized_keys + +docker run -d -v /opt/basic-proxy/squid/squid.conf:/etc/squid/squid.conf -p 3219:3219 ubuntu/squid + +CORRAL_squid_container=$(docker ps --format '{{.ID}}') + +echo "corral_set squid_container=$CORRAL_squid_container" diff --git a/templates/rancher-custom-cluster/manifest.yaml b/templates/rancher-custom-cluster/manifest.yaml index 0181a17..a32db4e 100644 --- a/templates/rancher-custom-cluster/manifest.yaml +++ b/templates/rancher-custom-cluster/manifest.yaml @@ -6,6 +6,9 @@ variables: type: string description: "registration command to register a node as cluster to a existing rancher instance" commands: + - command: "/opt/corral/cluster/proxy-env.sh" + node_pools: + - node - command: "/opt/corral/cluster/register-cluster.sh" node_pools: - node \ No newline at end of file diff --git 
a/templates/rancher-custom-cluster/overlay/opt/corral/cluster/proxy-env.sh b/templates/rancher-custom-cluster/overlay/opt/corral/cluster/proxy-env.sh new file mode 100644 index 0000000..959a76f --- /dev/null +++ b/templates/rancher-custom-cluster/overlay/opt/corral/cluster/proxy-env.sh @@ -0,0 +1,121 @@ +#!/bin/bash +set -ex + +if [ "$CORRAL_proxy_setup" = true ]; then +#proxy settings for .bashrc +env=" +export HTTP_PROXY=http://${CORRAL_bastion_private_ip}:3219 +export HTTPS_PROXY=http://${CORRAL_bastion_private_ip}:3219 +export http_proxy=http://${CORRAL_bastion_private_ip}:3219 +export https_proxy=http://${CORRAL_bastion_private_ip}:3219 +export proxy_host=${CORRAL_bastion_private_ip}:3219 +export NO_PROXY=localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,172.16.0.0/12,cattle-system.svc,192.168.0.0/16,169.254.169.254,172.66.47.109,172.66.47.147" +#NO_PROXY -> AWS metadata requires 169.254.169.254 -> cert-manager install requires 172.66.47.109 and 172.66.47.147 + + +#set .bashrc for ubuntu user +cat > /home/ubuntu/.bashrc <<- EOF +${env} +EOF + +#set .bashrc for root user +cat > /root/.bashrc <<- EOF +${env} +EOF + +#proxy settings for rke2 server +touch /etc/default/rke2-server +rke2env=" +HTTP_PROXY=http://${CORRAL_bastion_private_ip}:3219 +HTTPS_PROXY=http://${CORRAL_bastion_private_ip}:3219 +NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,169.254.169.254,.svc,.cluster.local,cattle-system.svc +CONTAINERD_HTTP_PROXY=http://${CORRAL_bastion_private_ip}:3219 +CONTAINERD_HTTPS_PROXY=http://${CORRAL_bastion_private_ip}:3219 +CONTAINERD_NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,169.254.169.254,.svc,.cluster.local,cattle-system.svc +http_proxy=http://${CORRAL_bastion_private_ip}:3219 +https_proxy=http://${CORRAL_bastion_private_ip}:3219" +cat > /etc/default/rke2-server <<- EOF +${rke2env} +EOF + +#proxy settings for rke2 agent +touch /etc/default/rke2-agent +cat > /etc/default/rke2-agent <<- EOF +${rke2env} +EOF + +#proxy settings for curl 
+touch /home/ubuntu/.curlrc +curlenv="proxy=${CORRAL_bastion_private_ip}:3219" +cat > /home/ubuntu/.curlrc <<- EOF +${curlenv} +EOF + +#proxy settings for curl as root user +touch /root/.curlrc +cat > /root/.curlrc <<- EOF +${curlenv} +EOF + +#proxy settings for wget +touch /home/ubuntu/.wgetrc +wgetenv=" +use_proxy=yes +http_proxy=${CORRAL_bastion_private_ip}:3219 +https_proxy=${CORRAL_bastion_private_ip}:3219" +cat > /home/ubuntu/.wgetrc <<- EOF +${wgetenv} +EOF + +#proxy settings for wget as root user +touch /root/.wgetrc +cat > /root/.wgetrc <<- EOF +${wgetenv} +EOF + +#proxy settings for apt-get +touch /etc/apt/apt.conf +aptenv="Acquire::http::Proxy \"http://${CORRAL_bastion_private_ip}:3219\";" +cat > /etc/apt/apt.conf <<- EOF +${aptenv} +EOF + +#proxy settings for docker hub +mkdir -p /home/ubuntu/.docker +touch /home/ubuntu/.docker/config.json +dockerenv="{ + \"proxies\": { + \"default\": { + \"httpProxy\": \"http://${CORRAL_bastion_private_ip}:3219\", + \"httpsProxy\": \"http://${CORRAL_bastion_private_ip}:3219\", + \"noProxy\": \"localhost,127.0.0.1,127.0.0.0/8,0.0.0.0,10.0.0.0/8,cattle-system.svc,192.168.10.0/24,.svc,.cluster.local\" + } + } +}" +cat > /home/ubuntu/.docker/config.json <<- EOF +${dockerenv} +EOF + +#proxy settings for docker hub as root user +mkdir -p ~/.docker +touch /root/.docker/config.json +cat > /root/.docker/config.json <<- EOF +${dockerenv} +EOF + +#proxy settings for docker daemon as root +sudo mkdir -p /etc/systemd/system/docker.service.d +sudo touch /etc/systemd/system/docker.service.d/http-proxy.conf +dockerproxy=" +[Service] +Environment=\"HTTPS_PROXY=http://${CORRAL_bastion_private_ip}:3219\" +Environment=\"HTTP_PROXY=http://${CORRAL_bastion_private_ip}:3219\"" +cat > /etc/systemd/system/docker.service.d/http-proxy.conf <<- EOF +${dockerproxy} +EOF + +sudo systemctl daemon-reload +sudo systemctl restart docker + +fi + diff --git a/templates/rancher-custom-cluster/overlay/opt/corral/cluster/register-cluster.sh 
b/templates/rancher-custom-cluster/overlay/opt/corral/cluster/register-cluster.sh index c79b1ec..ecda973 100755 --- a/templates/rancher-custom-cluster/overlay/opt/corral/cluster/register-cluster.sh +++ b/templates/rancher-custom-cluster/overlay/opt/corral/cluster/register-cluster.sh @@ -1,3 +1,13 @@ #!/bin/bash +set -ex -eval ${CORRAL_registration_command} \ No newline at end of file +if [ "$CORRAL_proxy_setup" = true ]; then + +reg_command=$(echo ${CORRAL_registration_command} | sed "s/"\\\""/"\""/g") +eval ${reg_command} + +else + +eval ${CORRAL_registration_command} + +fi diff --git a/templates/rancher-proxy/manifest.yaml b/templates/rancher-proxy/manifest.yaml new file mode 100644 index 0000000..874b2e8 --- /dev/null +++ b/templates/rancher-proxy/manifest.yaml @@ -0,0 +1,52 @@ +name: rancher-proxy +description: Install rancher on the airgapped kubernetes cluster behind a proxy. +variables: + rancher_version: + type: string + description: "Specify rancher version to install. Defaults to latest stable version." + bootstrap_password: + readOnly: true + type: string + description: "Initial password for the rancher `admin` user." + rancher_host: + readOnly: true + type: string + description: "Host of newly created rancher instance." + rancher_url: + readOnly: true + type: string + description: "Host of newly created rancher instance." + cert_manager_version: + type: string + description: "The cert-manager version for rancher install" + rancher_chart_repo: + optional: false + type: string + default: "latest" + description: "Name of Helm chart to use for Rancher install. Example: latest, alpha, stable, prime or staging" + rancher_image: + type: string + optional: true + description: "Specify rancher image for rancher image in a separate docker hub/registry" + rancher_image_tag: + type: string + optional: true + description: "Specify rancher image tag for the latest commit/version of rancher." 
+ rancher_chart_url: + type: string + description: "the URL of the helm repo where rancher chart exists. i.e. https://releases.rancher.com/server-charts/latest" + optional: true +commands: + - command: "/opt/corral/rancher/preflight.sh" + node_pools: + - bastion + - server + - command: "/opt/corral/rancher/install-cert-manager.sh" + node_pools: + - bastion + - command: "/opt/corral/rancher/install-rancher.sh" + node_pools: + - bastion + - command: "/opt/corral/rancher/wait-for-password.sh" + node_pools: + - bastion \ No newline at end of file diff --git a/templates/rancher-proxy/overlay/opt/corral/rancher/install-cert-manager.sh b/templates/rancher-proxy/overlay/opt/corral/rancher/install-cert-manager.sh new file mode 100644 index 0000000..92f5c3e --- /dev/null +++ b/templates/rancher-proxy/overlay/opt/corral/rancher/install-cert-manager.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -ex + +helm repo add jetstack https://charts.jetstack.io +helm repo update +kubectl create namespace cert-manager +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v$CORRAL_cert_manager_version/cert-manager.crds.yaml + +helm upgrade --install cert-manager jetstack/cert-manager --namespace cert-manager --version v$CORRAL_cert_manager_version --set http_proxy=http://$CORRAL_registry_private_ip:3219 --set https_proxy=http://$CORRAL_registry_private_ip:3219 --set no_proxy=127.0.0.0/8\\,10.0.0.0/8\\,172.0.0.0/8\\,192.168.0.0/16\\,.svc\\,.cluster.local\\,cattle-system.svc\\,169.254.169.254 +# when attempting to install rancher right after the cert-manager install there is some intermitten issues +# allowing it to sleep for at least a 1m fixes the issue. 
+sleep 1m diff --git a/templates/rancher-proxy/overlay/opt/corral/rancher/install-rancher.sh b/templates/rancher-proxy/overlay/opt/corral/rancher/install-rancher.sh new file mode 100644 index 0000000..a747231 --- /dev/null +++ b/templates/rancher-proxy/overlay/opt/corral/rancher/install-rancher.sh @@ -0,0 +1,67 @@ +#!/bin/bash +set -ex +repos=("latest" "alpha" "stable" "staging" "prime") +if [[ ! ${repos[*]} =~ ${CORRAL_rancher_chart_repo} ]]; then + echo 'Error: `rancher_chart_repo` must be one of ["latest", "alpha", "stable", "staging", "prime"]' + exit 1 +fi + +CORRAL_rancher_host=${CORRAL_rancher_host:="${CORRAL_fqdn}"} +CORRAL_rancher_version=${CORRAL_rancher_version:=$(helm search repo rancher-latest/rancher -o json | jq -r .[0].version)} +minor_version=$(echo "$CORRAL_kubernetes_version" | cut -d. -f2) + +kubectl create namespace cattle-system + +community=("latest" "alpha" "stable") + +if [ "$minor_version" -gt 24 ]; then + + args=("rancher-$CORRAL_rancher_chart_repo/rancher" "--namespace cattle-system" "--set global.cattle.psp.enabled=false" "--set hostname=$CORRAL_rancher_host" "--version=$CORRAL_rancher_version" "--set proxy=http://$CORRAL_bastion_private_ip:3219") + + if [[ ${community[*]} =~ ${CORRAL_rancher_chart_repo} ]]; then + if [ ! -z "$CORRAL_rancher_chart_url" ]; then + helm repo add "rancher-$CORRAL_rancher_chart_repo" "$CORRAL_rancher_chart_url" + else + helm repo add "rancher-$CORRAL_rancher_chart_repo" "https://releases.rancher.com/server-charts/$CORRAL_rancher_chart_repo" + fi + args2=("") + fi + + if [[ "$CORRAL_rancher_chart_repo" == "prime" ]]; then + helm repo add "rancher-prime" "https://charts.rancher.com/server-charts/prime" + args2=("--set rancherImage=registry.suse.com/rancher/rancher") + fi + + if [[ "$CORRAL_rancher_chart_repo" == "staging" ]]; then + helm repo add "rancher-staging" "https://charts.optimus.rancher.io/server-charts/latest" + args2=("--set rancherImage=stgregistry.suse.com/rancher/rancher") + + if [ ! 
-z "$CORRAL_rancher_image_tag" ]; then + args2+=("--set rancherImageTag=$CORRAL_rancher_image_tag") + fi + + helm repo update + + if [ ! -z "$CORRAL_rancher_image" ]; then + helm upgrade --install rancher ${args[*]} --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,172.0.0.0/8\\,192.168.0.0/16\\,.svc\\,.cluster.local\\,cattle-system.svc\\,169.254.169.254 ${args2[*]} --set 'extraEnv[0].name=CATTLE_AGENT_IMAGE' --set 'extraEnv[0].value=stgregistry.suse.com/rancher/rancher-agent:'$CORRAL_rancher_image'' + else + helm upgrade --install rancher ${args[*]} --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,172.0.0.0/8\\,192.168.0.0/16\\,.svc\\,.cluster.local\\,cattle-system.svc\\,169.254.169.254 ${args2[*]} + fi + echo "corral_set rancher_version=$CORRAL_rancher_version" + echo "corral_set rancher_host=$CORRAL_rancher_host" + exit 0 + fi + + helm repo update + + if [ ! -z "$CORRAL_rancher_image_tag" ]; then + args2+=("--set rancherImageTag=$CORRAL_rancher_image_tag") + fi + + helm upgrade --install rancher ${args[*]} --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,172.0.0.0/8\\,192.168.0.0/16\\,.svc\\,.cluster.local\\,cattle-system.svc\\,169.254.169.254 ${args2[*]} +else + helm upgrade --install rancher rancher-$CORRAL_rancher_chart_repo/rancher --namespace cattle-system --set hostname=$CORRAL_rancher_host --version=$CORRAL_rancher_version --set proxy=http://$CORRAL_bastion_private_ip:3219 --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,172.0.0.0/8\\,192.168.0.0/16\\,.svc\\,.cluster.local\\,cattle-system.svc\\,169.254.169.254 +fi + +echo "corral_set rancher_version=$CORRAL_rancher_version" +echo "corral_set rancher_host=$CORRAL_rancher_host" diff --git a/templates/rancher-proxy/overlay/opt/corral/rancher/preflight.sh b/templates/rancher-proxy/overlay/opt/corral/rancher/preflight.sh new file mode 100644 index 0000000..252b063 --- /dev/null +++ b/templates/rancher-proxy/overlay/opt/corral/rancher/preflight.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -ex + +apt-get update || true + +apt install -y jq || true + 
+curl --proxy http://$CORRAL_bastion_private_ip:3219 https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + +curl --proxy http://$CORRAL_bastion_private_ip:3219 -LO https://storage.googleapis.com/kubernetes-release/release/$(curl --proxy http://$CORRAL_bastion_private_ip:3219 -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl + +chmod +x ./kubectl +mv ./kubectl /usr/local/bin/kubectl + +mkdir -p ~/.kube + +echo $CORRAL_kubeconfig | base64 -d > ~/.kube/config +chmod 400 ~/.kube/config \ No newline at end of file diff --git a/templates/rancher-proxy/overlay/opt/corral/rancher/wait-for-password.sh b/templates/rancher-proxy/overlay/opt/corral/rancher/wait-for-password.sh new file mode 100644 index 0000000..96544c1 --- /dev/null +++ b/templates/rancher-proxy/overlay/opt/corral/rancher/wait-for-password.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -ex + +function corral_set() { + echo "corral_set $1=$2" +} + +function corral_log() { + echo "corral_log $1" +} + +if [[ $CORRAL_rancher_version == 2.5.* ]]; then + echo "corral_set bootstrap_password=admin" + exit 0 +fi + +if [ -n "${CORRAL_bootstrap_password}" ]; then + echo "corral_set bootstrap_password=${CORRAL_bootstrap_password}" + exit 0 +fi + +echo "waiting for bootstrap password" +until [ "$(kubectl -n cattle-system get secret/bootstrap-secret -o json --ignore-not-found=true | jq -r '.data.bootstrapPassword | length > 0')" == "true" ]; do + sleep 0.1 + echo -n "." 
+done +echo + +echo "corral_set bootstrap_password=$(kubectl -n cattle-system get secret/bootstrap-secret -o json | jq -r '.data.bootstrapPassword' | base64 -d)" +bootstrap_password=$(kubectl -n cattle-system get secret/bootstrap-secret -o json | jq -r '.data.bootstrapPassword' | base64 -d) + + +corral_log "Bastion public address: ${CORRAL_registry_ip}" + +corral_log "Bastion private address: ${CORRAL_bastion_private_ip}" + +corral_log "Save private key: echo \"${CORRAL_corral_private_key}\" | tr -d '\"' > id_rsa" + +corral_log "Save public key: echo \"${CORRAL_corral_public_key}\" | tr -d '\"' > id_rsa.pub" + +corral_log "Follow squid proxy logs: ssh -i id_rsa root@${CORRAL_registry_ip} \"sudo docker exec $CORRAL_squid_container tail -f /var/log/squid/access.log\" " + +corral_log "Connect to bastion node: ssh -i id_rsa root@${CORRAL_registry_ip}" + +corral_log "From bastion, connect to rancher server node with: ssh ubuntu@${CORRAL_kube_api_host}" + +corral_log "Rancher instance running at: https://$CORRAL_rancher_host/dashboard/?setup=$bootstrap_password" diff --git a/templates/rke2/manifest.yaml b/templates/rke2/manifest.yaml index 4122c9d..0a29e27 100644 --- a/templates/rke2/manifest.yaml +++ b/templates/rke2/manifest.yaml @@ -19,6 +19,11 @@ commands: - bastion - server - agent + - command: "/opt/corral/rke2/proxy-env.sh" + node_pools: + - bastion + - server + - agent - command: "/opt/corral/rke2/init-cluster.sh" node_pools: - bastion diff --git a/templates/rke2/overlay/opt/corral/rke2/init-cluster.sh b/templates/rke2/overlay/opt/corral/rke2/init-cluster.sh index 439c435..0ab6b22 100755 --- a/templates/rke2/overlay/opt/corral/rke2/init-cluster.sh +++ b/templates/rke2/overlay/opt/corral/rke2/init-cluster.sh @@ -19,6 +19,8 @@ fi if [ "$CORRAL_airgap_setup" = true ]; then config=$(echo "$config" | sed "/tls-san:/a \ - $CORRAL_internal_fqdn ") +elif [ "$CORRAL_proxy_setup" = true ]; then + echo "proxy setup" else CORRAL_rke2_install_command="curl -sfL 
https://get.rke2.io | INSTALL_RKE2_VERSION=${CORRAL_kubernetes_version}" echo "corral_set rke2_install_command=${CORRAL_rke2_install_command}" @@ -37,7 +39,7 @@ eval ${FULL_COMMAND} systemctl enable rke2-server.service systemctl start rke2-server.service -if [ "$CORRAL_airgap_setup" = true ]; then +if [ "$CORRAL_airgap_setup" = true ] || [ "$CORRAL_proxy_setup" = true ]; then CORRAL_kubeconfig_host="${CORRAL_kube_api_host}" else CORRAL_kubeconfig_host="${CORRAL_api_host}" diff --git a/templates/rke2/overlay/opt/corral/rke2/proxy-env.sh b/templates/rke2/overlay/opt/corral/rke2/proxy-env.sh new file mode 100644 index 0000000..b5b7bc8 --- /dev/null +++ b/templates/rke2/overlay/opt/corral/rke2/proxy-env.sh @@ -0,0 +1,84 @@ +#!/bin/bash +set -ex + +if [ "$CORRAL_proxy_setup" = true ]; then +#proxy settings for .bashrc +env=" +export HTTP_PROXY=http://${CORRAL_bastion_private_ip}:3219 +export HTTPS_PROXY=http://${CORRAL_bastion_private_ip}:3219 +export http_proxy=http://${CORRAL_bastion_private_ip}:3219 +export https_proxy=http://${CORRAL_bastion_private_ip}:3219 +export proxy_host=${CORRAL_bastion_private_ip}:3219 +export NO_PROXY=localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,172.16.0.0/12,cattle-system.svc,192.168.0.0/16,169.254.169.254,172.66.47.109,172.66.47.147" +#NO_PROXY -> AWS metadata requires 169.254.169.254 -> cert-manager install requires 172.66.47.109 and 172.66.47.147 + + +#set .bashrc for ubuntu user +cat > /home/ubuntu/.bashrc <<- EOF +${env} +EOF + +#set .bashrc for root user +cat > /root/.bashrc <<- EOF +${env} +EOF + +#proxy settings for rke2 server +touch /etc/default/rke2-server +rke2env=" +HTTP_PROXY=http://${CORRAL_bastion_private_ip}:3219 +HTTPS_PROXY=http://${CORRAL_bastion_private_ip}:3219 +NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,169.254.169.254,.svc,.cluster.local,cattle-system.svc +CONTAINERD_HTTP_PROXY=http://${CORRAL_bastion_private_ip}:3219 +CONTAINERD_HTTPS_PROXY=http://${CORRAL_bastion_private_ip}:3219 
+CONTAINERD_NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,169.254.169.254,.svc,.cluster.local,cattle-system.svc +http_proxy=http://${CORRAL_bastion_private_ip}:3219 +https_proxy=http://${CORRAL_bastion_private_ip}:3219" +cat > /etc/default/rke2-server <<- EOF +${rke2env} +EOF + +#proxy settings for rke2 agent +touch /etc/default/rke2-agent +cat > /etc/default/rke2-agent <<- EOF +${rke2env} +EOF + +#proxy settings for curl +touch /home/ubuntu/.curlrc +curlenv="proxy=${CORRAL_bastion_private_ip}:3219" +cat > /home/ubuntu/.curlrc <<- EOF +${curlenv} +EOF + +#proxy settings for curl as root user +touch /root/.curlrc +cat > /root/.curlrc <<- EOF +${curlenv} +EOF + +#proxy settings for wget +touch /home/ubuntu/.wgetrc +wgetenv=" +use_proxy=yes +http_proxy=${CORRAL_bastion_private_ip}:3219 +https_proxy=${CORRAL_bastion_private_ip}:3219" +cat > /home/ubuntu/.wgetrc <<- EOF +${wgetenv} +EOF + +#proxy settings for wget as root user +touch /root/.wgetrc +cat > /root/.wgetrc <<- EOF +${wgetenv} +EOF + +#proxy settings for apt-get +touch /etc/apt/apt.conf +aptenv="Acquire::http::Proxy \"http://${CORRAL_bastion_private_ip}:3219\";" +cat > /etc/apt/apt.conf <<- EOF +${aptenv} +EOF + +fi + diff --git a/templates/rke2/overlay/opt/corral/rke2/scp-rke2-file.sh b/templates/rke2/overlay/opt/corral/rke2/scp-rke2-file.sh index 9cc8c93..8c4cf6a 100755 --- a/templates/rke2/overlay/opt/corral/rke2/scp-rke2-file.sh +++ b/templates/rke2/overlay/opt/corral/rke2/scp-rke2-file.sh @@ -1,5 +1,5 @@ #!/bin/bash -if [ "$CORRAL_airgap_setup" = true ]; then - scp -r -o StrictHostKeyChecking=no root@"${CORRAL_registry_private_ip}":/root/rke2-artifacts /root/rke2-artifacts +if [ "$CORRAL_airgap_setup" = true ] || [ "$CORRAL_proxy_setup" = true ]; then + scp -r -o StrictHostKeyChecking=no root@"${CORRAL_bastion_private_ip}":/root/rke2-artifacts /root/rke2-artifacts fi \ No newline at end of file