diff --git a/k8s/cloud/base/kustomization.yaml b/k8s/cloud/base/kustomization.yaml
index c3472a97cac..9c918877946 100644
--- a/k8s/cloud/base/kustomization.yaml
+++ b/k8s/cloud/base/kustomization.yaml
@@ -44,3 +44,4 @@ resources:
 - cron_script_deployment.yaml
 - cron_script_service.yaml
 - support_access_config.yaml
+- proxy_nginx_config.yaml
diff --git a/k8s/cloud/base/proxy_deployment.yaml b/k8s/cloud/base/proxy_deployment.yaml
index 41238e6c6d3..06f07cdfd00 100644
--- a/k8s/cloud/base/proxy_deployment.yaml
+++ b/k8s/cloud/base/proxy_deployment.yaml
@@ -78,6 +78,36 @@ spec:
         volumeMounts:
         - name: certs
           mountPath: /certs
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/nginx.conf.tmpl
+          subPath: nginx.conf.tmpl
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/pixie_vars.conf
+          subPath: pixie_vars.conf
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/pixie_api.conf
+          subPath: pixie_api.conf
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/pixie_cache.conf
+          subPath: pixie_cache.conf
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/pixie_compression.conf
+          subPath: pixie_compression.conf
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/pixie_filter.conf
+          subPath: pixie_filter.conf
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/pixie_health_check.conf
+          subPath: pixie_health_check.conf
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/pixie_server_defaults.conf
+          subPath: pixie_server_defaults.conf
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/headers_common.conf
+          subPath: headers_common.conf
+        - name: nginx-config
+          mountPath: /usr/local/openresty/nginx/conf/nginx_vars.conf
+          subPath: nginx_vars.conf
         securityContext:
           allowPrivilegeEscalation: false
           capabilities:
@@ -146,3 +176,6 @@ spec:
       - name: certs
         secret:
           secretName: cloud-proxy-tls-certs
+      - name: nginx-config
+        configMap:
+          name: pl-proxy-nginx-config
diff --git a/k8s/cloud/base/proxy_nginx_config.yaml b/k8s/cloud/base/proxy_nginx_config.yaml
new file mode 100644
index 00000000000..57ee90507f8
--- /dev/null
+++ b/k8s/cloud/base/proxy_nginx_config.yaml
@@ -0,0 +1,421 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: pl-proxy-nginx-config
+data:
+  headers_common.conf: |-
+    # HSTS config.
+    add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload";
+
+  pixie_server_defaults.conf: |-
+    # On redirects, don't rewrite the host/port. Rewriting can cause problems because the
+    # nginx service sits behind a load balancer/proxy.
+    absolute_redirect off;
+
+  pixie_health_check.conf: |-
+    location /healthz {
+      access_log off;
+      return 200 "healthy\n";
+    }
+
+  pixie_filter.conf: |-
+    # These rewrite the config variables used in our assets.
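+    # For example, a bundled asset containing the bare token:
+    #   var provider = __CONFIG_OAUTH_PROVIDER__;
+    # would be served as (assuming the proxy runs with PL_OAUTH_PROVIDER=auth0):
+    #   var provider = 'auth0';
+    # Illustrative snippet only; the quotes come from the replacement strings below.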
+    sub_filter_types application/javascript;
+    sub_filter '__CONFIG_OAUTH_PROVIDER__' "'${oauth_provider}'";
+    sub_filter '__CONFIG_AUTH_URI__' "'${auth_uri}'";
+    sub_filter '__CONFIG_AUTH_CLIENT_ID__' "'${auth_client_id}'";
+    sub_filter '__CONFIG_AUTH_EMAIL_PASSWORD_CONN__' "'${auth_email_password_conn}'";
+    sub_filter '__CONFIG_OIDC_HOST__' "'${oidc_host}'";
+    sub_filter '__CONFIG_OIDC_METADATA_URL__' "'${oidc_metadata_url}'";
+    sub_filter '__CONFIG_OIDC_CLIENT_ID__' "'${oidc_client_id}'";
+    sub_filter '__CONFIG_OIDC_ADDITIONAL_SCOPES__' "'${oidc_additional_scopes}'";
+    sub_filter '__CONFIG_OIDC_SOCIAL_CONFIG_LOGIN__' "`${oidc_social_config_login}`";
+    sub_filter '__CONFIG_OIDC_SOCIAL_CONFIG_SIGNUP__' "`${oidc_social_config_signup}`";
+    sub_filter '__CONFIG_DOMAIN_NAME__' "'${domain_name}'";
+    sub_filter '__CONFIG_SCRIPT_BUNDLE_URLS__' "'${script_bundle_urls}'";
+    sub_filter '__CONFIG_SCRIPT_BUNDLE_DEV__' "'${script_bundle_dev}'";
+    sub_filter '__SEGMENT_UI_WRITE_KEY__' "'${segment_ui_write_key}'";
+    sub_filter '__SEGMENT_ANALYTICS_JS_DOMAIN__' "'segment.${domain_name}'";
+    sub_filter '__CONFIG_LD_CLIENT_ID__' "'${ld_client_id}'";
+    sub_filter '__ANALYTICS_ENABLED__' "${analytics_enabled}";
+    sub_filter '__ANNOUNCEMENT_ENABLED__' "${announcement_enabled}";
+    sub_filter '__ANNOUNCE_WIDGET_URL__' "'${announce_widget_url}'";
+    sub_filter '__CONTACT_ENABLED__' "${contact_enabled}";
+    sub_filter '__PASSTHROUGH_PROXY_PORT__' "'${passthrough_proxy_port}'";
+    # We only need to filter once, since these variables should not be repeated.
+    sub_filter_once on;
+
+  pixie_compression.conf: |-
+    gzip on;
+
+    # Enable compression for common types.
+    gzip_types
+      application/x-font-opentype
+      application/grpc-web-text
+      application/javascript
+      application/json
+      application/x-font-ttf
+      application/x-javascript
+      application/xml
+      image/svg+xml
+      font/woff2
+      text/css
+      text/javascript
+      text/plain
+      text/xml;
+
+    # Try to serve .gz files directly if they exist.
+    # TODO(zasgar): Re-enable once we fix env subs.
+    gzip_static off;
+
+    # Increase the compression level, at the expense of additional CPU.
+    # CPU cycles are cheap virtually everywhere now; bandwidth is not nearly as much.
+    gzip_comp_level 9;
+
+    gzip_proxied any;
+
+    # How many buffers to hold.
+    gzip_buffers 16 64k;
+
+    # Min size before we compress.
+    gzip_min_length 50;
+
+  pixie_cache.conf: |-
+    etag on;
+    expires 60m;
+    add_header Cache-Control "public";
+    include /usr/local/openresty/nginx/conf/headers_common.conf;
+
+  pixie_api.conf: |-
+    location /api/ {
+      proxy_set_header Host $host;
+      proxy_set_header X-Real-IP $remote_addr;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto $scheme;
+      proxy_pass https://httpapisvc;
+    }
+
+    location ~ ^/pl.* {
+      rewrite ^/pl\.(.*)$ /px.$1 last;
+    }
+
+    location ~ ^/px.cloudapi.* {
+      # The nginx parser does not seem to understand that the $api_service
+      # variable contains the port. Just referring to it as api-service, since this
+      # works in a service-agnostic way.
+      grpc_pass grpcs://grpcapisvc;
+    }
+
+    location ~ ^/px.api.* {
+      # The nginx parser does not seem to understand that the $api_service
+      # variable contains the port. Just referring to it as api-service, since this
+      # works in a service-agnostic way.
+      grpc_pass grpcs://grpcapisvc;
+    }
+
+  pixie_vars.conf: |-
+    # Get the environment information so that we can inject the correct config
+    # into the javascript assets.
+    # These environment variables also need an `env` directive in nginx.conf.tmpl.
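+    # To plumb a new value through (hypothetical PL_EXAMPLE_FLAG), you would:
+    #   1. add `env PL_EXAMPLE_FLAG;` to nginx.conf.tmpl,
+    #   2. add `set_by_lua_block $example_flag { return os.getenv("PL_EXAMPLE_FLAG") }` below,
+    #   3. reference it, e.g. `sub_filter '__EXAMPLE_FLAG__' "'${example_flag}'";` in pixie_filter.conf.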
+    set_by_lua_block $oauth_provider { return os.getenv("PL_OAUTH_PROVIDER") }
+    set_by_lua_block $auth_uri { return os.getenv("PL_AUTH_URI") }
+    set_by_lua_block $auth_client_id { return os.getenv("PL_AUTH_CLIENT_ID") }
+    set_by_lua_block $auth_email_password_conn { return os.getenv("PL_AUTH_EMAIL_PASSWORD_CONN") }
+    set_by_lua_block $oidc_host { return os.getenv("PL_OIDC_HOST") }
+    set_by_lua_block $oidc_metadata_url { return os.getenv("PL_OIDC_METADATA_URL") }
+    set_by_lua_block $oidc_client_id { return os.getenv("PL_OIDC_CLIENT_ID") }
+    set_by_lua_block $oidc_additional_scopes { return os.getenv("PL_OIDC_ADDITIONAL_SCOPES") }
+    set_by_lua_block $oidc_social_config_login { return os.getenv("PL_OIDC_SOCIAL_CONFIG_LOGIN") }
+    set_by_lua_block $oidc_social_config_signup { return os.getenv("PL_OIDC_SOCIAL_CONFIG_SIGNUP") }
+    set_by_lua_block $domain_name { return os.getenv("PL_DOMAIN_NAME") }
+    set_by_lua_block $api_service { return os.getenv("PL_API_SERVICE_HTTP") }
+    set_by_lua_block $segment_ui_write_key { return os.getenv("PL_SEGMENT_UI_WRITE_KEY") }
+    set_by_lua_block $segment_cli_write_key { return os.getenv("PL_SEGMENT_CLI_WRITE_KEY") }
+    set_by_lua_block $script_bundle_urls { return os.getenv("SCRIPT_BUNDLE_URLS") }
+    set_by_lua_block $script_bundle_dev { return os.getenv("SCRIPT_BUNDLE_DEV") }
+    set_by_lua_block $analytics_enabled { return os.getenv("ANALYTICS_ENABLED") }
+    set_by_lua_block $announcement_enabled { return os.getenv("ANNOUNCEMENT_ENABLED") }
+    set_by_lua_block $announce_widget_url { return os.getenv("ANNOUNCE_WIDGET_URL") }
+    set_by_lua_block $ld_client_id { return os.getenv("PL_LD_CLIENT_ID") }
+    set_by_lua_block $contact_enabled { return os.getenv("CONTACT_ENABLED") }
+    set_by_lua_block $passthrough_proxy_port { return os.getenv("PASSTHROUGH_PROXY_PORT") }
+    set_by_lua_block $hydra_service { return os.getenv("PL_HYDRA_SERVICE") }
+    set_by_lua_block $kratos_service { return os.getenv("PL_KRATOS_SERVICE") }
+
+  # This file will have @PL_DOMAIN_NAME@ replaced with the domain name specified in the
+  # pl-domain-config ConfigMap. See the entrypoint.sh script for how this is done.
+  nginx.conf.tmpl: |-
+    # File borrowed from openresty config.
+
+    # Environment variables need to be specified here (and below).
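+    # nginx strips the inherited environment from its worker processes by default, so any
+    # variable read via os.getenv() in pixie_vars.conf must be whitelisted with an
+    # `env` directive here, or it will come back as nil at request time.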
+    env PL_OAUTH_PROVIDER;
+    env PL_AUTH_URI;
+    env PL_AUTH_CLIENT_ID;
+    env PL_AUTH_EMAIL_PASSWORD_CONN;
+    env PL_OIDC_HOST;
+    env PL_OIDC_METADATA_URL;
+    env PL_OIDC_CLIENT_ID;
+    env PL_OIDC_ADDITIONAL_SCOPES;
+    env PL_OIDC_SOCIAL_CONFIG_LOGIN;
+    env PL_OIDC_SOCIAL_CONFIG_SIGNUP;
+    env PL_LD_CLIENT_ID;
+    env PL_DOMAIN_NAME;
+    env PL_API_SERVICE_HTTP;
+    env PL_SEGMENT_UI_WRITE_KEY;
+    env PL_SEGMENT_CLI_WRITE_KEY;
+    env PL_HYDRA_SERVICE;
+    env PL_KRATOS_SERVICE;
+    env SCRIPT_BUNDLE_URLS;
+    env SCRIPT_BUNDLE_DEV;
+    env ANALYTICS_ENABLED;
+    env ANNOUNCEMENT_ENABLED;
+    env ANNOUNCE_WIDGET_URL;
+    env CONTACT_ENABLED;
+    env PASSTHROUGH_PROXY_PORT;
+
+    worker_processes auto;
+
+    events {
+      worker_connections 1024;
+    }
+
+    http {
+      include mime.types;
+      default_type application/octet-stream;
+      sendfile on;
+      server_tokens off;
+
+      log_format upstreamlog '[$time_local] $remote_addr - $remote_user - $server_name to: $upstream_addr: $request upstream_response_time $upstream_response_time msec $msec request_time $request_time';
+
+      resolver kube-dns.kube-system.svc.cluster.local valid=5s;
+
+      # Tune nginx keepalives to work with the GCP HTTP(S) Load Balancer:
+      keepalive_timeout 650;
+      keepalive_requests 10000;
+
+      # Increase the default server names hash bucket size to 128:
+      server_names_hash_bucket_size 128;
+
+      proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m max_size=100m inactive=1d;
+      proxy_temp_path /var/cache/nginx/tmp;
+
+      ssl_buffer_size 4k;
+      ssl_session_cache shared:SSL:10m;
+      ssl_session_timeout 1h;
+      ssl_protocols TLSv1.2 TLSv1.3;
+
+      upstream httpapisvc {
+        # The nginx parser does not seem to understand that the $api_service
+        # variable contains the port. Just referring to it as api-service, since this
+        # works in a service-agnostic way.
+        server api-service:51200;
+        keepalive 128;
+      }
+
+      upstream grpcapisvc {
+        # The nginx parser does not seem to understand that the $api_service
+        # variable contains the port. Just referring to it as api-service, since this
+        # works in a service-agnostic way.
+        server api-service:51200;
+        keepalive 128;
+      }
+
+      # This is the default fallback server if none of the subdomains match.
+      server {
+        listen [::]:56000 default_server;
+        listen 56000;
+        server_name _;
+
+        ssl_certificate /certs/tls.crt;
+        ssl_certificate_key /certs/tls.key;
+
+        include /usr/local/openresty/nginx/conf/pixie_health_check.conf;
+
+        if ($http_x_forwarded_proto = "http") {
+          return 404;
+        }
+
+        location / {
+          return 404;
+        }
+      }
+
+      # This is the server that we use for all the locations that don't have a subdomain.
+      # It just serves the minimal pages necessary to authenticate and get into a subdomain-specific site.
+      server {
+        listen [::]:56000 ssl http2;
+        listen 56000 ssl http2;
+        server_name @PL_DOMAIN_NAME@ *.cluster.local;
+
+        include /usr/local/openresty/nginx/conf/pixie_compression.conf;
+        include /usr/local/openresty/nginx/conf/pixie_vars.conf;
+        include /usr/local/openresty/nginx/conf/pixie_server_defaults.conf;
+        include /usr/local/openresty/nginx/conf/pixie_api.conf;
+
+        if ($http_x_forwarded_proto = "http") {
+          return 307 https://$host$request_uri;
+        }
+
+        location /install.sh {
+          root /installer;
+          try_files "/install.sh" =404;
+        }
+
+        location / {
+          return 307 https://work.$domain_name$request_uri;
+        }
+      }
+
+      # This is the server for the work subdomain.
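+      # It serves the UI assets out of /assets and, via the includes below, proxies
+      # the /api and gRPC API locations as well as the /oauth/hydra and /oauth/kratos endpoints.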
+      server {
+        listen [::]:56000 ssl http2;
+        listen 56000 ssl http2;
+        server_name work.@PL_DOMAIN_NAME@;
+
+        error_page 404 = @error404;
+        include /usr/local/openresty/nginx/conf/pixie_compression.conf;
+        include /usr/local/openresty/nginx/conf/pixie_vars.conf;
+        include /usr/local/openresty/nginx/conf/pixie_server_defaults.conf;
+        include /usr/local/openresty/nginx/conf/pixie_health_check.conf;
+        include /usr/local/openresty/nginx/conf/pixie_api.conf;
+        include /usr/local/openresty/nginx/conf/headers_common.conf;
+        include /usr/local/openresty/nginx/conf/private/*.conf;
+
+        # Disable caching by default.
+        add_header Cache-Control "no-store";
+
+        if ($http_x_forwarded_proto = "http") {
+          return 307 https://$host$request_uri;
+        }
+
+        location /oauth/hydra {
+          if ($hydra_service = "") {
+            return 404;
+          }
+          # Using a variable in the proxy_pass argument together with a trailing slash on the
+          # location doesn't forward the subpath to the proxy destination. Instead, we need to
+          # manage this manually by rewriting the path.
+          rewrite ^/oauth/hydra/(.*) /$1 break;
+          # Note: storing the protocol in the env variable causes nginx to reject the proxy_pass destination.
+          proxy_pass https://$hydra_service$uri$is_args$args;
+        }
+
+        # Disabling self-service registration in Kratos is the only way to prevent registration to orgs
+        # that a user does not have permission to join. Org Admins should instead invite their users.
+        # In the future, Kratos will support disabling registration within kratos_config.yaml.
+        # We want to replace this rule when that becomes available.
+        # Issue tracking the feature: https://github.com/ory/kratos/issues/882
+        location /oauth/kratos/self-service/registration {
+          error_page 404 = @error404;
+          return 404;
+        }
+
+        location /oauth/kratos {
+          error_page 404 = @error404;
+          if ($kratos_service = "") {
+            return 404;
+          }
+          # Using a variable in the proxy_pass argument together with a trailing slash on the
+          # location doesn't forward the subpath to the proxy destination. Instead, we need to
+          # manage this manually by rewriting the path.
+          rewrite ^/oauth/kratos/(.*) /$1 break;
+          # Note: storing the protocol in the env variable causes nginx to reject the proxy_pass destination.
+          proxy_pass https://$kratos_service$uri$is_args$args;
+        }
+
+        location /install.sh {
+          root /installer;
+          try_files "/install.sh" =404;
+        }
+
+        location / {
+          gzip_static off;
+          root /assets;
+          try_files $uri "/index.html";
+        }
+
+        # Cache all contents hosted under /static. This is actually the same as the contents hosted
+        # under '/', but nginx knows to cache these, and webpack puts all the hashed assets in here.
+        location ~ ^/static(/.*)$ {
+          gzip_static off;
+          root /assets;
+          include /usr/local/openresty/nginx/conf/pixie_cache.conf;
+          include /usr/local/openresty/nginx/conf/pixie_filter.conf;
+          try_files $1 "/index.html";
+        }
+
+        location /auth-complete {
+          root /assets;
+          include /usr/local/openresty/nginx/conf/pixie_filter.conf;
+          try_files $uri "/index.html";
+        }
+
+        location ~ ^/docs(?:/(.*))?$ {
+          return 307 https://docs.pixielabs.ai/$1;
+        }
+
+        location @error404 {
+          return 307 https://$domain_name?not_found=$uri;
+        }
+      }
+
+      # This is the proxy server for segment.
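+      # It forwards to cdn.segment.com / api.segment.io so that analytics requests are served
+      # from our own domain, caching CDN responses in my_cache.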
+      server {
+        listen [::]:56000 ssl http2;
+        listen 56000 ssl http2;
+        server_name segment.@PL_DOMAIN_NAME@;
+
+        include /usr/local/openresty/nginx/conf/pixie_compression.conf;
+        include /usr/local/openresty/nginx/conf/pixie_vars.conf;
+
+        set $segment_cdn "cdn.segment.com";
+
+        location /v1/projects {
+          proxy_pass https://$segment_cdn$uri$is_args$args;
+          proxy_cache my_cache;
+          proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
+        }
+
+        location /analytics.js/v1 {
+          proxy_pass https://$segment_cdn$uri$is_args$args;
+          proxy_cache my_cache;
+          proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
+        }
+
+        location /analytics-next {
+          proxy_pass https://$segment_cdn$uri$is_args$args;
+          proxy_cache my_cache;
+          proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
+        }
+
+        location /next-integrations {
+          proxy_pass https://$segment_cdn$uri$is_args$args;
+          proxy_cache my_cache;
+          proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
+        }
+
+        location /cli-write-key {
+          return 200 $segment_cli_write_key;
+          default_type text/plain;
+        }
+
+        location / {
+          set $segment_api "api.segment.io";
+          proxy_pass https://$segment_api$uri$is_args$args;
+          proxy_cache my_cache;
+          proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
+        }
+      }
+
+      # This is the redirect to the Netlify-hosted site for docs.
+      server {
+        listen [::]:56000 ssl http2;
+        listen 56000 ssl http2;
+        server_name docs.@PL_DOMAIN_NAME@;
+
+        location / {
+          return 307 https://docs.pixielabs.ai$request_uri;
+        }
+      }
+    }
diff --git a/src/cloud/proxy/BUILD.bazel b/src/cloud/proxy/BUILD.bazel
index bc144f97d91..b46d36138ec 100644
--- a/src/cloud/proxy/BUILD.bazel
+++ b/src/cloud/proxy/BUILD.bazel
@@ -19,28 +19,6 @@ load("@io_bazel_rules_docker//container:layer.bzl", "container_layer")
 
 package(default_visibility = ["//src/cloud:__subpackages__"])
 
-container_layer(
-    name = "conf",
-    directory = "/usr/local/openresty/nginx/conf",
-    files = [
-        "headers_common.conf",
-        "nginx.conf",
-        "pixie_api.conf",
-        "pixie_cache.conf",
-        "pixie_compression.conf",
-        "pixie_filter.conf",
-        "pixie_health_check.conf",
-        "pixie_server_defaults.conf",
-        "pixie_vars.conf",
-    ],
-)
-
-container_layer(
-    name = "conf_private",
-    directory = "/usr/local/openresty/nginx/conf/private",
-    files = glob(["private/*.conf"]),
-)
-
 container_layer(
     name = "ui_assets",
     directory = "/assets",
@@ -68,8 +46,6 @@ container_image(
     base = "@openresty//image",
     entrypoint = ["/scripts/entrypoint.sh"],
     layers = [
-        ":conf",
-        ":conf_private",
         ":ui_assets",
         ":installer",
         ":entrypoint",
diff --git a/src/cloud/proxy/entrypoint.sh b/src/cloud/proxy/entrypoint.sh
index 833bb0d1aa2..d8a04ddc319 100644
--- a/src/cloud/proxy/entrypoint.sh
+++ b/src/cloud/proxy/entrypoint.sh
@@ -17,7 +17,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 if [ -n "$PL_DOMAIN_NAME" ]; then
-  sed -i -e "s/[@]PL_DOMAIN_NAME[@]/$PL_DOMAIN_NAME/" /usr/local/openresty/nginx/conf/nginx.conf
+  sed -e "s/[@]PL_DOMAIN_NAME[@]/$PL_DOMAIN_NAME/" /usr/local/openresty/nginx/conf/nginx.conf.tmpl > /usr/local/openresty/nginx/conf/nginx.conf
 else
   echo "PL_DOMAIN_NAME undefined, exiting"
   exit 1
diff --git a/src/cloud/proxy/headers_common.conf b/src/cloud/proxy/headers_common.conf
deleted file mode 100644
index 2c2a290b7af..00000000000
--- a/src/cloud/proxy/headers_common.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-# HSTS config.
-add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload";
diff --git a/src/cloud/proxy/nginx.conf b/src/cloud/proxy/nginx.conf
deleted file mode 100644
index 16a4ecc9b4a..00000000000
--- a/src/cloud/proxy/nginx.conf
+++ /dev/null
@@ -1,273 +0,0 @@
-# File borrowed from openresty config.
-
-# Environment variable need to be specified here (and below).
-env PL_OAUTH_PROVIDER;
-env PL_AUTH_URI;
-env PL_AUTH_CLIENT_ID;
-env PL_AUTH_EMAIL_PASSWORD_CONN;
-env PL_OIDC_HOST;
-env PL_OIDC_METADATA_URL;
-env PL_OIDC_CLIENT_ID;
-env PL_OIDC_ADDITIONAL_SCOPES;
-env PL_OIDC_SOCIAL_CONFIG_LOGIN;
-env PL_OIDC_SOCIAL_CONFIG_SIGNUP;
-env PL_LD_CLIENT_ID;
-env PL_DOMAIN_NAME;
-env PL_API_SERVICE_HTTP;
-env PL_SEGMENT_UI_WRITE_KEY;
-env PL_SEGMENT_CLI_WRITE_KEY;
-env PL_HYDRA_SERVICE;
-env PL_KRATOS_SERVICE;
-env SCRIPT_BUNDLE_URLS;
-env SCRIPT_BUNDE_DEV;
-env ANALYTICS_ENABLED;
-env ANNOUNCEMENT_ENABLED;
-env ANNOUNCE_WIDGET_URL;
-env CONTACT_ENABLED;
-env PASSTHROUGH_PROXY_PORT;
-
-worker_processes auto;
-
-events {
-    worker_connections 1024;
-}
-
-http {
-    include mime.types;
-    default_type application/octet-stream;
-    sendfile on;
-    server_tokens off;
-
-    log_format upstreamlog '[$time_local] $remote_addr - $remote_user - $server_name to: $upstream_addr: $request upstream_response_time $upstream_response_time msec $msec request_time $request_time';
-
-    resolver kube-dns.kube-system.svc.cluster.local valid=5s;
-
-    # Tune nginx keepalives to work with the GCP HTTP(S) Load Balancer:
-    keepalive_timeout 650;
-    keepalive_requests 10000;
-
-    # Increase default server names length to 128:
-    server_names_hash_bucket_size 128;
-
-    proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m max_size=100m inactive=1d;
-    proxy_temp_path /var/cache/nginx/tmp;
-
-    ssl_buffer_size 4k;
-    ssl_session_cache shared:SSL:10m;
-    ssl_session_timeout 1h;
-    ssl_protocols TLSv1.2 TLSv1.3;
-
-    upstream httpapisvc {
-        # The nginx parser does not seem to understand that the $api_service
-        # variable contains the port. Just referring to it as api-service since this
-        # works in a service agnosistic way.
-        server api-service:51200;
-        keepalive 128;
-    }
-
-    upstream grpcapisvc {
-        # The nginx parser does not seem to understand that the $api_service
-        # variable contains the port. Just referring to it as api-service since this
-        # works in a service agnosistic way.
-        server api-service:51200;
-        keepalive 128;
-    }
-
-    # This is the default fallback server if none of the subdomains match.
-    server {
-        listen [::]:56000 default_server;
-        listen 56000;
-        server_name _;
-
-        ssl_certificate /certs/tls.crt;
-        ssl_certificate_key /certs/tls.key;
-
-        include /usr/local/openresty/nginx/conf/pixie_health_check.conf;
-
-        if ($http_x_forwarded_proto = "http") {
-            return 404;
-        }
-
-        location / {
-            return 404;
-        }
-    }
-
-    # This is the server that we use for all the locations that don't have a subdomain.
-    # This just serves the minimal pages necessary to authenticate and get into a subdomain specfic site.
-    server {
-        listen [::]:56000 ssl http2;
-        listen 56000 ssl http2;
-        server_name @PL_DOMAIN_NAME@ *.cluster.local;
-
-        include /usr/local/openresty/nginx/conf/pixie_compression.conf;
-        include /usr/local/openresty/nginx/conf/pixie_vars.conf;
-        include /usr/local/openresty/nginx/conf/pixie_server_defaults.conf;
-        include /usr/local/openresty/nginx/conf/pixie_api.conf;
-
-        if ($http_x_forwarded_proto = "http") {
-            return 307 https://$host$request_uri;
-        }
-
-        location /install.sh {
-            root /installer;
-            try_files "/install.sh" =404;
-        }
-
-        location / {
-            return 307 https://work.$domain_name$request_uri;
-        }
-    }
-
-    # This is the server for the work subdomain.
-    server {
-        listen [::]:56000 ssl http2;
-        listen 56000 ssl http2;
-        server_name work.@PL_DOMAIN_NAME@;
-
-        error_page 404 = @error404;
-        include /usr/local/openresty/nginx/conf/pixie_compression.conf;
-        include /usr/local/openresty/nginx/conf/pixie_vars.conf;
-        include /usr/local/openresty/nginx/conf/pixie_server_defaults.conf;
-        include /usr/local/openresty/nginx/conf/pixie_health_check.conf;
-        include /usr/local/openresty/nginx/conf/pixie_api.conf;
-        include /usr/local/openresty/nginx/conf/headers_common.conf;
-        include /usr/local/openresty/nginx/conf/private/*.conf;
-
-        # Disable caching by default.
-        add_header Cache-Control "no-store";
-
-        if ($http_x_forwarded_proto = "http") {
-            return 307 https://$host$request_uri;
-        }
-
-        location /oauth/hydra {
-            if ($hydra_service = "") {
-                return 404;
-            }
-            # Using a variable in the proxy_pass argument and a trailing slash on the location
-            # doesn't forward the subpath to the proxy destination. Instead we need to manually
-            # manage this change by rewriting the path.
-            rewrite ^/oauth/hydra/(.*) /$1 break;
-            # Note - storing the protocol in the env variable causes nginx to reject the proxy_pass dest.
-            proxy_pass https://$hydra_service$uri$is_args$args;
-        }
-
-        # Disable self-service registration in Kratos is the only way to prevent registration to orgs
-        # that a user does not have permission to join. Org Admins should instead invite their users.
-        # In the future Kratos will support disabling registration within kratos_config.yaml.
-        # We want to replace this rule when that becomes available.
-        # Issue tracking the feature: https://github.com/ory/kratos/issues/882
-        location /oauth/kratos/self-service/registration {
-            error_page 404 = @error404;
-            return 404;
-        }
-
-        location /oauth/kratos {
-            error_page 404 = @error404;
-            if ($kratos_service = "") {
-                return 404;
-            }
-            # Using a variable in the proxy_pass argument and a trailing slash on the location
-            # doesn't forward the subpath to the proxy destination. Instead we need to manually
-            # manage this change by rewriting the path.
-            rewrite ^/oauth/kratos/(.*) /$1 break;
-            # Note - storing the protocol in the env variable causes nginx to reject the proxy_pass dest.
-            proxy_pass https://$kratos_service$uri$is_args$args;
-        }
-
-        location /install.sh {
-            root /installer;
-            try_files "/install.sh" =404;
-        }
-
-        location / {
-            gzip_static off;
-            root /assets;
-            try_files $uri "/index.html";
-        }
-
-        # Cache all contents hosted under /static. This is actually the same as the contents hosted under '/'
-        # but nginx knows to cache these and webpack puts all the hashed assets in here.
-        location ~ ^/static(/.*)$ {
-            gzip_static off;
-            root /assets;
-            include /usr/local/openresty/nginx/conf/pixie_cache.conf;
-            include /usr/local/openresty/nginx/conf/pixie_filter.conf;
-            try_files $1 "/index.html";
-        }
-
-        location /auth-complete {
-            root /assets;
-            include /usr/local/openresty/nginx/conf/pixie_filter.conf;
-            try_files $uri "/index.html";
-        }
-
-        location ~ ^/docs(?:/(.*))?$ {
-            return 307 https://docs.pixielabs.ai/$1;
-        }
-
-        location @error404 {
-            return 307 https://$domain_name?not_found=$uri;
-        }
-    }
-
-    # This is the proxy server for segment.
-    server {
-        listen [::]:56000 ssl http2;
-        listen 56000 ssl http2;
-        server_name segment.@PL_DOMAIN_NAME@;
-
-        include /usr/local/openresty/nginx/conf/pixie_compression.conf;
-        include /usr/local/openresty/nginx/conf/pixie_vars.conf;
-
-        set $segment_cdn "cdn.segment.com";
-
-        location /v1/projects {
-            proxy_pass https://$segment_cdn$uri$is_args$args;
-            proxy_cache my_cache;
-            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
-        }
-
-        location /analytics.js/v1 {
-            proxy_pass https://$segment_cdn$uri$is_args$args;
-            proxy_cache my_cache;
-            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
-        }
-
-        location /analytics-next {
-            proxy_pass https://$segment_cdn$uri$is_args$args;
-            proxy_cache my_cache;
-            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
-        }
-
-        location /next-integrations {
-            proxy_pass https://$segment_cdn$uri$is_args$args;
-            proxy_cache my_cache;
-            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
-        }
-
-        location /cli-write-key {
-            return 200 $segment_cli_write_key;
-            default_type text/plain;
-        }
-
-        location / {
-            set $segment_api "api.segment.io";
-            proxy_pass https://$segment_api$uri$is_args$args;
-            proxy_cache my_cache;
-            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
-        }
-    }
-
-    # This is the redirect to the netlify hosted site for docs.
-    server {
-        listen [::]:56000 ssl http2;
-        listen 56000 ssl http2;
-        server_name docs.@PL_DOMAIN_NAME@;
-
-        location / {
-            return 307 https://docs.pixielabs.ai$request_uri;
-        }
-    }
-}
diff --git a/src/cloud/proxy/pixie_api.conf b/src/cloud/proxy/pixie_api.conf
deleted file mode 100644
index c42b6e91409..00000000000
--- a/src/cloud/proxy/pixie_api.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-location /api/ {
-    proxy_set_header Host $host;
-    proxy_set_header X-Real-IP $remote_addr;
-    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-    proxy_set_header X-Forwarded-Proto $scheme;
-    proxy_pass https://httpapisvc;
-}
-
-
-location ~ ^/pl.* {
-    rewrite ^/pl\.(.*)$ /px.$1 last;
-}
-
-location ~ ^/px.cloudapi.* {
-    # The nginx parser does not seem to understand that the $api_service
-    # variable contains the port. Just referring to it as api-service since this
-    # works in a service agnosistic way.
-    grpc_pass grpcs://grpcapisvc;
-}
-
-location ~ ^/px.api.* {
-    # The nginx parser does not seem to understand that the $api_service
-    # variable contains the port. Just referring to it as api-service since this
-    # works in a service agnosistic way.
-    grpc_pass grpcs://grpcapisvc;
-}
diff --git a/src/cloud/proxy/pixie_cache.conf b/src/cloud/proxy/pixie_cache.conf
deleted file mode 100644
index e85b564934b..00000000000
--- a/src/cloud/proxy/pixie_cache.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-etag on;
-expires 60m;
-add_header Cache-Control "public";
-include /usr/local/openresty/nginx/conf/headers_common.conf;
diff --git a/src/cloud/proxy/pixie_compression.conf b/src/cloud/proxy/pixie_compression.conf
deleted file mode 100644
index fdf9540b9f2..00000000000
--- a/src/cloud/proxy/pixie_compression.conf
+++ /dev/null
@@ -1,33 +0,0 @@
-gzip on;
-
-# Enable compression for common types.
-gzip_types
-    application/x-font-opentype
-    application/grpc-web-text
-    application/javascript
-    application/json
-    application/x-font-ttf
-    application/x-javascript
-    application/xml
-    image/svg+xml
-    font/woff2
-    text/css
-    text/javascript
-    text/plain
-    text/xml;
-
-# Try to serve .gz files directly if they exist.
-# TODO(zasgar): Re-enable once we fix env subs.
-gzip_static off;
-
-# Increase the compression level, at the expense of additional CPU
-# cpu cycles are cheap virtually everywhere now, bandwidth not nearly as much.
-gzip_comp_level 9;
-
-gzip_proxied any;
-
-# How many buffers to hold.
-gzip_buffers 16 64k;
-
-# Min size before we compress.
-gzip_min_length 50;
diff --git a/src/cloud/proxy/pixie_filter.conf b/src/cloud/proxy/pixie_filter.conf
deleted file mode 100644
index 4c1c7088144..00000000000
--- a/src/cloud/proxy/pixie_filter.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# These re-write variables used in our assets.
-sub_filter_types application/javascript;
-sub_filter '__CONFIG_OAUTH_PROVIDER__' "'${oauth_provider}'";
-sub_filter '__CONFIG_AUTH_URI__' "'${auth_uri}'";
-sub_filter '__CONFIG_AUTH_CLIENT_ID__' "'${auth_client_id}'";
-sub_filter '__CONFIG_AUTH_EMAIL_PASSWORD_CONN__' "'${auth_email_password_conn}'";
-sub_filter '__CONFIG_OIDC_HOST__' "'${oidc_host}'";
-sub_filter '__CONFIG_OIDC_METADATA_URL__' "'${oidc_metadata_url}'";
-sub_filter '__CONFIG_OIDC_CLIENT_ID__' "'${oidc_client_id}'";
-sub_filter '__CONFIG_OIDC_ADDITIONAL_SCOPES__' "'${oidc_additional_scopes}'";
-sub_filter '__CONFIG_OIDC_SOCIAL_CONFIG_LOGIN__' "`${oidc_social_config_login}`";
-sub_filter '__CONFIG_OIDC_SOCIAL_CONFIG_SIGNUP__' "`${oidc_social_config_signup}`";
-sub_filter '__CONFIG_DOMAIN_NAME__' "'${domain_name}'";
-sub_filter '__CONFIG_SCRIPT_BUNDLE_URLS__' "'${script_bundle_urls}'";
-sub_filter '__CONFIG_SCRIPT_BUNDLE_DEV__' "'${script_bundle_dev}'";
-sub_filter '__SEGMENT_UI_WRITE_KEY__' "'${segment_ui_write_key}'";
-sub_filter '__SEGMENT_ANALYTICS_JS_DOMAIN__' "'segment.${domain_name}'";
-sub_filter '__CONFIG_LD_CLIENT_ID__' "'${ld_client_id}'";
-sub_filter '__ANALYTICS_ENABLED__' "${analytics_enabled}";
-sub_filter '__ANNOUNCEMENT_ENABLED__' "${announcement_enabled}";
-sub_filter '__ANNOUNCE_WIDGET_URL__' "'${announce_widget_url}'";
-sub_filter '__CONTACT_ENABLED__' "${contact_enabled}";
-sub_filter '__PASSTHROUGH_PROXY_PORT__' "'${passthrough_proxy_port}'";
-# Only need to filter once since these variables should not be repeated.
-sub_filter_once on;
diff --git a/src/cloud/proxy/pixie_health_check.conf b/src/cloud/proxy/pixie_health_check.conf
deleted file mode 100644
index 9487bd87bdb..00000000000
--- a/src/cloud/proxy/pixie_health_check.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-location /healthz {
-    access_log off;
-    return 200 "healthy\n";
-}
diff --git a/src/cloud/proxy/pixie_server_defaults.conf b/src/cloud/proxy/pixie_server_defaults.conf
deleted file mode 100644
index 9e7089d4dc1..00000000000
--- a/src/cloud/proxy/pixie_server_defaults.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# On redirects don't rewrite the host/port. This can cause problems because the nginx service
-# is behind a load balancer/proxy.
-absolute_redirect off;
diff --git a/src/cloud/proxy/pixie_vars.conf b/src/cloud/proxy/pixie_vars.conf
deleted file mode 100644
index 87befd60add..00000000000
--- a/src/cloud/proxy/pixie_vars.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-# Get the environment information so that we can inject the correct config
-# into the javascript assets.
-# The environment variable also need an ENV directive above.
-set_by_lua_block $oauth_provider { return os.getenv("PL_OAUTH_PROVIDER") }
-set_by_lua_block $auth_uri { return os.getenv("PL_AUTH_URI") }
-set_by_lua_block $auth_client_id { return os.getenv("PL_AUTH_CLIENT_ID") }
-set_by_lua_block $auth_email_password_conn { return os.getenv("PL_AUTH_EMAIL_PASSWORD_CONN") }
-set_by_lua_block $oidc_host { return os.getenv("PL_OIDC_HOST") }
-set_by_lua_block $oidc_metadata_url { return os.getenv("PL_OIDC_METADATA_URL") }
-set_by_lua_block $oidc_client_id { return os.getenv("PL_OIDC_CLIENT_ID") }
-set_by_lua_block $oidc_additional_scopes { return os.getenv("PL_OIDC_ADDITIONAL_SCOPES") }
-set_by_lua_block $oidc_social_config_login { return os.getenv("PL_OIDC_SOCIAL_CONFIG_LOGIN") }
-set_by_lua_block $oidc_social_config_signup { return os.getenv("PL_OIDC_SOCIAL_CONFIG_SIGNUP") }
-set_by_lua_block $domain_name { return os.getenv("PL_DOMAIN_NAME") }
-set_by_lua_block $api_service { return os.getenv("PL_API_SERVICE_HTTP") }
-set_by_lua_block $segment_ui_write_key { return os.getenv("PL_SEGMENT_UI_WRITE_KEY") }
-set_by_lua_block $segment_cli_write_key { return os.getenv("PL_SEGMENT_CLI_WRITE_KEY") }
-set_by_lua_block $script_bundle_urls { return os.getenv("SCRIPT_BUNDLE_URLS") }
-set_by_lua_block $script_bundle_dev { return os.getenv("SCRIPT_BUNDLE_DEV") }
-set_by_lua_block $analytics_enabled { return os.getenv("ANALYTICS_ENABLED") }
-set_by_lua_block $announcement_enabled { return os.getenv("ANNOUNCEMENT_ENABLED") }
-set_by_lua_block $announce_widget_url { return os.getenv("ANNOUNCE_WIDGET_URL") }
-set_by_lua_block $ld_client_id { return os.getenv("PL_LD_CLIENT_ID") }
-set_by_lua_block $contact_enabled { return os.getenv("CONTACT_ENABLED") }
-set_by_lua_block $passthrough_proxy_port { return os.getenv("PASSTHROUGH_PROXY_PORT") }
-set_by_lua_block $hydra_service { return os.getenv("PL_HYDRA_SERVICE") }
-set_by_lua_block $kratos_service { return os.getenv("PL_KRATOS_SERVICE") }