From e465af2fc34b09236496fa07f559ed83039d5ca6 Mon Sep 17 00:00:00 2001 From: lgtm <1gtm@users.noreply.github.com> Date: Mon, 29 May 2023 10:14:19 -0700 Subject: [PATCH] Prepare for release v0.30.0 (#1527) ProductLine: Stash Release: v2023.05.31 Release-tracker: https://github.com/stashed/CHANGELOG/pull/66 Signed-off-by: 1gtm <1gtm@appscode.com> Signed-off-by: Tamal Saha --- go.mod | 17 +- go.sum | 34 +- .../github.com/PuerkitoBio/purell/.travis.yml | 12 - vendor/github.com/PuerkitoBio/purell/LICENSE | 3 +- .../github.com/PuerkitoBio/purell/README.md | 2 +- .../github.com/PuerkitoBio/purell/purell.go | 5 +- .../PuerkitoBio/{urlesc => purell}/urlesc.go | 12 +- .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 - vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 - .../github.com/PuerkitoBio/urlesc/README.md | 16 - vendor/go.bytebuilders.dev/audit/lib/nats.go | 2 +- .../license-verifier/Makefile | 6 +- .../license-verifier/info/lib.go | 85 ++- .../license-verifier/kubernetes/Makefile | 4 +- .../license-verifier/kubernetes/lib.go | 83 ++- .../kmodules.xyz/offshoot-api/api/v1/pvc.go | 8 + vendor/modules.txt | 21 +- .../v1alpha1/openapi_generated.go | 635 +++++++++++++++--- .../apis/stash/v1alpha1/openapi_generated.go | 635 +++++++++++++++--- .../apis/stash/v1beta1/openapi_generated.go | 635 +++++++++++++++--- .../apis/ui/v1alpha1/openapi_generated.go | 635 +++++++++++++++--- 21 files changed, 2359 insertions(+), 533 deletions(-) delete mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml rename vendor/github.com/PuerkitoBio/{urlesc => purell}/urlesc.go (94%) delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md diff --git a/go.mod b/go.mod index e9c39756f..255b19d9a 100644 --- a/go.mod +++ b/go.mod @@ -11,10 +11,10 @@ require ( github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - go.bytebuilders.dev/audit v0.0.26 + go.bytebuilders.dev/audit v0.0.27 go.bytebuilders.dev/license-proxyserver v0.0.3 - go.bytebuilders.dev/license-verifier v0.12.1 - go.bytebuilders.dev/license-verifier/kubernetes v0.12.1 + go.bytebuilders.dev/license-verifier v0.13.2 + go.bytebuilders.dev/license-verifier/kubernetes v0.13.2 golang.org/x/text v0.9.0 gomodules.xyz/blobfs v0.1.10 gomodules.xyz/cert v1.5.0 @@ -38,14 +38,14 @@ require ( kmodules.xyz/client-go v0.25.23 kmodules.xyz/constants v0.0.0-20220317041001-545c1e31a70a kmodules.xyz/csi-utils v0.25.4 - kmodules.xyz/custom-resources v0.25.1 - kmodules.xyz/objectstore-api v0.25.1-0.20221104003322-f0289b5b6ca2 - kmodules.xyz/offshoot-api v0.25.3 + kmodules.xyz/custom-resources v0.25.2 + kmodules.xyz/objectstore-api v0.25.1 + kmodules.xyz/offshoot-api v0.25.4 kmodules.xyz/openshift v0.25.0 kmodules.xyz/prober v0.25.0 kmodules.xyz/webhook-runtime v0.25.0 sigs.k8s.io/controller-runtime v0.13.1 - stash.appscode.dev/apimachinery v0.29.1-0.20230529131221-1e979c48da10 + stash.appscode.dev/apimachinery v0.30.0 ) require ( @@ -63,8 +63,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/PuerkitoBio/purell v1.2.0 // indirect github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect github.com/aws/aws-sdk-go v1.44.100 // 
indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/go.sum b/go.sum index 8280ccc0e..132aa531d 100644 --- a/go.sum +++ b/go.sum @@ -121,10 +121,10 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.2.0 h1:/Jdm5QfyM8zdlqT6WVZU4cfP23sot6CEHA4CS49Ezig= +github.com/PuerkitoBio/purell v1.2.0/go.mod h1:OhLRTaaIzhvIyofkJfB24gokC7tM42Px5UhoT32THBk= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -540,7 +540,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI= github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= -github.com/nats-io/nats-server/v2 v2.9.0 h1:DLWu+7/VgGOoChcDKytnUZPAmudpv7o/MhKmNrnH1RE= +github.com/nats-io/nats-server/v2 v2.9.10 h1:LMC46Oi9E6BUx/xBsaCVZgofliAqKQzRPU6eKWkN8jE= github.com/nats-io/nats.go v1.22.1 h1:XzfqDspY0RNufzdrB8c4hFR+R3dahkxlpWe5+IWJzbE= github.com/nats-io/nats.go v1.22.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= @@ -678,14 +678,14 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.bytebuilders.dev/audit v0.0.26 h1:YwYWHSEq0QGs0Vk81FqouvGsyG+GYRzB/1Ywt6r68S4= -go.bytebuilders.dev/audit v0.0.26/go.mod h1:itgg6k3gNmSjR8fY8Sk1cP9TD5O2KNIc4x4MCA7EmD0= +go.bytebuilders.dev/audit v0.0.27 h1:d9kYia/S17JhkauRXw/ERXAtfuujkGDGjpeQcAurmkY= +go.bytebuilders.dev/audit v0.0.27/go.mod h1:88UkSOMKSKShknE5c6V1HtBHCXArIO/LHio61OSzaww= go.bytebuilders.dev/license-proxyserver v0.0.3 h1:vAFMBWfrlmFKNspjBm2KfPXnxYnC17xLwZiHmVzUmzs= go.bytebuilders.dev/license-proxyserver v0.0.3/go.mod h1:iMJbPzDf2R2EJOZwRi7ziEr5DBMfT9Cm75/XfPb/QnU= -go.bytebuilders.dev/license-verifier v0.12.1 h1:2bQJ8naHTv0a04/x+luoabCGQ37VuaccA7Yz5fVPHaU= -go.bytebuilders.dev/license-verifier v0.12.1/go.mod h1:47HPt3RVp9gsujYpOeXi28IsTV5lilZ+EOpuhz/DIJM= -go.bytebuilders.dev/license-verifier/kubernetes v0.12.1 h1:Xr2AiglEgVhFZ2JzhPMCGWniEx037eT6UKEFS3DsdiA= -go.bytebuilders.dev/license-verifier/kubernetes 
v0.12.1/go.mod h1:mJMkwXOfV4mHu75hcMFQru3y3AeITyNjCMwii5/Vrpk= +go.bytebuilders.dev/license-verifier v0.13.2 h1:wV1ynl+GR+zKb3dh19WEzuC0uzTdiSGgVg9G78Nh4XU= +go.bytebuilders.dev/license-verifier v0.13.2/go.mod h1:PTTlWgokzoisBezn2zt+JeGkhTJZ0flvLzdhHVBy86M= +go.bytebuilders.dev/license-verifier/kubernetes v0.13.2 h1:ZIPTce9sAR9/GaPvQtkbOTXGE1Nyyv0GcMqnInUaqxM= +go.bytebuilders.dev/license-verifier/kubernetes v0.13.2/go.mod h1:xiM7bX84LNWQPJRC/m9rQASuCclJSsDdf2qFdafrz1k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= @@ -1249,12 +1249,12 @@ kmodules.xyz/constants v0.0.0-20220317041001-545c1e31a70a h1:wo6TxmquRJwXXX/HejI kmodules.xyz/constants v0.0.0-20220317041001-545c1e31a70a/go.mod h1:3C5i73Z7fcMVyu5TXtPuizGD8vWAbesXFVp1ESbIa1k= kmodules.xyz/csi-utils v0.25.4 h1:fwTgihG/MEfjhGQylzcb0vqlsCfFT5pdYKRxdg7XrM0= kmodules.xyz/csi-utils v0.25.4/go.mod h1:4qHSDnu+5L55wNbzpZDcEez0utzcrtHMfRfb7MrZ2yI= -kmodules.xyz/custom-resources v0.25.1 h1:0qHPTxbT/q0afl2GCOnwPFaoxKziRIPXgVu77YwrCa4= -kmodules.xyz/custom-resources v0.25.1/go.mod h1:ULwzvLmOqZJcPSXKI7iLclYL5eYRlKx8Nbex28Ht19E= -kmodules.xyz/objectstore-api v0.25.1-0.20221104003322-f0289b5b6ca2 h1:efc0glYeBw+ok5s5ZecKdB9zgnRo/IvsLlSaQUPQjZE= -kmodules.xyz/objectstore-api v0.25.1-0.20221104003322-f0289b5b6ca2/go.mod h1:X5aCkyU91p9TOn4jcWw0cfcJL0HCKd/Z6FJHdzKz1ZU= -kmodules.xyz/offshoot-api v0.25.3 h1:KOGzW+TRHJvZ/KauvVQfKHztHF3HTHQ0ibbCHB3sWRg= -kmodules.xyz/offshoot-api v0.25.3/go.mod h1:PUk4EuJFhhyQykCflHj7EgXcljGIqs9vi0IN0RpxtY4= +kmodules.xyz/custom-resources v0.25.2 h1:+PJgUZvbbSgyNT7EX9gUZ3PIzY2LAW03TDW8cevvXqo= +kmodules.xyz/custom-resources v0.25.2/go.mod h1:b9XjjKQMZ6KrLHXKqQz7YwV3M3BK8Hwi4KEwu5RadCo= +kmodules.xyz/objectstore-api v0.25.1 h1:lYQlxk+edgZYakhq+OoRBXTbHbZTGKhatGZWnKixgEQ= +kmodules.xyz/objectstore-api v0.25.1/go.mod h1:6wBtktN7/EXyE429OTCB9nwEe+d0ADaoCtm6+IZnJso= +kmodules.xyz/offshoot-api v0.25.4 h1:IjJNvkphcdYUG8XO/pBwXpuP8W+jxAWJZ3yH8vgI/as= +kmodules.xyz/offshoot-api v0.25.4/go.mod h1:PUk4EuJFhhyQykCflHj7EgXcljGIqs9vi0IN0RpxtY4= kmodules.xyz/openshift v0.25.0 h1:U0zJA04piTDvogXseG7daltaw1yiiNhyCWqsB/Klb4Y= kmodules.xyz/openshift v0.25.0/go.mod h1:37Ougj3fNj/2lNyg7LsJwLPPAQwQOJqe/pp2zRqAffQ= kmodules.xyz/prober v0.25.0 h1:R5uRLHJEvEtEoogj+vaTAob0Btph6+PX5IlS6hPh8PA= @@ -1286,5 +1286,5 @@ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -stash.appscode.dev/apimachinery v0.29.1-0.20230529131221-1e979c48da10 h1:QOMoITdH8rqfDD1biUw334dpWieMXl4hzBcgkd1//gk= -stash.appscode.dev/apimachinery v0.29.1-0.20230529131221-1e979c48da10/go.mod h1:7ao2U0Jgntc10uRf9KRmzXuabO12Nm7+2WpcRE/ZXWo= +stash.appscode.dev/apimachinery v0.30.0 h1:imc3OuXaCI8B9ImB1e4QtDYhZCc2VyiXQHW5gheKG/s= +stash.appscode.dev/apimachinery v0.30.0/go.mod h1:xEWpZn0wmhP0Acpq7cpebNbMXwIui8Crh4dpuE9FHSQ= diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af6..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 
"1.10.x" - - "1.11.x" - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE index 4b9986dea..640c3ac58 100644 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ b/vendor/github.com/PuerkitoBio/purell/LICENSE @@ -1,4 +1,5 @@ -Copyright (c) 2012, Martin Angers +Copyright (c) 2012-2022, The Go Authors +Copyright (c) 2012-2022, Martin Angers, Yuki Okushi & Contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md index 07de0c498..846127902 100644 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -4,7 +4,7 @@ Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) +[![CI](https://github.com/PuerkitoBio/purell/actions/workflows/ci.yml/badge.svg)](https://github.com/PuerkitoBio/purell/actions/workflows/ci.yml) ## Install diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go index 6d0fc190a..74c827246 100644 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -13,7 +13,6 @@ import ( "strconv" "strings" - "github.com/PuerkitoBio/urlesc" "golang.org/x/net/idna" "golang.org/x/text/unicode/norm" "golang.org/x/text/width" @@ -180,7 +179,7 @@ func NormalizeURL(u *url.URL, f NormalizationFlags) string { flags[k](u) } } - return urlesc.Escape(u) + return escapeURL(u) } func lowercaseScheme(u *url.URL) { @@ -311,7 +310,7 @@ func sortQuery(u *url.URL) { if buf.Len() > 0 { buf.WriteRune('&') } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) + buf.WriteString(fmt.Sprintf("%s=%s", k, url.QueryEscape(v))) } } diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/purell/urlesc.go similarity index 94% rename from vendor/github.com/PuerkitoBio/urlesc/urlesc.go rename to vendor/github.com/PuerkitoBio/purell/urlesc.go index 1b8462459..53d815fd4 100644 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ b/vendor/github.com/PuerkitoBio/purell/urlesc.go @@ -2,11 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package urlesc implements query escaping as per RFC 3986. +// This file implements query escaping as per RFC 3986. // It contains some parts of the net/url package, modified so as to allow // some reserved characters incorrectly escaped by net/url. // See https://github.com/golang/go/issues/5684 -package urlesc +package purell import ( "bytes" @@ -69,12 +69,6 @@ func shouldEscape(c byte, mode encoding) bool { return true } -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. -func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - func escape(s string, mode encoding) string { spaceCount, hexCount := 0, 0 for i := 0; i < len(s); i++ { @@ -144,7 +138,7 @@ func unescapeUserinfo(s string) string { // the form host/path does not add its own /. // - if u.RawQuery is empty, ?query is omitted. // - if u.Fragment is empty, #fragment is omitted. 
-func Escape(u *url.URL) string { +func escapeURL(u *url.URL) string { var buf bytes.Buffer if u.Scheme != "" { buf.WriteString(u.Scheme) diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f9..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a53..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). 
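With urlesc folded into purell, escaping is handled by the now-unexported escapeURL helper and query values go through the standard library's url.QueryEscape, so downstream modules only need the purell dependency. As an illustrative sketch (not part of this patch), the exported entry point for callers is purell.NormalizeURLString, invoked here with the same flag combination the license-verifier info package adopts later in this diff; the input URL is made up:

    package main

    import (
        "fmt"

        "github.com/PuerkitoBio/purell"
    )

    func main() {
        // FlagsUsuallySafeGreedy lowercases the scheme and host and removes dot
        // segments; FlagRemoveDuplicateSlashes is OR-ed in explicitly, mirroring
        // info.APIServerAddress below.
        nu, err := purell.NormalizeURLString(
            "HTTPS://Api.Example.com//foo/../register/",
            purell.FlagsUsuallySafeGreedy|purell.FlagRemoveDuplicateSlashes,
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(nu) // prints the normalized URL
    }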
- -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/go.bytebuilders.dev/audit/lib/nats.go b/vendor/go.bytebuilders.dev/audit/lib/nats.go index f25e5b69d..64730f48a 100644 --- a/vendor/go.bytebuilders.dev/audit/lib/nats.go +++ b/vendor/go.bytebuilders.dev/audit/lib/nats.go @@ -93,7 +93,7 @@ func NewNatsConfig(cfg *rest.Config, clusterID string, LicenseFile string) (*Nat return nil, err } - resp, err := http.Post(info.RegistrationAPIEndpoint(), "application/json", bytes.NewReader(data)) + resp, err := http.Post(info.MustRegistrationAPIEndpoint(), "application/json", bytes.NewReader(data)) if err != nil { return nil, err } diff --git a/vendor/go.bytebuilders.dev/license-verifier/Makefile b/vendor/go.bytebuilders.dev/license-verifier/Makefile index 43737c5ea..ac51f2717 100644 --- a/vendor/go.bytebuilders.dev/license-verifier/Makefile +++ b/vendor/go.bytebuilders.dev/license-verifier/Makefile @@ -21,7 +21,7 @@ COMPRESS ?= no # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) CRD_OPTIONS ?= "crd:maxDescLen=0,generateEmbeddedObjectMeta=true,allowDangerousTypes=true" -CODE_GENERATOR_IMAGE ?= appscode/gengo:release-1.25 +CODE_GENERATOR_IMAGE ?= ghcr.io/appscode/gengo:release-1.25 API_GROUPS ?= licenses:v1alpha1 # Where to push the docker image. @@ -64,8 +64,8 @@ ARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH)) BASEIMAGE_PROD ?= gcr.io/distroless/static-debian11 BASEIMAGE_DBG ?= debian:bullseye -GO_VERSION ?= 1.19 -BUILD_IMAGE ?= appscode/golang-dev:$(GO_VERSION) +GO_VERSION ?= 1.20 +BUILD_IMAGE ?= ghcr.io/appscode/golang-dev:$(GO_VERSION) OUTBIN = bin/$(OS)_$(ARCH)/$(BIN) ifeq ($(OS),windows) diff --git a/vendor/go.bytebuilders.dev/license-verifier/info/lib.go b/vendor/go.bytebuilders.dev/license-verifier/info/lib.go index 15d2d73ae..12b50db86 100644 --- a/vendor/go.bytebuilders.dev/license-verifier/info/lib.go +++ b/vendor/go.bytebuilders.dev/license-verifier/info/lib.go @@ -31,6 +31,7 @@ import ( "go.bytebuilders.dev/license-verifier/apis/licenses" + "github.com/PuerkitoBio/purell" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -46,8 +47,9 @@ var ( ProductName string // This has been renamed to Features ProductUID string - prodAddress = "https://byte.builders" - qaAddress = "https://appscode.ninja" + prodDomain = "byte.builders" + qaDomain = "appscode.ninja" + registrationAPIPath = "api/v1/register" LicenseIssuerAPIPath = "api/v1/license/issue" ) @@ -68,27 +70,84 @@ func SkipLicenseVerification() bool { return !v } -func RegistrationAPIEndpoint() string { - u := APIServerAddress() +func MustRegistrationAPIEndpoint() string { + r, err := RegistrationAPIEndpoint() + if err != nil { + panic(err) + } + return r +} + +func RegistrationAPIEndpoint(override ...string) (string, error) { + u, err := APIServerAddress(override...) + if err != nil { + return "", err + } u.Path = path.Join(u.Path, registrationAPIPath) - return u.String() + return u.String(), nil } -func LicenseIssuerAPIEndpoint() string { - u := APIServerAddress() +func MustLicenseIssuerAPIEndpoint() string { + r, err := LicenseIssuerAPIEndpoint() + if err != nil { + panic(err) + } + return r +} + +func LicenseIssuerAPIEndpoint(override ...string) (string, error) { + u, err := APIServerAddress(override...) 
+ if err != nil { + return "", err + } u.Path = path.Join(u.Path, LicenseIssuerAPIPath) - return u.String() + return u.String(), nil } -func APIServerAddress() *url.URL { - if SkipLicenseVerification() { - u, _ := url.Parse(qaAddress) - return u +func MustAPIServerAddress() *url.URL { + u, err := APIServerAddress() + if err != nil { + panic(err) } - u, _ := url.Parse(prodAddress) return u } +func APIServerAddress(override ...string) (*url.URL, error) { + if len(override) > 0 && override[0] != "" { + nu, err := purell.NormalizeURLString(override[0], + purell.FlagsUsuallySafeGreedy|purell.FlagRemoveDuplicateSlashes) + if err != nil { + return nil, err + } + return url.Parse(nu) + } + + if SkipLicenseVerification() { + return url.Parse("https://api." + qaDomain) + } + return url.Parse("https://api." + prodDomain) +} + +func HostedEndpoint(u string) (bool, error) { + nu, err := purell.NormalizeURLString(u, + purell.FlagsUsuallySafeGreedy|purell.FlagRemoveDuplicateSlashes) + if err != nil { + return false, err + } + u2, err := url.Parse(nu) + if err != nil { + return false, err + } + return HostedDomain(u2.Hostname()), nil +} + +func HostedDomain(d string) bool { + return d == prodDomain || + d == qaDomain || + strings.HasSuffix(d, "."+prodDomain) || + strings.HasSuffix(d, "."+qaDomain) +} + func LoadLicenseCA() ([]byte, error) { if LicenseCA != "" { return []byte(LicenseCA), nil diff --git a/vendor/go.bytebuilders.dev/license-verifier/kubernetes/Makefile b/vendor/go.bytebuilders.dev/license-verifier/kubernetes/Makefile index 5cd4a0b45..10b65999d 100644 --- a/vendor/go.bytebuilders.dev/license-verifier/kubernetes/Makefile +++ b/vendor/go.bytebuilders.dev/license-verifier/kubernetes/Makefile @@ -64,8 +64,8 @@ ARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH)) BASEIMAGE_PROD ?= gcr.io/distroless/static BASEIMAGE_DBG ?= debian:stretch -GO_VERSION ?= 1.19 -BUILD_IMAGE ?= appscode/golang-dev:$(GO_VERSION) +GO_VERSION ?= 1.20 +BUILD_IMAGE ?= ghcr.io/appscode/golang-dev:$(GO_VERSION) OUTBIN = bin/$(OS)_$(ARCH)/$(BIN) ifeq ($(OS),windows) diff --git a/vendor/go.bytebuilders.dev/license-verifier/kubernetes/lib.go b/vendor/go.bytebuilders.dev/license-verifier/kubernetes/lib.go index 054648348..3430a33d1 100644 --- a/vendor/go.bytebuilders.dev/license-verifier/kubernetes/lib.go +++ b/vendor/go.bytebuilders.dev/license-verifier/kubernetes/lib.go @@ -62,17 +62,17 @@ const ( ) type LicenseEnforcer struct { - opts verifier.VerifyOptions - config *rest.Config - kc kubernetes.Interface - getLicense func() ([]byte, error) + licenseFile string + opts verifier.VerifyOptions + config *rest.Config + kc kubernetes.Interface } // NewLicenseEnforcer returns a newly created license enforcer func NewLicenseEnforcer(config *rest.Config, licenseFile string) (*LicenseEnforcer, error) { le := LicenseEnforcer{ - getLicense: getLicense(config, licenseFile), - config: config, + config: config, + licenseFile: licenseFile, opts: verifier.VerifyOptions{ Features: info.ProductName, }, @@ -97,30 +97,38 @@ func MustLicenseEnforcer(config *rest.Config, licenseFile string) *LicenseEnforc return le } -func getLicense(cfg *rest.Config, licenseFile string) func() ([]byte, error) { - return func() ([]byte, error) { - licenseBytes, err := os.ReadFile(licenseFile) - if errors.Is(err, os.ErrNotExist) { - req := proxyserver.LicenseRequest{ - TypeMeta: metav1.TypeMeta{}, - Request: &proxyserver.LicenseRequestRequest{ - Features: info.Features(), - }, - } - pc, err := proxyclient.NewForConfig(cfg) - if err != nil { - return nil, 
errors.Wrap(err, "failed create client for license-proxyserver") - } - resp, err := pc.ProxyserverV1alpha1().LicenseRequests().Create(context.TODO(), &req, metav1.CreateOptions{}) - if err != nil { - return nil, errors.Wrap(err, "failed to read license") - } - licenseBytes = []byte(resp.Response.License) - } else if err != nil { +func (le *LicenseEnforcer) getLicense() ([]byte, error) { + licenseBytes, err := os.ReadFile(le.licenseFile) + if errors.Is(err, os.ErrNotExist) || (err == nil && le.invalidLicense(licenseBytes)) { + req := proxyserver.LicenseRequest{ + TypeMeta: metav1.TypeMeta{}, + Request: &proxyserver.LicenseRequestRequest{ + Features: info.Features(), + }, + } + pc, err := proxyclient.NewForConfig(le.config) + if err != nil { + return nil, errors.Wrap(err, "failed create client for license-proxyserver") + } + resp, err := pc.ProxyserverV1alpha1().LicenseRequests().Create(context.TODO(), &req, metav1.CreateOptions{}) + if err != nil { return nil, errors.Wrap(err, "failed to read license") } - return licenseBytes, nil + licenseBytes = []byte(resp.Response.License) + } else if err != nil { + return nil, errors.Wrap(err, "failed to read license") } + return licenseBytes, nil +} + +func (le *LicenseEnforcer) invalidLicense(license []byte) bool { + le.opts.License = license + // We don't want to acquire license from license-proxyserver is the license file + // contains a valid license for a different product. + // We want to acquire license-proxyserver is a previously valid license has not expired. + // So, we don't check features in the license found is license file. + l, err := verifier.ParseLicense(le.opts.ParserOptions) + return sets.NewString(l.Features...).HasAny(info.ParseFeatures(le.opts.Features)...) && err != nil } func (le *LicenseEnforcer) createClients() (err error) { @@ -136,22 +144,13 @@ func (le *LicenseEnforcer) acquireLicense() (err error) { } func (le *LicenseEnforcer) readClusterUID() (err error) { + if le.opts.ClusterUID != "" { + return + } le.opts.ClusterUID, err = clusterid.ClusterUID(le.kc.CoreV1().Namespaces()) return err } -func (le *LicenseEnforcer) podName() (string, error) { - if name, ok := os.LookupEnv("MY_POD_NAME"); ok { - return name, nil - } - - if meta.PossiblyInCluster() { - // Read current pod name - return os.Hostname() - } - return "", errors.New("failed to detect pod name") -} - func (le *LicenseEnforcer) handleLicenseVerificationFailure(licenseErr error) error { // Send interrupt so that all go-routines shut-down gracefully // https://pracucci.com/graceful-shutdown-of-kubernetes-pods.html @@ -170,10 +169,6 @@ func (le *LicenseEnforcer) handleLicenseVerificationFailure(licenseErr error) er // Log licenseInfo verification failure klog.Errorln("Failed to verify license. 
Reason: ", licenseErr.Error()) - podName, err := le.podName() - if err != nil { - return err - } // Read the namespace of current pod namespace := meta.PodNamespace() @@ -183,7 +178,7 @@ func (le *LicenseEnforcer) handleLicenseVerificationFailure(licenseErr error) er le.config, core.SchemeGroupVersion.WithResource(core.ResourcePods.String()), namespace, - podName, + meta.PodName(), ) if err != nil { return err diff --git a/vendor/kmodules.xyz/offshoot-api/api/v1/pvc.go b/vendor/kmodules.xyz/offshoot-api/api/v1/pvc.go index 370a95886..9e81d7354 100644 --- a/vendor/kmodules.xyz/offshoot-api/api/v1/pvc.go +++ b/vendor/kmodules.xyz/offshoot-api/api/v1/pvc.go @@ -386,3 +386,11 @@ func (in *EphemeralVolumeSource) ToAPIObject() *core.EphemeralVolumeSource { VolumeClaimTemplate: in.VolumeClaimTemplate.ToAPIObject(), } } + +func ConvertVolumes(in []Volume) []core.Volume { + out := make([]core.Volume, 0, len(in)) + for _, v := range in { + out = append(out, *v.ToAPIObject()) + } + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5e9ad209a..1c55e8cc9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -41,12 +41,9 @@ github.com/Masterminds/sprig/v3 # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler -# github.com/PuerkitoBio/purell v1.1.1 -## explicit +# github.com/PuerkitoBio/purell v1.2.0 +## explicit; go 1.19 github.com/PuerkitoBio/purell -# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 -## explicit -github.com/PuerkitoBio/urlesc # github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 ## explicit github.com/armon/circbuf @@ -434,7 +431,7 @@ github.com/yudai/gojsondiff/formatter # github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 ## explicit github.com/yudai/golcs -# go.bytebuilders.dev/audit v0.0.26 +# go.bytebuilders.dev/audit v0.0.27 ## explicit; go 1.18 go.bytebuilders.dev/audit/api/v1 go.bytebuilders.dev/audit/lib @@ -445,13 +442,13 @@ go.bytebuilders.dev/license-proxyserver/apis/proxyserver/v1alpha1 go.bytebuilders.dev/license-proxyserver/client/clientset/versioned go.bytebuilders.dev/license-proxyserver/client/clientset/versioned/scheme go.bytebuilders.dev/license-proxyserver/client/clientset/versioned/typed/proxyserver/v1alpha1 -# go.bytebuilders.dev/license-verifier v0.12.1 +# go.bytebuilders.dev/license-verifier v0.13.2 ## explicit; go 1.18 go.bytebuilders.dev/license-verifier go.bytebuilders.dev/license-verifier/apis/licenses go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1 go.bytebuilders.dev/license-verifier/info -# go.bytebuilders.dev/license-verifier/kubernetes v0.12.1 +# go.bytebuilders.dev/license-verifier/kubernetes v0.13.2 ## explicit; go 1.18 go.bytebuilders.dev/license-verifier/kubernetes # go.etcd.io/etcd/api/v3 v3.5.4 @@ -1578,7 +1575,7 @@ kmodules.xyz/constants/openstack kmodules.xyz/csi-utils/volumesnapshot kmodules.xyz/csi-utils/volumesnapshot/v1 kmodules.xyz/csi-utils/volumesnapshot/v1beta1 -# kmodules.xyz/custom-resources v0.25.1 +# kmodules.xyz/custom-resources v0.25.2 ## explicit; go 1.18 kmodules.xyz/custom-resources/apis/appcatalog kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1 @@ -1593,11 +1590,11 @@ kmodules.xyz/custom-resources/client/clientset/versioned/typed/auditor/v1alpha1 kmodules.xyz/custom-resources/client/clientset/versioned/typed/metrics/v1alpha1 kmodules.xyz/custom-resources/crds kmodules.xyz/custom-resources/util/siteinfo -# kmodules.xyz/objectstore-api v0.25.1-0.20221104003322-f0289b5b6ca2 +# kmodules.xyz/objectstore-api v0.25.1 
## explicit; go 1.18 kmodules.xyz/objectstore-api/api/v1 kmodules.xyz/objectstore-api/osm -# kmodules.xyz/offshoot-api v0.25.3 +# kmodules.xyz/offshoot-api v0.25.4 ## explicit; go 1.18 kmodules.xyz/offshoot-api/api/v1 kmodules.xyz/offshoot-api/util @@ -1780,7 +1777,7 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# stash.appscode.dev/apimachinery v0.29.1-0.20230529131221-1e979c48da10 +# stash.appscode.dev/apimachinery v0.30.0 ## explicit; go 1.18 stash.appscode.dev/apimachinery/apis stash.appscode.dev/apimachinery/apis/repositories diff --git a/vendor/stash.appscode.dev/apimachinery/apis/repositories/v1alpha1/openapi_generated.go b/vendor/stash.appscode.dev/apimachinery/apis/repositories/v1alpha1/openapi_generated.go index 697666c20..7b4d7fb7d 100644 --- a/vendor/stash.appscode.dev/apimachinery/apis/repositories/v1alpha1/openapi_generated.go +++ b/vendor/stash.appscode.dev/apimachinery/apis/repositories/v1alpha1/openapi_generated.go @@ -383,11 +383,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/objectstore-api/api/v1.S3Spec": schema_kmodulesxyz_objectstore_api_api_v1_S3Spec(ref), "kmodules.xyz/objectstore-api/api/v1.SwiftSpec": schema_kmodulesxyz_objectstore_api_api_v1_SwiftSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref), "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref), "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), @@ -395,6 +397,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref), + "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref), "kmodules.xyz/prober/api/v1.FormEntry": schema_kmodulesxyz_prober_api_v1_FormEntry(ref), "kmodules.xyz/prober/api/v1.HTTPPostAction": schema_kmodulesxyz_prober_api_v1_HTTPPostAction(ref), "kmodules.xyz/prober/api/v1.Handler": schema_kmodulesxyz_prober_api_v1_Handler(ref), @@ -17799,6 +17803,12 @@ 
func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. Format: "", }, }, + "appRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to underlying application", + Ref: ref("kmodules.xyz/client-go/api/v1.TypedObjectReference"), + }, + }, "version": { SchemaProps: spec.SchemaProps{ Description: "Version used to facilitate programmatic handling of application.", @@ -17850,7 +17860,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, } } @@ -18793,6 +18803,27 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common. } } +func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ephemeral volume that is handled by a normal storage driver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. 
Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19020,6 +19051,36 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.Ref } } +func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19223,84 +19284,88 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, }, - }, - "nodeSelector": { SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"), }, }, }, }, }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), + Ref: ref("k8s.io/api/core/v1.Container"), }, }, }, }, }, - "imagePullSecrets": { + "terminationGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, + }, + "nodeSelector": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "priorityClassName": { + "serviceAccountName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", Type: []string{"string"}, Format: "", }, }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "hostNetwork": { SchemaProps: spec.SchemaProps{ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", @@ -19335,86 +19400,107 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "topologySpreadConstraints": { + "imagePullSecrets": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "topologyKey", - "whenUnsatisfiable", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, }, }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", }, + }, + "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Description: "If specified, the pod's tolerations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, }, }, - "initContainers": { + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. 
Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topologySpreadConstraints": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), }, }, }, @@ -19504,7 +19590,7 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"}, } } @@ -19739,6 +19825,381 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.Refer } } +func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the volume. 
Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, 
+ "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + +func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: 
spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + func schema_kmodulesxyz_prober_api_v1_FormEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1alpha1/openapi_generated.go b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1alpha1/openapi_generated.go index ee600202d..dea554dc8 100644 --- a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1alpha1/openapi_generated.go +++ b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1alpha1/openapi_generated.go @@ -383,11 +383,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/objectstore-api/api/v1.S3Spec": schema_kmodulesxyz_objectstore_api_api_v1_S3Spec(ref), "kmodules.xyz/objectstore-api/api/v1.SwiftSpec": schema_kmodulesxyz_objectstore_api_api_v1_SwiftSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": 
schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref), "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref), "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), @@ -395,6 +397,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref), + "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref), "kmodules.xyz/prober/api/v1.FormEntry": schema_kmodulesxyz_prober_api_v1_FormEntry(ref), "kmodules.xyz/prober/api/v1.HTTPPostAction": schema_kmodulesxyz_prober_api_v1_HTTPPostAction(ref), "kmodules.xyz/prober/api/v1.Handler": schema_kmodulesxyz_prober_api_v1_Handler(ref), @@ -17804,6 +17808,12 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. Format: "", }, }, + "appRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to underlying application", + Ref: ref("kmodules.xyz/client-go/api/v1.TypedObjectReference"), + }, + }, "version": { SchemaProps: spec.SchemaProps{ Description: "Version used to facilitate programmatic handling of application.", @@ -17855,7 +17865,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, } } @@ -18798,6 +18808,27 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common. 
} } +func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ephemeral volume that is handled by a normal storage driver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19025,6 +19056,36 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.Ref } } +func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template.
The same fields as in a PersistentVolumeClaim are also valid here.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19228,84 +19289,88 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, }, - }, - "nodeSelector": { SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"), }, }, }, }, }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), + Ref: ref("k8s.io/api/core/v1.Container"), }, }, }, }, }, - "imagePullSecrets": { + "terminationGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, + }, + "nodeSelector": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "priorityClassName": { + "serviceAccountName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. 
\"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", Type: []string{"string"}, Format: "", }, }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "hostNetwork": { SchemaProps: spec.SchemaProps{ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", @@ -19340,86 +19405,107 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. 
Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "topologySpreadConstraints": { + "imagePullSecrets": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "topologyKey", - "whenUnsatisfiable", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, }, }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", }, + }, + "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Description: "If specified, the pod's tolerations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, }, }, - "initContainers": { + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. 
The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topologySpreadConstraints": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. 
All topologySpreadConstraints are ANDed.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), }, }, }, @@ -19509,7 +19595,7 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"}, } } @@ -19744,6 +19830,381 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.Refer } } +func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + +func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + func schema_kmodulesxyz_prober_api_v1_FormEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/openapi_generated.go b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/openapi_generated.go index 4e0e893b9..82831d334 100644 --- a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/openapi_generated.go +++ b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/openapi_generated.go @@ -383,11 +383,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/objectstore-api/api/v1.S3Spec": schema_kmodulesxyz_objectstore_api_api_v1_S3Spec(ref), "kmodules.xyz/objectstore-api/api/v1.SwiftSpec": schema_kmodulesxyz_objectstore_api_api_v1_SwiftSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref), "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": 
schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref), "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), @@ -395,6 +397,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref), + "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref), "kmodules.xyz/prober/api/v1.FormEntry": schema_kmodulesxyz_prober_api_v1_FormEntry(ref), "kmodules.xyz/prober/api/v1.HTTPPostAction": schema_kmodulesxyz_prober_api_v1_HTTPPostAction(ref), "kmodules.xyz/prober/api/v1.Handler": schema_kmodulesxyz_prober_api_v1_Handler(ref), @@ -17850,6 +17854,12 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. Format: "", }, }, + "appRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to underlying application", + Ref: ref("kmodules.xyz/client-go/api/v1.TypedObjectReference"), + }, + }, "version": { SchemaProps: spec.SchemaProps{ Description: "Version used to facilitate programmatic handling of application.", @@ -17901,7 +17911,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, } } @@ -18844,6 +18854,27 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common. 
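// The hunks that follow register offshoot-api's own Volume, VolumeSource, EphemeralVolumeSource
// and PersistentVolumeClaimTemplate schemas, and a later hunk switches PodSpec.volumes from
// k8s.io/api/core/v1.Volume to kmodules.xyz/offshoot-api/api/v1.Volume. A minimal sketch of what
// declaring such a generic ephemeral volume could look like in Go; field shapes are assumed to
// mirror their core/v1 counterparts (embedded VolumeSource, pointer Ephemeral/VolumeClaimTemplate
// fields), and the volume name and storage class below are illustrative, not taken from this patch.
package example

import (
	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	ofst "kmodules.xyz/offshoot-api/api/v1"
)

// scratchVolume builds a pod-scoped volume backed by a PVC template; the PVC is
// created alongside the pod and garbage-collected when the pod goes away.
func scratchVolume(storageClass string) ofst.Volume {
	return ofst.Volume{
		Name: "scratch",
		VolumeSource: ofst.VolumeSource{
			Ephemeral: &ofst.EphemeralVolumeSource{
				VolumeClaimTemplate: &ofst.PersistentVolumeClaimTemplate{
					Spec: core.PersistentVolumeClaimSpec{
						AccessModes:      []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
						StorageClassName: &storageClass,
						Resources: core.ResourceRequirements{
							Requests: core.ResourceList{
								core.ResourceStorage: resource.MustParse("1Gi"),
							},
						},
					},
				},
			},
		},
	}
}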
} } +func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ephemeral volume that is handled by a normal storage driver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19071,6 +19102,36 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.Ref } } +func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. 
The same fields as in a PersistentVolumeClaim are also valid here.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19274,84 +19335,88 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, }, - }, - "nodeSelector": { SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"), }, }, }, }, }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), + Ref: ref("k8s.io/api/core/v1.Container"), }, }, }, }, }, - "imagePullSecrets": { + "terminationGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, + }, + "nodeSelector": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "priorityClassName": { + "serviceAccountName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. 
\"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", Type: []string{"string"}, Format: "", }, }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "hostNetwork": { SchemaProps: spec.SchemaProps{ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", @@ -19386,86 +19451,107 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. 
Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "topologySpreadConstraints": { + "imagePullSecrets": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "topologyKey", - "whenUnsatisfiable", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, }, }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", }, + }, + "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Description: "If specified, the pod's tolerations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, }, }, - "initContainers": { + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. 
The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topologySpreadConstraints": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. 
All topologySpreadConstraints are ANDed.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), }, }, }, @@ -19555,7 +19641,7 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"}, } } @@ -19790,6 +19876,381 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.Refer } } +func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + +func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + func schema_kmodulesxyz_prober_api_v1_FormEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/stash.appscode.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go b/vendor/stash.appscode.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go index 4fe9881cf..df9f12f5b 100644 --- a/vendor/stash.appscode.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go +++ b/vendor/stash.appscode.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go @@ -383,11 +383,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/objectstore-api/api/v1.S3Spec": schema_kmodulesxyz_objectstore_api_api_v1_S3Spec(ref), "kmodules.xyz/objectstore-api/api/v1.SwiftSpec": schema_kmodulesxyz_objectstore_api_api_v1_SwiftSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref), "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": 
schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref), "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), @@ -395,6 +397,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref), + "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref), "kmodules.xyz/prober/api/v1.FormEntry": schema_kmodulesxyz_prober_api_v1_FormEntry(ref), "kmodules.xyz/prober/api/v1.HTTPPostAction": schema_kmodulesxyz_prober_api_v1_HTTPPostAction(ref), "kmodules.xyz/prober/api/v1.Handler": schema_kmodulesxyz_prober_api_v1_Handler(ref), @@ -17799,6 +17803,12 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. Format: "", }, }, + "appRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to underlying application", + Ref: ref("kmodules.xyz/client-go/api/v1.TypedObjectReference"), + }, + }, "version": { SchemaProps: spec.SchemaProps{ Description: "Version used to facilitate programmatic handling of application.", @@ -17850,7 +17860,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, } } @@ -18793,6 +18803,27 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common. 
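// The same definitions are registered in the ui/v1alpha1 package's GetOpenAPIDefinitions map above.
// A hedged sketch of how such a generated definitions map is typically consumed: wiring it into a
// generic API server's OpenAPI config so the aggregated API can publish the schemas registered here.
// The package alias, scheme, and Info metadata below are illustrative, not taken from this patch.
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	apiopenapi "k8s.io/apiserver/pkg/endpoints/openapi"
	genericapiserver "k8s.io/apiserver/pkg/server"

	uiv1alpha1 "stash.appscode.dev/apimachinery/apis/ui/v1alpha1"
)

// configureOpenAPI plugs the generated definitions into the server config; the
// DefinitionNamer derives stable schema names from the types registered in the scheme.
func configureOpenAPI(cfg *genericapiserver.RecommendedConfig, scheme *runtime.Scheme) {
	cfg.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(
		uiv1alpha1.GetOpenAPIDefinitions,
		apiopenapi.NewDefinitionNamer(scheme),
	)
	cfg.OpenAPIConfig.Info.Title = "stash"     // illustrative metadata
	cfg.OpenAPIConfig.Info.Version = "v0.30.0" // illustrative metadata
}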
} } +func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ephemeral volume that is handled by a normal storage driver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19020,6 +19051,36 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.Ref } } +func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. 
The same fields as in a PersistentVolumeClaim are also valid here.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19223,84 +19284,88 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, }, - }, - "nodeSelector": { SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"), }, }, }, }, }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), + Ref: ref("k8s.io/api/core/v1.Container"), }, }, }, }, }, - "imagePullSecrets": { + "terminationGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, + }, + "nodeSelector": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "priorityClassName": { + "serviceAccountName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. 
\"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", Type: []string{"string"}, Format: "", }, }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "hostNetwork": { SchemaProps: spec.SchemaProps{ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", @@ -19335,86 +19400,107 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. 
Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "topologySpreadConstraints": { + "imagePullSecrets": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "topologyKey", - "whenUnsatisfiable", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, }, }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", }, + }, + "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Description: "If specified, the pod's tolerations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, }, }, - "initContainers": { + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. 
The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topologySpreadConstraints": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. 
All topologySpreadConstraints are ANDed.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), }, }, }, @@ -19504,7 +19590,7 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"}, } } @@ -19739,6 +19825,381 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.Refer } } +func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + +func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + func schema_kmodulesxyz_prober_api_v1_FormEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{