+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ log.Fatal(srv.ListenAndServe())
+ }
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+ url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+ "/articles/technology/42"
+
+This also works for host and query value variables:
+
+ r := mux.NewRouter()
+ r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42",
+ "filter", "gorilla")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
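+
+For instance, using the path-only "article" route defined earlier, passing a
+value that does not satisfy the {id:[0-9]+} pattern makes URL() return an
+error instead of a malformed URL (a short sketch):
+
+ // "abc" does not match "[0-9]+", so err is non-nil
+ url, err := r.Get("article").URL("category", "technology", "id", "abc")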
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+ r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of `application/json` as well as
+`application/text`.
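+
+Because the header value is a regular expression, an exact match can be forced
+with the ^ and $ anchors, for example (a small sketch):
+
+ r.HeadersRegexp("Content-Type", "^application/json$")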
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+ // "http://news.domain.com/"
+ host, err := r.Get("article").URLHost("subdomain", "news")
+
+ // "/articles/technology/42"
+ path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+ r := mux.NewRouter()
+ s := r.Host("{subdomain}.domain.com").Subrouter()
+ s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+
+Mux supports the addition of middlewares to a Router and its subrouters, which are executed in the order they are added if a match is found. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
+
+ type MiddlewareFunc func(http.Handler) http.Handler
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+ func simpleMw(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+ }
+
+Middlewares can be added to a router using `Router.Use()`:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.Use(simpleMw)
+
+A more complex authentication middleware, which maps session tokens to users, could be written as:
+
+ // Define our struct
+ type authenticationMiddleware struct {
+ tokenUsers map[string]string
+ }
+
+ // Initialize it somewhere
+ func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+ }
+
+ // Middleware function, which will be called for each request
+ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ next.ServeHTTP(w, r)
+ } else {
+ http.Error(w, "Forbidden", http.StatusForbidden)
+ }
+ })
+ }
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+
+ amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
+ amw.Populate()
+
+ r.Use(amw.Middleware)
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. A middleware can use this to abort a request early, as the authentication example above does when the token is not found.
+
+*/
+package mux
diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod
new file mode 100644
index 0000000..df170a3
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/go.mod
@@ -0,0 +1,3 @@
+module github.com/gorilla/mux
+
+go 1.12
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go
new file mode 100644
index 0000000..cb51c56
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/middleware.go
@@ -0,0 +1,74 @@
+package mux
+
+import (
+ "net/http"
+ "strings"
+)
+
+// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
+// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
+// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
+type MiddlewareFunc func(http.Handler) http.Handler
+
+// middleware interface is anything which implements a MiddlewareFunc named Middleware.
+type middleware interface {
+ Middleware(handler http.Handler) http.Handler
+}
+
+// Middleware allows MiddlewareFunc to implement the middleware interface.
+func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
+ return mw(handler)
+}
+
+// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) Use(mwf ...MiddlewareFunc) {
+ for _, fn := range mwf {
+ r.middlewares = append(r.middlewares, fn)
+ }
+}
+
+// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) useInterface(mw middleware) {
+ r.middlewares = append(r.middlewares, mw)
+}
+
+// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
+// on requests for routes that have an OPTIONS method matcher to all the method matchers on
+// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
+// by the middleware. See examples for usage.
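+//
+// A usage sketch (the path and handler are illustrative); the route itself
+// must still register an OPTIONS matcher:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodOptions)
+// r.Use(mux.CORSMethodMiddleware(r))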
+func CORSMethodMiddleware(r *Router) MiddlewareFunc {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ allMethods, err := getAllMethodsForRoute(r, req)
+ if err == nil {
+ for _, v := range allMethods {
+ if v == http.MethodOptions {
+ w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
+ }
+ }
+ }
+
+ next.ServeHTTP(w, req)
+ })
+ }
+}
+
+// getAllMethodsForRoute returns all the methods from method matchers matching a given
+// request.
+func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
+ var allMethods []string
+
+ for _, route := range r.routes {
+ var match RouteMatch
+ if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch {
+ methods, err := route.GetMethods()
+ if err != nil {
+ return nil, err
+ }
+
+ allMethods = append(allMethods, methods...)
+ }
+ }
+
+ return allMethods, nil
+}
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..782a34b
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,606 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "path"
+ "regexp"
+)
+
+var (
+ // ErrMethodMismatch is returned when the method in the request does not match
+ // the method defined against the route.
+ ErrMethodMismatch = errors.New("method is not allowed")
+ // ErrNotFound is returned when no route match is found.
+ ErrNotFound = errors.New("no matching route was found")
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+ return &Router{namedRoutes: make(map[string]*Route)}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+// var router = mux.NewRouter()
+//
+// func main() {
+// http.Handle("/", router)
+// }
+//
+// Or, for Google App Engine, register it in a init() function:
+//
+// func init() {
+// http.Handle("/", router)
+// }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+ // Configurable Handler to be used when no route matches.
+ NotFoundHandler http.Handler
+
+ // Configurable Handler to be used when the request method does not match the route.
+ MethodNotAllowedHandler http.Handler
+
+ // Routes to be matched, in order.
+ routes []*Route
+
+ // Routes by name for URL building.
+ namedRoutes map[string]*Route
+
+ // If true, do not clear the request context after handling the request.
+ //
+ // Deprecated: No effect, since the context is stored on the request itself.
+ KeepContext bool
+
+ // Slice of middlewares to be called after a match is found
+ middlewares []middleware
+
+ // configuration shared with `Route`
+ routeConf
+}
+
+// common route configuration shared between `Router` and `Route`
+type routeConf struct {
+ // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+ useEncodedPath bool
+
+ // If true, when the path pattern is "/path/", accessing "/path" will
+ // redirect to the former and vice versa.
+ strictSlash bool
+
+ // If true, when the path pattern is "/path//to", accessing "/path//to"
+ // will not redirect
+ skipClean bool
+
+ // Manager for the variables from host and path.
+ regexp routeRegexpGroup
+
+ // List of matchers.
+ matchers []matcher
+
+ // The scheme used when building URLs.
+ buildScheme string
+
+ buildVarsFunc BuildVarsFunc
+}
+
+// returns an effective deep copy of `routeConf`
+func copyRouteConf(r routeConf) routeConf {
+ c := r
+
+ if r.regexp.path != nil {
+ c.regexp.path = copyRouteRegexp(r.regexp.path)
+ }
+
+ if r.regexp.host != nil {
+ c.regexp.host = copyRouteRegexp(r.regexp.host)
+ }
+
+ c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
+ for _, q := range r.regexp.queries {
+ c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
+ }
+
+ c.matchers = make([]matcher, len(r.matchers))
+ copy(c.matchers, r.matchers)
+
+ return c
+}
+
+func copyRouteRegexp(r *routeRegexp) *routeRegexp {
+ c := *r
+ return &c
+}
+
+// Match attempts to match the given request against the router's registered routes.
+//
+// If the request matches a route of this router or one of its subrouters the Route,
+// Handler, and Vars fields of the match argument are filled and this function
+// returns true.
+//
+// If the request does not match any of this router's or its subrouters' routes
+// then this function returns false. If available, a reason for the match failure
+// will be filled in the match argument's MatchErr field. If the match failure type
+// (eg: not found) has a registered handler, the handler is assigned to the Handler
+// field of the match argument.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+ for _, route := range r.routes {
+ if route.Match(req, match) {
+ // Build middleware chain if no error was found
+ if match.MatchErr == nil {
+ for i := len(r.middlewares) - 1; i >= 0; i-- {
+ match.Handler = r.middlewares[i].Middleware(match.Handler)
+ }
+ }
+ return true
+ }
+ }
+
+ if match.MatchErr == ErrMethodMismatch {
+ if r.MethodNotAllowedHandler != nil {
+ match.Handler = r.MethodNotAllowedHandler
+ return true
+ }
+
+ return false
+ }
+
+ // Closest match for a router (includes sub-routers)
+ if r.NotFoundHandler != nil {
+ match.Handler = r.NotFoundHandler
+ match.MatchErr = ErrNotFound
+ return true
+ }
+
+ match.MatchErr = ErrNotFound
+ return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if !r.skipClean {
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ // Clean path to canonical form and redirect.
+ if p := cleanPath(path); p != path {
+
+ // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+ // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
+ // http://code.google.com/p/go/issues/detail?id=5252
+ url := *req.URL
+ url.Path = p
+ p = url.String()
+
+ w.Header().Set("Location", p)
+ w.WriteHeader(http.StatusMovedPermanently)
+ return
+ }
+ }
+ var match RouteMatch
+ var handler http.Handler
+ if r.Match(req, &match) {
+ handler = match.Handler
+ req = requestWithVars(req, match.Vars)
+ req = requestWithRoute(req, match.Route)
+ }
+
+ if handler == nil && match.MatchErr == ErrMethodMismatch {
+ handler = methodNotAllowedHandler()
+ }
+
+ if handler == nil {
+ handler = http.NotFoundHandler()
+ }
+
+ handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+ return r.namedRoutes[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+ return r.namedRoutes[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
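+//
+// A brief sketch (the handler is illustrative):
+//
+// r := mux.NewRouter()
+// r.StrictSlash(true)
+// r.Path("/users/").Handler(usersHandler) // a request for "/users" is redirected to "/users/"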
+func (r *Router) StrictSlash(value bool) *Router {
+ r.strictSlash = value
+ return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned.
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+ r.skipClean = value
+ return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+//
+// If not called, the router will match the unencoded path to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
+func (r *Router) UseEncodedPath() *Router {
+ r.useEncodedPath = true
+ return r
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+ // initialize a route with a copy of the parent router's configuration
+ route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+ r.routes = append(r.routes, route)
+ return route
+}
+
+// Name registers a new route with a name.
+// See Route.Name().
+func (r *Router) Name(name string) *Route {
+ return r.NewRoute().Name(name)
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+ return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+ *http.Request)) *Route {
+ return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+ return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+ return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+ return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+ return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+ return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+ return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+ return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+ return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+ return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
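+//
+// A minimal sketch that prints the path template of every route that has one:
+//
+// _ = r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+// if tpl, err := route.GetPathTemplate(); err == nil {
+// fmt.Println(tpl)
+// }
+// return nil
+// })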
+func (r *Router) Walk(walkFn WalkFunc) error {
+ return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+ for _, t := range r.routes {
+ err := walkFn(t, r, ancestors)
+ if err == SkipRouter {
+ continue
+ }
+ if err != nil {
+ return err
+ }
+ for _, sr := range t.matchers {
+ if h, ok := sr.(*Router); ok {
+ ancestors = append(ancestors, t)
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ ancestors = ancestors[:len(ancestors)-1]
+ }
+ }
+ if h, ok := t.handler.(*Router); ok {
+ ancestors = append(ancestors, t)
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ ancestors = ancestors[:len(ancestors)-1]
+ }
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+ Route *Route
+ Handler http.Handler
+ Vars map[string]string
+
+ // MatchErr is set to appropriate matching error
+ // It is set to ErrMethodMismatch if there is a mismatch in
+ // the request method and route method
+ MatchErr error
+}
+
+type contextKey int
+
+const (
+ varsKey contextKey = iota
+ routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
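+//
+// For example, inside a handler registered for "/articles/{id}" (an
+// illustrative pattern), the variable can be read as:
+//
+// vars := mux.Vars(r)
+// id := vars["id"]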
+func Vars(r *http.Request) map[string]string {
+ if rv := r.Context().Value(varsKey); rv != nil {
+ return rv.(map[string]string)
+ }
+ return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns.
+func CurrentRoute(r *http.Request) *Route {
+ if rv := r.Context().Value(routeKey); rv != nil {
+ return rv.(*Route)
+ }
+ return nil
+}
+
+func requestWithVars(r *http.Request, vars map[string]string) *http.Request {
+ ctx := context.WithValue(r.Context(), varsKey, vars)
+ return r.WithContext(ctx)
+}
+
+func requestWithRoute(r *http.Request, route *Route) *http.Request {
+ ctx := context.WithValue(r.Context(), routeKey, route)
+ return r.WithContext(ctx)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+
+ return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+ for _, v1 := range s1 {
+ for _, v2 := range s2 {
+ if v1 == v2 {
+ return fmt.Errorf("mux: duplicated route variable %q", v2)
+ }
+ }
+ }
+ return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+ length := len(pairs)
+ if length%2 != 0 {
+ return length, fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ }
+ return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]string, length/2)
+ for i := 0; i < length; i += 2 {
+ m[pairs[i]] = pairs[i+1]
+ }
+ return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]*regexp.Regexp, length/2)
+ for i := 0; i < length; i += 2 {
+ regex, err := regexp.Compile(pairs[i+1])
+ if err != nil {
+ return nil, err
+ }
+ m[pairs[i]] = regex
+ }
+ return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+ for _, v := range arr {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != "" {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v == value {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
+// the given regex
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != nil {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v.MatchString(value) {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+}
+
+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..0144842
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,388 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type routeRegexpOptions struct {
+ strictSlash bool
+ useEncodedPath bool
+}
+
+type regexpType int
+
+const (
+ regexpTypePath regexpType = 0
+ regexpTypeHost regexpType = 1
+ regexpTypePrefix regexpType = 2
+ regexpTypeQuery regexpType = 3
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
+ // Check if it is well-formed.
+ idxs, errBraces := braceIndices(tpl)
+ if errBraces != nil {
+ return nil, errBraces
+ }
+ // Backup the original.
+ template := tpl
+ // Now let's parse it.
+ defaultPattern := "[^/]+"
+ if typ == regexpTypeQuery {
+ defaultPattern = ".*"
+ } else if typ == regexpTypeHost {
+ defaultPattern = "[^.]+"
+ }
+// Strict slash only applies when matching a path.
+ if typ != regexpTypePath {
+ options.strictSlash = false
+ }
+ // Set a flag for strictSlash.
+ endSlash := false
+ if options.strictSlash && strings.HasSuffix(tpl, "/") {
+ tpl = tpl[:len(tpl)-1]
+ endSlash = true
+ }
+ varsN := make([]string, len(idxs)/2)
+ varsR := make([]*regexp.Regexp, len(idxs)/2)
+ pattern := bytes.NewBufferString("")
+ pattern.WriteByte('^')
+ reverse := bytes.NewBufferString("")
+ var end int
+ var err error
+ for i := 0; i < len(idxs); i += 2 {
+ // Set all values we are interested in.
+ raw := tpl[end:idxs[i]]
+ end = idxs[i+1]
+ parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+ name := parts[0]
+ patt := defaultPattern
+ if len(parts) == 2 {
+ patt = parts[1]
+ }
+ // Name or pattern can't be empty.
+ if name == "" || patt == "" {
+ return nil, fmt.Errorf("mux: missing name or pattern in %q",
+ tpl[idxs[i]:end])
+ }
+ // Build the regexp pattern.
+ fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+ // Build the reverse template.
+ fmt.Fprintf(reverse, "%s%%s", raw)
+
+ // Append variable name and compiled pattern.
+ varsN[i/2] = name
+ varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Add the remaining.
+ raw := tpl[end:]
+ pattern.WriteString(regexp.QuoteMeta(raw))
+ if options.strictSlash {
+ pattern.WriteString("[/]?")
+ }
+ if typ == regexpTypeQuery {
+ // Add the default pattern if the query value is empty
+ if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+ pattern.WriteString(defaultPattern)
+ }
+ }
+ if typ != regexpTypePrefix {
+ pattern.WriteByte('$')
+ }
+
+ var wildcardHostPort bool
+ if typ == regexpTypeHost {
+ if !strings.Contains(pattern.String(), ":") {
+ wildcardHostPort = true
+ }
+ }
+ reverse.WriteString(raw)
+ if endSlash {
+ reverse.WriteByte('/')
+ }
+ // Compile full regexp.
+ reg, errCompile := regexp.Compile(pattern.String())
+ if errCompile != nil {
+ return nil, errCompile
+ }
+
+ // Check for capturing groups which used to work in older versions
+ if reg.NumSubexp() != len(idxs)/2 {
+ panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+ "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+ }
+
+ // Done!
+ return &routeRegexp{
+ template: template,
+ regexpType: typ,
+ options: options,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
+ wildcardHostPort: wildcardHostPort,
+ }, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+ // The unmodified template.
+ template string
+ // The type of match
+ regexpType regexpType
+ // Options for matching
+ options routeRegexpOptions
+ // Expanded regexp.
+ regexp *regexp.Regexp
+ // Reverse template.
+ reverse string
+ // Variable names.
+ varsN []string
+ // Variable regexps (validators).
+ varsR []*regexp.Regexp
+ // Wildcard host-port (no strict port match in hostname)
+ wildcardHostPort bool
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+ if r.regexpType == regexpTypeHost {
+ host := getHost(req)
+ if r.wildcardHostPort {
+ // Don't be strict on the port match
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ }
+ return r.regexp.MatchString(host)
+ }
+
+ if r.regexpType == regexpTypeQuery {
+ return r.matchQueryString(req)
+ }
+ path := req.URL.Path
+ if r.options.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ return r.regexp.MatchString(path)
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+ urlValues := make([]interface{}, len(r.varsN), len(r.varsN))
+ for k, v := range r.varsN {
+ value, ok := values[v]
+ if !ok {
+ return "", fmt.Errorf("mux: missing route variable %q", v)
+ }
+ if r.regexpType == regexpTypeQuery {
+ value = url.QueryEscape(value)
+ }
+ urlValues[k] = value
+ }
+ rv := fmt.Sprintf(r.reverse, urlValues...)
+ if !r.regexp.MatchString(rv) {
+ // The URL is checked against the full regexp, instead of checking
+ // individual variables. This is faster but to provide a good error
+ // message, we check individual regexps if the URL doesn't match.
+ for k, v := range r.varsN {
+ if !r.varsR[k].MatchString(values[v]) {
+ return "", fmt.Errorf(
+ "mux: variable %q doesn't match, expected %q", values[v],
+ r.varsR[k].String())
+ }
+ }
+ }
+ return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+ if r.regexpType != regexpTypeQuery {
+ return ""
+ }
+ templateKey := strings.SplitN(r.template, "=", 2)[0]
+ val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey)
+ if ok {
+ return templateKey + "=" + val
+ }
+ return ""
+}
+
+// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0].
+// If key was not found, empty string and false is returned.
+func findFirstQueryKey(rawQuery, key string) (value string, ok bool) {
+ query := []byte(rawQuery)
+ for len(query) > 0 {
+ foundKey := query
+ if i := bytes.IndexAny(foundKey, "&;"); i >= 0 {
+ foundKey, query = foundKey[:i], foundKey[i+1:]
+ } else {
+ query = query[:0]
+ }
+ if len(foundKey) == 0 {
+ continue
+ }
+ var value []byte
+ if i := bytes.IndexByte(foundKey, '='); i >= 0 {
+ foundKey, value = foundKey[:i], foundKey[i+1:]
+ }
+ if len(foundKey) < len(key) {
+ // Cannot possibly be key.
+ continue
+ }
+ keyString, err := url.QueryUnescape(string(foundKey))
+ if err != nil {
+ continue
+ }
+ if keyString != key {
+ continue
+ }
+ valueString, err := url.QueryUnescape(string(value))
+ if err != nil {
+ continue
+ }
+ return valueString, true
+ }
+ return "", false
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+ return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
+func braceIndices(s string) ([]int, error) {
+ var level, idx int
+ var idxs []int
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '{':
+ if level++; level == 1 {
+ idx = i
+ }
+ case '}':
+ if level--; level == 0 {
+ idxs = append(idxs, idx, i+1)
+ } else if level < 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ }
+ }
+ if level != 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ return idxs, nil
+}
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+ return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+ host *routeRegexp
+ path *routeRegexp
+ queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+ // Store host variables.
+ if v.host != nil {
+ host := getHost(req)
+ if v.host.wildcardHostPort {
+ // Don't be strict on the port match
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ }
+ matches := v.host.regexp.FindStringSubmatchIndex(host)
+ if len(matches) > 0 {
+ extractVars(host, matches, v.host.varsN, m.Vars)
+ }
+ }
+ path := req.URL.Path
+ if r.useEncodedPath {
+ path = req.URL.EscapedPath()
+ }
+ // Store path variables.
+ if v.path != nil {
+ matches := v.path.regexp.FindStringSubmatchIndex(path)
+ if len(matches) > 0 {
+ extractVars(path, matches, v.path.varsN, m.Vars)
+ // Check if we should redirect.
+ if v.path.options.strictSlash {
+ p1 := strings.HasSuffix(path, "/")
+ p2 := strings.HasSuffix(v.path.template, "/")
+ if p1 != p2 {
+ u, _ := url.Parse(req.URL.String())
+ if p1 {
+ u.Path = u.Path[:len(u.Path)-1]
+ } else {
+ u.Path += "/"
+ }
+ m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
+ }
+ }
+ }
+ }
+ // Store query string variables.
+ for _, q := range v.queries {
+ queryURL := q.getURLQuery(req)
+ matches := q.regexp.FindStringSubmatchIndex(queryURL)
+ if len(matches) > 0 {
+ extractVars(queryURL, matches, q.varsN, m.Vars)
+ }
+ }
+}
+
+// getHost tries its best to return the request host.
+// According to section 14.23 of RFC 2616 the Host header
+// can include the port number if the default value of 80 is not used.
+func getHost(r *http.Request) string {
+ if r.URL.IsAbs() {
+ return r.URL.Host
+ }
+ return r.Host
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+ for i, name := range names {
+ output[name] = input[matches[2*i+2]:matches[2*i+3]]
+ }
+}
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..750afe5
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,736 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+ // Request handler for the route.
+ handler http.Handler
+ // If true, this route never matches: it is only used to build URLs.
+ buildOnly bool
+ // The name used to build URLs.
+ name string
+ // Error resulted from building a route.
+ err error
+
+ // "global" reference to all named routes
+ namedRoutes map[string]*Route
+
+ // config possibly passed in from `Router`
+ routeConf
+}
+
+// SkipClean reports whether path cleaning is enabled for this route via
+// Router.SkipClean.
+func (r *Route) SkipClean() bool {
+ return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+ if r.buildOnly || r.err != nil {
+ return false
+ }
+
+ var matchErr error
+
+ // Match everything.
+ for _, m := range r.matchers {
+ if matched := m.Match(req, match); !matched {
+ if _, ok := m.(methodMatcher); ok {
+ matchErr = ErrMethodMismatch
+ continue
+ }
+
+ // Ignore ErrNotFound errors. These errors arise from match call
+ // to Subrouters.
+ //
+ // This prevents subsequent matching subrouters from failing to
+ // run middleware. If not ignored, the middleware would see a
+ // non-nil MatchErr and be skipped, even when there was a
+ // matching route.
+ if match.MatchErr == ErrNotFound {
+ match.MatchErr = nil
+ }
+
+ matchErr = nil
+ return false
+ }
+ }
+
+ if matchErr != nil {
+ match.MatchErr = matchErr
+ return false
+ }
+
+ if match.MatchErr == ErrMethodMismatch && r.handler != nil {
+ // We found a route which matches request method, clear MatchErr
+ match.MatchErr = nil
+ // Then override the mis-matched handler
+ match.Handler = r.handler
+ }
+
+ // Yay, we have a match. Let's collect some info about it.
+ if match.Route == nil {
+ match.Route = r
+ }
+ if match.Handler == nil {
+ match.Handler = r.handler
+ }
+ if match.Vars == nil {
+ match.Vars = make(map[string]string)
+ }
+
+ // Set variables.
+ r.regexp.setMatch(req, match, r)
+ return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulted from building the route, if any.
+func (r *Route) GetError() error {
+ return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+ r.buildOnly = true
+ return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+ if r.err == nil {
+ r.handler = handler
+ }
+ return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+ return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+ return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// It is an error to call Name more than once on a route.
+func (r *Route) Name(name string) *Route {
+ if r.name != "" {
+ r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+ r.name, name)
+ }
+ if r.err == nil {
+ r.name = name
+ r.namedRoutes[name] = r
+ }
+ return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+ return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+ Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+ if r.err == nil {
+ r.matchers = append(r.matchers, m)
+ }
+ return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
+ if r.err != nil {
+ return r.err
+ }
+ if typ == regexpTypePath || typ == regexpTypePrefix {
+ if len(tpl) > 0 && tpl[0] != '/' {
+ return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+ }
+ if r.regexp.path != nil {
+ tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+ }
+ }
+ rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
+ strictSlash: r.strictSlash,
+ useEncodedPath: r.useEncodedPath,
+ })
+ if err != nil {
+ return err
+ }
+ for _, q := range r.regexp.queries {
+ if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+ return err
+ }
+ }
+ if typ == regexpTypeHost {
+ if r.regexp.path != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+ return err
+ }
+ }
+ r.regexp.host = rr
+ } else {
+ if r.regexp.host != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+ return err
+ }
+ }
+ if typ == regexpTypeQuery {
+ r.regexp.queries = append(r.regexp.queries, rr)
+ } else {
+ r.regexp.path = rr
+ }
+ }
+ r.addMatcher(rr)
+ return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+// r := mux.NewRouter()
+// r.Headers("Content-Type", "application/json",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]string
+ headers, r.err = mapFromPairsToString(pairs...)
+ return r.addMatcher(headerMatcher(headers))
+ }
+ return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+// r := mux.NewRouter()
+// r.HeadersRegexp("Content-Type", "application/(text|json)",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request headers match their regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]*regexp.Regexp
+ headers, r.err = mapFromPairsToRegex(pairs...)
+ return r.addMatcher(headerRegexMatcher(headers))
+ }
+ return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Host("www.example.com")
+// r.Host("{subdomain}.domain.com")
+// r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
+ return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+ return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
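+//
+// For example, a sketch that only matches requests carrying a non-empty
+// Referer header:
+//
+// r.MatcherFunc(func(req *http.Request, match *mux.RouteMatch) bool {
+// return req.Referer() != ""
+// })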
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+ return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+ for k, v := range methods {
+ methods[k] = strings.ToUpper(v)
+ }
+ return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Path("/products/").Handler(ProductsHandler)
+// r.Path("/products/{key}").Handler(ProductsHandler)
+// r.Path("/articles/{category}/{id:[0-9]+}").
+// Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, regexpTypePath)
+ return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
+ return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+// r := mux.NewRouter()
+// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined queries
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+ length := len(pairs)
+ if length%2 != 0 {
+ r.err = fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ return nil
+ }
+ for i := 0; i < length; i += 2 {
+ if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
+ return r
+ }
+ }
+
+ return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ scheme := r.URL.Scheme
+ // https://golang.org/pkg/net/http/#Request
+ // "For [most] server requests, fields other than Path and RawQuery will be
+ // empty."
+ // Since we're an http muxer, the scheme is either going to be http or https
+ // though, so we can just set it based on the tls termination state.
+ if scheme == "" {
+ if r.TLS == nil {
+ scheme = "http"
+ } else {
+ scheme = "https"
+ }
+ }
+ return matchInArray(m, scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+// If the request's URL has a scheme set, it will be matched against.
+// Generally, the URL scheme will only be set if a previous handler set it,
+// such as the ProxyHeaders handler from gorilla/handlers.
+// If unset, the scheme will be determined based on the request's TLS
+// termination state.
+// The first argument to Schemes will be used when constructing a route URL.
+func (r *Route) Schemes(schemes ...string) *Route {
+ for k, v := range schemes {
+ schemes[k] = strings.ToLower(v)
+ }
+ if len(schemes) > 0 {
+ r.buildScheme = schemes[0]
+ }
+ return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
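+//
+// A sketch that lowercases an illustrative "category" variable before the URL
+// is built:
+//
+// r.Path("/articles/{category}").Name("cat").BuildVarsFunc(func(vars map[string]string) map[string]string {
+// vars["category"] = strings.ToLower(vars["category"])
+// return vars
+// })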
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+ if r.buildVarsFunc != nil {
+ // compose the old and new functions
+ old := r.buildVarsFunc
+ r.buildVarsFunc = func(m map[string]string) map[string]string {
+ return f(old(m))
+ }
+ } else {
+ r.buildVarsFunc = f
+ }
+ return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+// r := mux.NewRouter()
+// s := r.Host("www.example.com").Subrouter()
+// s.HandleFunc("/products/", ProductsHandler)
+// s.HandleFunc("/products/{key}", ProductHandler)
+// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+ // initialize a subrouter with a copy of the parent route's configuration
+ router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+ r.addMatcher(router)
+ return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
+//
+// ...a URL for it can be built using:
+//
+// url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+// "/articles/technology/42"
+//
+// This also works for host variables:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Host("{subdomain}.domain.com").
+// Name("article")
+//
+// // url.String() will be "http://news.domain.com/articles/technology/42"
+// url, err := r.Get("article").URL("subdomain", "news",
+// "category", "technology",
+// "id", "42")
+//
+// The scheme of the resulting url will be the first argument that was passed to Schemes:
+//
+// // url.String() will be "https://example.com"
+// r := mux.NewRouter()
+// url, err := r.Host("example.com")
+// .Schemes("https", "http").URL()
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ var scheme, host, path string
+ queries := make([]string, 0, len(r.regexp.queries))
+ if r.regexp.host != nil {
+ if host, err = r.regexp.host.url(values); err != nil {
+ return nil, err
+ }
+ scheme = "http"
+ if r.buildScheme != "" {
+ scheme = r.buildScheme
+ }
+ }
+ if r.regexp.path != nil {
+ if path, err = r.regexp.path.url(values); err != nil {
+ return nil, err
+ }
+ }
+ for _, q := range r.regexp.queries {
+ var query string
+ if query, err = q.url(values); err != nil {
+ return nil, err
+ }
+ queries = append(queries, query)
+ }
+ return &url.URL{
+ Scheme: scheme,
+ Host: host,
+ Path: path,
+ RawQuery: strings.Join(queries, "&"),
+ }, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp.host == nil {
+ return nil, errors.New("mux: route doesn't have a host")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ host, err := r.regexp.host.url(values)
+ if err != nil {
+ return nil, err
+ }
+ u := &url.URL{
+ Scheme: "http",
+ Host: host,
+ }
+ if r.buildScheme != "" {
+ u.Scheme = r.buildScheme
+ }
+ return u, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp.path == nil {
+ return nil, errors.New("mux: route doesn't have a path")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ path, err := r.regexp.path.url(values)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Path: path,
+ }, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp.path == nil {
+ return "", errors.New("mux: route doesn't have a path")
+ }
+ return r.regexp.path.template, nil
+}
+
+// GetPathRegexp returns the expanded regular expression used to match route path.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathRegexp() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp.path == nil {
+ return "", errors.New("mux: route does not have a path")
+ }
+ return r.regexp.path.regexp.String(), nil
+}
+
+// GetQueriesRegexp returns the expanded regular expressions used to match the
+// route queries.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have queries.
+func (r *Route) GetQueriesRegexp() ([]string, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp.queries == nil {
+ return nil, errors.New("mux: route doesn't have queries")
+ }
+ queries := make([]string, 0, len(r.regexp.queries))
+ for _, query := range r.regexp.queries {
+ queries = append(queries, query.regexp.String())
+ }
+ return queries, nil
+}
+
+// GetQueriesTemplates returns the templates used to build the
+// query matching.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define queries.
+func (r *Route) GetQueriesTemplates() ([]string, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp.queries == nil {
+ return nil, errors.New("mux: route doesn't have queries")
+ }
+ queries := make([]string, 0, len(r.regexp.queries))
+ for _, query := range r.regexp.queries {
+ queries = append(queries, query.template)
+ }
+ return queries, nil
+}
+
+// GetMethods returns the methods the route matches against.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have methods.
+func (r *Route) GetMethods() ([]string, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ for _, m := range r.matchers {
+ if methods, ok := m.(methodMatcher); ok {
+ return []string(methods), nil
+ }
+ }
+ return nil, errors.New("mux: route doesn't have methods")
+}
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp.host == nil {
+ return "", errors.New("mux: route doesn't have a host")
+ }
+ return r.regexp.host.template, nil
+}
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+ m, err := mapFromPairsToString(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+ if r.buildVarsFunc != nil {
+ m = r.buildVarsFunc(m)
+ }
+ return m
+}
diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go
new file mode 100644
index 0000000..5f5c496
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/test_helpers.go
@@ -0,0 +1,19 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import "net/http"
+
+// SetURLVars sets the URL variables for the given request, to be accessed via
+// mux.Vars for testing route behaviour. Arguments are not modified; a shallow
+// copy is returned.
+//
+// This API should only be used for testing purposes; it provides a way to
+// inject variables into the request context. Alternatively, URL variables
+// can be set by making a route that captures the required variables,
+// starting a server and sending the request to that server.
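+//
+// A minimal sketch of the intended use in a handler unit test (the handler and
+// request values shown are illustrative):
+//
+//	req := httptest.NewRequest("GET", "/articles/42", nil)
+//	req = mux.SetURLVars(req, map[string]string{"id": "42"})
+//	rr := httptest.NewRecorder()
+//	ArticleHandler(rr, req) // inside the handler, mux.Vars(req)["id"] == "42"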
+func SetURLVars(r *http.Request, val map[string]string) *http.Request {
+ return requestWithVars(r, val)
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 0000000..5f0d1fb
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 0000000..7a950d1
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
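+
+A rough sketch of how a CLI entry point might use this (the message and exit
+behavior are illustrative, not part of the library):
+
+    if mousetrap.StartedByExplorer() {
+        fmt.Println("This is a command line tool; please run it from a command prompt.")
+        time.Sleep(5 * time.Second)
+        os.Exit(1)
+    }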
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 0000000..9d2d8a4
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe.
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 0000000..336142a
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // defined by the Win32 API
+ th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+ Process32First = kernel.MustFindProc("Process32FirstW")
+ Process32Next = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+ dwSize uint32
+ cntUsage uint32
+ th32ProcessID uint32
+ th32DefaultHeapID int
+ th32ModuleID uint32
+ cntThreads uint32
+ th32ParentProcessID uint32
+ pcPriClassBase int32
+ dwFlags uint32
+ szExeFile [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+ snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+ if snapshot == uintptr(syscall.InvalidHandle) {
+ err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+ return
+ }
+ defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+ var processEntry processEntry32
+ processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+ ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32First: %v", e1)
+ return
+ }
+
+ for {
+ if processEntry.th32ProcessID == uint32(pid) {
+ pe = &processEntry
+ return
+ }
+
+ ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32Next: %v", e1)
+ return
+ }
+ }
+}
+
+func getppid() (pid int, err error) {
+ pe, err := getProcessEntry(os.Getpid())
+ if err != nil {
+ return
+ }
+
+ pid = int(pe.th32ParentProcessID)
+ return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe.
+func StartedByExplorer() bool {
+ ppid, err := getppid()
+ if err != nil {
+ return false
+ }
+
+ pe, err := getProcessEntry(ppid)
+ if err != nil {
+ return false
+ }
+
+ name := syscall.UTF16ToString(pe.szExeFile[:])
+ return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 0000000..9a28e57
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe.
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(os.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE
new file mode 100644
index 0000000..fbff658
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2016 Mail.Ru Group
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go
new file mode 100644
index 0000000..07fb4bc
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/buffer/pool.go
@@ -0,0 +1,270 @@
+// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
+// reduce copying and to allow reuse of individual chunks.
+package buffer
+
+import (
+ "io"
+ "sync"
+)
+
+// PoolConfig contains configuration for the allocation and reuse strategy.
+type PoolConfig struct {
+ StartSize int // Minimum chunk size that is allocated.
+ PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead.
+ MaxSize int // Maximum chunk size that will be allocated.
+}
+
+var config = PoolConfig{
+ StartSize: 128,
+ PooledSize: 512,
+ MaxSize: 32768,
+}
+
+// Reuse pool: chunk size -> pool.
+var buffers = map[int]*sync.Pool{}
+
+func initBuffers() {
+ for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
+ buffers[l] = new(sync.Pool)
+ }
+}
+
+func init() {
+ initBuffers()
+}
+
+// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
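+//
+// For example (the sizes below are purely illustrative), a caller that serializes
+// large documents could raise the limits before any encoding happens:
+//
+//	buffer.Init(buffer.PoolConfig{
+//		StartSize:  512,
+//		PooledSize: 1024,
+//		MaxSize:    262144,
+//	})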
+func Init(cfg PoolConfig) {
+ config = cfg
+ initBuffers()
+}
+
+// putBuf puts a chunk to reuse pool if it can be reused.
+func putBuf(buf []byte) {
+ size := cap(buf)
+ if size < config.PooledSize {
+ return
+ }
+ if c := buffers[size]; c != nil {
+ c.Put(buf[:0])
+ }
+}
+
+// getBuf gets a chunk from the reuse pool or creates a new one if reuse failed.
+func getBuf(size int) []byte {
+ if size < config.PooledSize {
+ return make([]byte, 0, size)
+ }
+
+ if c := buffers[size]; c != nil {
+ v := c.Get()
+ if v != nil {
+ return v.([]byte)
+ }
+ }
+ return make([]byte, 0, size)
+}
+
+// Buffer is a buffer optimized for serialization without extra copying.
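+//
+// A minimal usage sketch: append data, then drain the buffer once at the end.
+//
+//	var b buffer.Buffer
+//	b.AppendString("hello, ")
+//	b.AppendString("world")
+//	out := b.BuildBytes() // out == []byte("hello, world")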
+type Buffer struct {
+ // Buf is the current chunk that can be used for serialization.
+ Buf []byte
+
+ toPool []byte
+ bufs [][]byte
+}
+
+// EnsureSpace makes sure that the current chunk contains at least s free bytes,
+// possibly creating a new chunk.
+func (b *Buffer) EnsureSpace(s int) {
+ if cap(b.Buf)-len(b.Buf) >= s {
+ return
+ }
+ l := len(b.Buf)
+ if l > 0 {
+ if cap(b.toPool) != cap(b.Buf) {
+ // Chunk was reallocated, toPool can be pooled.
+ putBuf(b.toPool)
+ }
+ if cap(b.bufs) == 0 {
+ b.bufs = make([][]byte, 0, 8)
+ }
+ b.bufs = append(b.bufs, b.Buf)
+ l = cap(b.toPool) * 2
+ } else {
+ l = config.StartSize
+ }
+
+ if l > config.MaxSize {
+ l = config.MaxSize
+ }
+ b.Buf = getBuf(l)
+ b.toPool = b.Buf
+}
+
+// AppendByte appends a single byte to buffer.
+func (b *Buffer) AppendByte(data byte) {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+ b.Buf = append(b.Buf, data)
+}
+
+// AppendBytes appends a byte slice to buffer.
+func (b *Buffer) AppendBytes(data []byte) {
+ for len(data) > 0 {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+
+ sz := cap(b.Buf) - len(b.Buf)
+ if sz > len(data) {
+ sz = len(data)
+ }
+
+ b.Buf = append(b.Buf, data[:sz]...)
+ data = data[sz:]
+ }
+}
+
+// AppendString appends a string to the buffer.
+func (b *Buffer) AppendString(data string) {
+ for len(data) > 0 {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+
+ sz := cap(b.Buf) - len(b.Buf)
+ if sz > len(data) {
+ sz = len(data)
+ }
+
+ b.Buf = append(b.Buf, data[:sz]...)
+ data = data[sz:]
+ }
+}
+
+// Size computes the size of a buffer by adding sizes of every chunk.
+func (b *Buffer) Size() int {
+ size := len(b.Buf)
+ for _, buf := range b.bufs {
+ size += len(buf)
+ }
+ return size
+}
+
+// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
+func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
+ var n int
+ for _, buf := range b.bufs {
+ if err == nil {
+ n, err = w.Write(buf)
+ written += n
+ }
+ putBuf(buf)
+ }
+
+ if err == nil {
+ n, err = w.Write(b.Buf)
+ written += n
+ }
+ putBuf(b.toPool)
+
+ b.bufs = nil
+ b.Buf = nil
+ b.toPool = nil
+
+ return
+}
+
+// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
+// copied if it does not fit in a single chunk. You can optionally provide one byte
+// slice as argument that it will try to reuse.
+func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
+ if len(b.bufs) == 0 {
+ ret := b.Buf
+ b.toPool = nil
+ b.Buf = nil
+ return ret
+ }
+
+ var ret []byte
+ size := b.Size()
+
+ // If we got a buffer as argument and it is big enough, reuse it.
+ if len(reuse) == 1 && cap(reuse[0]) >= size {
+ ret = reuse[0][:0]
+ } else {
+ ret = make([]byte, 0, size)
+ }
+ for _, buf := range b.bufs {
+ ret = append(ret, buf...)
+ putBuf(buf)
+ }
+
+ ret = append(ret, b.Buf...)
+ putBuf(b.toPool)
+
+ b.bufs = nil
+ b.toPool = nil
+ b.Buf = nil
+
+ return ret
+}
+
+type readCloser struct {
+ offset int
+ bufs [][]byte
+}
+
+func (r *readCloser) Read(p []byte) (n int, err error) {
+ for _, buf := range r.bufs {
+ // Copy as much as we can.
+ x := copy(p[n:], buf[r.offset:])
+ n += x // Increment how much we filled.
+
+ // Did we empty the whole buffer?
+ if r.offset+x == len(buf) {
+ // On to the next buffer.
+ r.offset = 0
+ r.bufs = r.bufs[1:]
+
+ // We can release this buffer.
+ putBuf(buf)
+ } else {
+ r.offset += x
+ }
+
+ if n == len(p) {
+ break
+ }
+ }
+ // No buffers left or nothing read?
+ if len(r.bufs) == 0 {
+ err = io.EOF
+ }
+ return
+}
+
+func (r *readCloser) Close() error {
+ // Release all remaining buffers.
+ for _, buf := range r.bufs {
+ putBuf(buf)
+ }
+ // In case Close gets called multiple times.
+ r.bufs = nil
+
+ return nil
+}
+
+// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
+func (b *Buffer) ReadCloser() io.ReadCloser {
+ ret := &readCloser{0, append(b.bufs, b.Buf)}
+
+ b.bufs = nil
+ b.toPool = nil
+ b.Buf = nil
+
+ return ret
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
new file mode 100644
index 0000000..ff7b27c
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
@@ -0,0 +1,24 @@
+// This file will only be included in the build if neither the
+// easyjson_nounsafe nor the appengine build tag is set. See the README notes
+// for more details.
+
+//+build !easyjson_nounsafe
+//+build !appengine
+
+package jlexer
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// bytesToStr creates a string pointing at the slice to avoid copying.
+//
+// Warning: the string returned by the function should be used with care, as the whole input data
+// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
+// may be garbage-collected even when the string exists.
+func bytesToStr(data []byte) string {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+ shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
+ return *(*string)(unsafe.Pointer(&shdr))
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
new file mode 100644
index 0000000..864d1be
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
@@ -0,0 +1,13 @@
+// This file is included in the build if any of the build tags below
+// are defined. Refer to the README notes for more details.
+
+//+build easyjson_nounsafe appengine
+
+package jlexer
+
+// bytesToStr creates a string from []byte by making a normal copy.
+//
+// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
+func bytesToStr(data []byte) string {
+ return string(data)
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go
new file mode 100644
index 0000000..e90ec40
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/error.go
@@ -0,0 +1,15 @@
+package jlexer
+
+import "fmt"
+
+// LexerError implements the error interface and represents all possible errors that can
+// occur while parsing JSON data.
+type LexerError struct {
+ Reason string
+ Offset int
+ Data string
+}
+
+func (l *LexerError) Error() string {
+ return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
new file mode 100644
index 0000000..ddd376b
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -0,0 +1,1182 @@
+// Package jlexer contains a JSON lexer implementation.
+//
+// It is expected that it is mostly used with generated parser code, so the interface is tuned
+// for a parser that knows what kind of data is expected.
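+//
+// A hand-written consumer follows the same shape as generated code: drive the
+// lexer token by token. A minimal sketch (the field names are illustrative):
+//
+//	l := jlexer.Lexer{Data: []byte(`{"id": 7, "name": "gopher"}`)}
+//	var id int
+//	l.Delim('{')
+//	for !l.IsDelim('}') {
+//		key := l.String()
+//		l.WantColon()
+//		switch key {
+//		case "id":
+//			id = l.Int()
+//		default:
+//			l.SkipRecursive()
+//		}
+//		l.WantComma()
+//	}
+//	l.Delim('}')
+//	l.Consumed()
+//	err := l.Error() // nil on success; id == 7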
+package jlexer
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// tokenKind determines type of a token.
+type tokenKind byte
+
+const (
+ tokenUndef tokenKind = iota // No token.
+ tokenDelim // Delimiter: one of '{', '}', '[' or ']'.
+ tokenString // A string literal, e.g. "abc\u1234"
+ tokenNumber // Number literal, e.g. 1.5e5
+ tokenBool // Boolean literal: true or false.
+ tokenNull // null keyword.
+)
+
+// token describes a single token: type, position in the input and value.
+type token struct {
+ kind tokenKind // Type of a token.
+
+ boolValue bool // Value if a boolean literal token.
+ byteValue []byte // Raw value of a token.
+ delimValue byte
+}
+
+// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
+type Lexer struct {
+ Data []byte // Input data given to the lexer.
+
+ start int // Start of the current token.
+ pos int // Current unscanned position in the input stream.
+ token token // Last scanned token, if token.kind != tokenUndef.
+
+ firstElement bool // Whether the current element is the first in an array or an object.
+ wantSep byte // A comma or a colon character, which needs to occur before a token.
+
+ UseMultipleErrors bool // If we want to use multiple errors.
+ fatalError error // Fatal error that occurred during lexing; usually a syntax error.
+ multipleErrors []*LexerError // Semantic errors that occurred during lexing. Parsing continues after these errors are found.
+}
+
+// FetchToken scans the input for the next token.
+func (r *Lexer) FetchToken() {
+ r.token.kind = tokenUndef
+ r.start = r.pos
+
+ // Check that r.pos is a valid position within r.Data.
+ // If it is not, the input data is corrupted.
+ if len(r.Data) < r.pos {
+ r.errParse("Unexpected end of data")
+ return
+ }
+ // Determine the type of a token by skipping whitespace and reading the
+ // first character.
+ for _, c := range r.Data[r.pos:] {
+ switch c {
+ case ':', ',':
+ if r.wantSep == c {
+ r.pos++
+ r.start++
+ r.wantSep = 0
+ } else {
+ r.errSyntax()
+ }
+
+ case ' ', '\t', '\r', '\n':
+ r.pos++
+ r.start++
+
+ case '"':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenString
+ r.fetchString()
+ return
+
+ case '{', '[':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+ r.firstElement = true
+ r.token.kind = tokenDelim
+ r.token.delimValue = r.Data[r.pos]
+ r.pos++
+ return
+
+ case '}', ']':
+ if !r.firstElement && (r.wantSep != ',') {
+ r.errSyntax()
+ }
+ r.wantSep = 0
+ r.token.kind = tokenDelim
+ r.token.delimValue = r.Data[r.pos]
+ r.pos++
+ return
+
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+ r.token.kind = tokenNumber
+ r.fetchNumber()
+ return
+
+ case 'n':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenNull
+ r.fetchNull()
+ return
+
+ case 't':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenBool
+ r.token.boolValue = true
+ r.fetchTrue()
+ return
+
+ case 'f':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenBool
+ r.token.boolValue = false
+ r.fetchFalse()
+ return
+
+ default:
+ r.errSyntax()
+ return
+ }
+ }
+ r.fatalError = io.EOF
+ return
+}
+
+// isTokenEnd returns true if the char can follow a non-delimiter token
+func isTokenEnd(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':'
+}
+
+// fetchNull fetches and checks remaining bytes of null keyword.
+func (r *Lexer) fetchNull() {
+ r.pos += 4
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-3] != 'u' ||
+ r.Data[r.pos-2] != 'l' ||
+ r.Data[r.pos-1] != 'l' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 4
+ r.errSyntax()
+ }
+}
+
+// fetchTrue fetches and checks remaining bytes of true keyword.
+func (r *Lexer) fetchTrue() {
+ r.pos += 4
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-3] != 'r' ||
+ r.Data[r.pos-2] != 'u' ||
+ r.Data[r.pos-1] != 'e' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 4
+ r.errSyntax()
+ }
+}
+
+// fetchFalse fetches and checks remaining bytes of false keyword.
+func (r *Lexer) fetchFalse() {
+ r.pos += 5
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-4] != 'a' ||
+ r.Data[r.pos-3] != 'l' ||
+ r.Data[r.pos-2] != 's' ||
+ r.Data[r.pos-1] != 'e' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 5
+ r.errSyntax()
+ }
+}
+
+// fetchNumber scans a number literal token.
+func (r *Lexer) fetchNumber() {
+ hasE := false
+ afterE := false
+ hasDot := false
+
+ r.pos++
+ for i, c := range r.Data[r.pos:] {
+ switch {
+ case c >= '0' && c <= '9':
+ afterE = false
+ case c == '.' && !hasDot:
+ hasDot = true
+ case (c == 'e' || c == 'E') && !hasE:
+ hasE = true
+ hasDot = true
+ afterE = true
+ case (c == '+' || c == '-') && afterE:
+ afterE = false
+ default:
+ r.pos += i
+ if !isTokenEnd(c) {
+ r.errSyntax()
+ } else {
+ r.token.byteValue = r.Data[r.start:r.pos]
+ }
+ return
+ }
+ }
+
+ r.pos = len(r.Data)
+ r.token.byteValue = r.Data[r.start:]
+}
+
+// findStringLen scans the string literal for the ending quote char to determine the required size.
+// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
+func findStringLen(data []byte) (isValid, hasEscapes bool, length int) {
+ delta := 0
+
+ for i := 0; i < len(data); i++ {
+ switch data[i] {
+ case '\\':
+ i++
+ delta++
+ if i < len(data) && data[i] == 'u' {
+ delta++
+ }
+ case '"':
+ return true, (delta > 0), (i - delta)
+ }
+ }
+
+ return false, false, len(data)
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ var val rune
+ for i := 2; i < len(s) && i < 6; i++ {
+ var v byte
+ c := s[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ v = c - '0'
+ case 'a', 'b', 'c', 'd', 'e', 'f':
+ v = c - 'a' + 10
+ case 'A', 'B', 'C', 'D', 'E', 'F':
+ v = c - 'A' + 10
+ default:
+ return -1
+ }
+
+ val <<= 4
+ val |= rune(v)
+ }
+ return val
+}
+
+// processEscape processes a single escape sequence and returns number of bytes processed.
+func (r *Lexer) processEscape(data []byte) (int, error) {
+ if len(data) < 2 {
+ return 0, fmt.Errorf("syntax error at %v", string(data))
+ }
+
+ c := data[1]
+ switch c {
+ case '"', '/', '\\':
+ r.token.byteValue = append(r.token.byteValue, c)
+ return 2, nil
+ case 'b':
+ r.token.byteValue = append(r.token.byteValue, '\b')
+ return 2, nil
+ case 'f':
+ r.token.byteValue = append(r.token.byteValue, '\f')
+ return 2, nil
+ case 'n':
+ r.token.byteValue = append(r.token.byteValue, '\n')
+ return 2, nil
+ case 'r':
+ r.token.byteValue = append(r.token.byteValue, '\r')
+ return 2, nil
+ case 't':
+ r.token.byteValue = append(r.token.byteValue, '\t')
+ return 2, nil
+ case 'u':
+ rr := getu4(data)
+ if rr < 0 {
+ return 0, errors.New("syntax error")
+ }
+
+ read := 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(data[read:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ read += 6
+ rr = dec
+ } else {
+ rr = unicode.ReplacementChar
+ }
+ }
+ var d [4]byte
+ s := utf8.EncodeRune(d[:], rr)
+ r.token.byteValue = append(r.token.byteValue, d[:s]...)
+ return read, nil
+ }
+
+ return 0, errors.New("syntax error")
+}
+
+// fetchString scans a string literal token.
+func (r *Lexer) fetchString() {
+ r.pos++
+ data := r.Data[r.pos:]
+
+ isValid, hasEscapes, length := findStringLen(data)
+ if !isValid {
+ r.pos += length
+ r.errParse("unterminated string literal")
+ return
+ }
+ if !hasEscapes {
+ r.token.byteValue = data[:length]
+ r.pos += length + 1
+ return
+ }
+
+ r.token.byteValue = make([]byte, 0, length)
+ p := 0
+ for i := 0; i < len(data); {
+ switch data[i] {
+ case '"':
+ r.pos += i + 1
+ r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+ i++
+ return
+
+ case '\\':
+ r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+ off, err := r.processEscape(data[i:])
+ if err != nil {
+ r.errParse(err.Error())
+ return
+ }
+ i += off
+ p = i
+
+ default:
+ i++
+ }
+ }
+ r.errParse("unterminated string literal")
+}
+
+// scanToken scans the next token if no token is currently available in the lexer.
+func (r *Lexer) scanToken() {
+ if r.token.kind != tokenUndef || r.fatalError != nil {
+ return
+ }
+
+ r.FetchToken()
+}
+
+// consume resets the current token to allow scanning the next one.
+func (r *Lexer) consume() {
+ r.token.kind = tokenUndef
+ r.token.delimValue = 0
+}
+
+// Ok returns true if no error (including io.EOF) was encountered during scanning.
+func (r *Lexer) Ok() bool {
+ return r.fatalError == nil
+}
+
+const maxErrorContextLen = 13
+
+func (r *Lexer) errParse(what string) {
+ if r.fatalError == nil {
+ var str string
+ if len(r.Data)-r.pos <= maxErrorContextLen {
+ str = string(r.Data)
+ } else {
+ str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
+ }
+ r.fatalError = &LexerError{
+ Reason: what,
+ Offset: r.pos,
+ Data: str,
+ }
+ }
+}
+
+func (r *Lexer) errSyntax() {
+ r.errParse("syntax error")
+}
+
+func (r *Lexer) errInvalidToken(expected string) {
+ if r.fatalError != nil {
+ return
+ }
+ if r.UseMultipleErrors {
+ r.pos = r.start
+ r.consume()
+ r.SkipRecursive()
+ switch expected {
+ case "[":
+ r.token.delimValue = ']'
+ r.token.kind = tokenDelim
+ case "{":
+ r.token.delimValue = '}'
+ r.token.kind = tokenDelim
+ }
+ r.addNonfatalError(&LexerError{
+ Reason: fmt.Sprintf("expected %s", expected),
+ Offset: r.start,
+ Data: string(r.Data[r.start:r.pos]),
+ })
+ return
+ }
+
+ var str string
+ if len(r.token.byteValue) <= maxErrorContextLen {
+ str = string(r.token.byteValue)
+ } else {
+ str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
+ }
+ r.fatalError = &LexerError{
+ Reason: fmt.Sprintf("expected %s", expected),
+ Offset: r.pos,
+ Data: str,
+ }
+}
+
+func (r *Lexer) GetPos() int {
+ return r.pos
+}
+
+// Delim consumes a token and verifies that it is the given delimiter.
+func (r *Lexer) Delim(c byte) {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+
+ if !r.Ok() || r.token.delimValue != c {
+ r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
+ r.errInvalidToken(string([]byte{c}))
+ } else {
+ r.consume()
+ }
+}
+
+// IsDelim returns true if there was no scanning error and next token is the given delimiter.
+func (r *Lexer) IsDelim(c byte) bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ return !r.Ok() || r.token.delimValue == c
+}
+
+// Null verifies that the next token is null and consumes it.
+func (r *Lexer) Null() {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenNull {
+ r.errInvalidToken("null")
+ }
+ r.consume()
+}
+
+// IsNull returns true if the next token is a null keyword.
+func (r *Lexer) IsNull() bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ return r.Ok() && r.token.kind == tokenNull
+}
+
+// Skip skips a single token.
+func (r *Lexer) Skip() {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ r.consume()
+}
+
+// SkipRecursive skips next array or object completely, or just skips a single token if not
+// an array/object.
+//
+// Note: no syntax validation is performed on the skipped data.
+func (r *Lexer) SkipRecursive() {
+ r.scanToken()
+ var start, end byte
+
+ switch r.token.delimValue {
+ case '{':
+ start, end = '{', '}'
+ case '[':
+ start, end = '[', ']'
+ default:
+ r.consume()
+ return
+ }
+
+ r.consume()
+
+ level := 1
+ inQuotes := false
+ wasEscape := false
+
+ for i, c := range r.Data[r.pos:] {
+ switch {
+ case c == start && !inQuotes:
+ level++
+ case c == end && !inQuotes:
+ level--
+ if level == 0 {
+ r.pos += i + 1
+ return
+ }
+ case c == '\\' && inQuotes:
+ wasEscape = !wasEscape
+ continue
+ case c == '"' && inQuotes:
+ inQuotes = wasEscape
+ case c == '"':
+ inQuotes = true
+ }
+ wasEscape = false
+ }
+ r.pos = len(r.Data)
+ r.fatalError = &LexerError{
+ Reason: "EOF reached while skipping array/object or token",
+ Offset: r.pos,
+ Data: string(r.Data[r.pos:]),
+ }
+}
+
+// Raw fetches the next item recursively as a data slice
+func (r *Lexer) Raw() []byte {
+ r.SkipRecursive()
+ if !r.Ok() {
+ return nil
+ }
+ return r.Data[r.start:r.pos]
+}
+
+// IsStart returns whether the lexer is positioned at the start
+// of an input string.
+func (r *Lexer) IsStart() bool {
+ return r.pos == 0
+}
+
+// Consumed reads all remaining bytes from the input, publishing an error if
+// there is anything but whitespace remaining.
+func (r *Lexer) Consumed() {
+ if r.pos > len(r.Data) || !r.Ok() {
+ return
+ }
+
+ for _, c := range r.Data[r.pos:] {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ r.AddError(&LexerError{
+ Reason: "invalid character '" + string(c) + "' after top-level value",
+ Offset: r.pos,
+ Data: string(r.Data[r.pos:]),
+ })
+ return
+ }
+
+ r.pos++
+ r.start++
+ }
+}
+
+func (r *Lexer) unsafeString() (string, []byte) {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return "", nil
+ }
+ bytes := r.token.byteValue
+ ret := bytesToStr(r.token.byteValue)
+ r.consume()
+ return ret, bytes
+}
+
+// UnsafeString returns the string value if the token is a string literal.
+//
+// Warning: returned string may point to the input buffer, so the string should not outlive
+// the input buffer. Intended pattern of usage is as an argument to a switch statement.
+func (r *Lexer) UnsafeString() string {
+ ret, _ := r.unsafeString()
+ return ret
+}
+
+// UnsafeBytes returns the byte slice if the token is a string literal.
+func (r *Lexer) UnsafeBytes() []byte {
+ _, ret := r.unsafeString()
+ return ret
+}
+
+// String reads a string literal.
+func (r *Lexer) String() string {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return ""
+ }
+ ret := string(r.token.byteValue)
+ r.consume()
+ return ret
+}
+
+// Bytes reads a string literal and base64 decodes it into a byte slice.
+func (r *Lexer) Bytes() []byte {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return nil
+ }
+ ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
+ n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
+ if err != nil {
+ r.fatalError = &LexerError{
+ Reason: err.Error(),
+ }
+ return nil
+ }
+
+ r.consume()
+ return ret[:n]
+}
+
+// Bool reads a true or false boolean keyword.
+func (r *Lexer) Bool() bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenBool {
+ r.errInvalidToken("bool")
+ return false
+ }
+ ret := r.token.boolValue
+ r.consume()
+ return ret
+}
+
+func (r *Lexer) number() string {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenNumber {
+ r.errInvalidToken("number")
+ return ""
+ }
+ ret := bytesToStr(r.token.byteValue)
+ r.consume()
+ return ret
+}
+
+func (r *Lexer) Uint8() uint8 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint8(n)
+}
+
+func (r *Lexer) Uint16() uint16 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint16(n)
+}
+
+func (r *Lexer) Uint32() uint32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint32(n)
+}
+
+func (r *Lexer) Uint64() uint64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Uint() uint {
+ return uint(r.Uint64())
+}
+
+func (r *Lexer) Int8() int8 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int8(n)
+}
+
+func (r *Lexer) Int16() int16 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int16(n)
+}
+
+func (r *Lexer) Int32() int32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int32(n)
+}
+
+func (r *Lexer) Int64() int64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Int() int {
+ return int(r.Int64())
+}
+
+func (r *Lexer) Uint8Str() uint8 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint8(n)
+}
+
+func (r *Lexer) Uint16Str() uint16 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint16(n)
+}
+
+func (r *Lexer) Uint32Str() uint32 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint32(n)
+}
+
+func (r *Lexer) Uint64Str() uint64 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return n
+}
+
+func (r *Lexer) UintStr() uint {
+ return uint(r.Uint64Str())
+}
+
+func (r *Lexer) UintptrStr() uintptr {
+ return uintptr(r.Uint64Str())
+}
+
+func (r *Lexer) Int8Str() int8 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int8(n)
+}
+
+func (r *Lexer) Int16Str() int16 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int16(n)
+}
+
+func (r *Lexer) Int32Str() int32 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int32(n)
+}
+
+func (r *Lexer) Int64Str() int64 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return n
+}
+
+func (r *Lexer) IntStr() int {
+ return int(r.Int64Str())
+}
+
+func (r *Lexer) Float32() float32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseFloat(s, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return float32(n)
+}
+
+func (r *Lexer) Float32Str() float32 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+ n, err := strconv.ParseFloat(s, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return float32(n)
+}
+
+func (r *Lexer) Float64() float64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Float64Str() float64 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+ n, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Error() error {
+ return r.fatalError
+}
+
+func (r *Lexer) AddError(e error) {
+ if r.fatalError == nil {
+ r.fatalError = e
+ }
+}
+
+func (r *Lexer) AddNonFatalError(e error) {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Data: string(r.Data[r.start:r.pos]),
+ Reason: e.Error(),
+ })
+}
+
+func (r *Lexer) addNonfatalError(err *LexerError) {
+ if r.UseMultipleErrors {
+ // We don't want to add errors with the same offset.
+ if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
+ return
+ }
+ r.multipleErrors = append(r.multipleErrors, err)
+ return
+ }
+ r.fatalError = err
+}
+
+func (r *Lexer) GetNonFatalErrors() []*LexerError {
+ return r.multipleErrors
+}
+
+// JsonNumber fetches a json.Number from the 'encoding/json' package.
+// Both numeric values (int or float) and strings containing them are valid.
+func (r *Lexer) JsonNumber() json.Number {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() {
+ r.errInvalidToken("json.Number")
+ return json.Number("")
+ }
+
+ switch r.token.kind {
+ case tokenString:
+ return json.Number(r.String())
+ case tokenNumber:
+ return json.Number(r.Raw())
+ case tokenNull:
+ r.Null()
+ return json.Number("")
+ default:
+ r.errSyntax()
+ return json.Number("")
+ }
+}
+
+// Interface fetches an interface{}, analogous to what the 'encoding/json' package decodes.
+func (r *Lexer) Interface() interface{} {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+
+ if !r.Ok() {
+ return nil
+ }
+ switch r.token.kind {
+ case tokenString:
+ return r.String()
+ case tokenNumber:
+ return r.Float64()
+ case tokenBool:
+ return r.Bool()
+ case tokenNull:
+ r.Null()
+ return nil
+ }
+
+ if r.token.delimValue == '{' {
+ r.consume()
+
+ ret := map[string]interface{}{}
+ for !r.IsDelim('}') {
+ key := r.String()
+ r.WantColon()
+ ret[key] = r.Interface()
+ r.WantComma()
+ }
+ r.Delim('}')
+
+ if r.Ok() {
+ return ret
+ } else {
+ return nil
+ }
+ } else if r.token.delimValue == '[' {
+ r.consume()
+
+ ret := []interface{}{}
+ for !r.IsDelim(']') {
+ ret = append(ret, r.Interface())
+ r.WantComma()
+ }
+ r.Delim(']')
+
+ if r.Ok() {
+ return ret
+ } else {
+ return nil
+ }
+ }
+ r.errSyntax()
+ return nil
+}
+
+// WantComma requires a comma to be present before fetching next token.
+func (r *Lexer) WantComma() {
+ r.wantSep = ','
+ r.firstElement = false
+}
+
+// WantColon requires a colon to be present before fetching next token.
+func (r *Lexer) WantColon() {
+ r.wantSep = ':'
+ r.firstElement = false
+}
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
new file mode 100644
index 0000000..b9ed7cc
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -0,0 +1,390 @@
+// Package jwriter contains a JSON writer.
+package jwriter
+
+import (
+ "io"
+ "strconv"
+ "unicode/utf8"
+
+ "github.com/mailru/easyjson/buffer"
+)
+
+// Flags describe various encoding options. The behavior may actually be implemented in the encoder, but
+// the Flags field in Writer is used to set and pass them around.
+type Flags int
+
+const (
+ NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
+ NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
+)
+
+// Writer is a JSON writer.
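+//
+// A minimal usage sketch, mirroring what generated marshalers do: emit tokens
+// into the Writer, then extract the encoded bytes.
+//
+//	w := jwriter.Writer{Flags: jwriter.NilSliceAsEmpty}
+//	w.RawByte('{')
+//	w.String("name")
+//	w.RawByte(':')
+//	w.String("gopher")
+//	w.RawByte('}')
+//	data, err := w.BuildBytes() // data == []byte(`{"name":"gopher"}`)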
+type Writer struct {
+ Flags Flags
+
+ Error error
+ Buffer buffer.Buffer
+ NoEscapeHTML bool
+}
+
+// Size returns the size of the data that was written out.
+func (w *Writer) Size() int {
+ return w.Buffer.Size()
+}
+
+// DumpTo outputs the data to given io.Writer, resetting the buffer.
+func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
+ return w.Buffer.DumpTo(out)
+}
+
+// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
+// as argument that it will try to reuse.
+func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
+ if w.Error != nil {
+ return nil, w.Error
+ }
+
+ return w.Buffer.BuildBytes(reuse...), nil
+}
+
+// ReadCloser returns an io.ReadCloser that can be used to read the data.
+// ReadCloser also resets the buffer.
+func (w *Writer) ReadCloser() (io.ReadCloser, error) {
+ if w.Error != nil {
+ return nil, w.Error
+ }
+
+ return w.Buffer.ReadCloser(), nil
+}
+
+// RawByte appends raw binary data to the buffer.
+func (w *Writer) RawByte(c byte) {
+ w.Buffer.AppendByte(c)
+}
+
+// RawString appends a string to the buffer.
+func (w *Writer) RawString(s string) {
+ w.Buffer.AppendString(s)
+}
+
+// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
+// calling with results of MarshalJSON-like functions.
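+//
+// For example (a sketch), forwarding a value that implements json.Marshaler:
+//
+//	w.Raw(v.MarshalJSON())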
+func (w *Writer) Raw(data []byte, err error) {
+ switch {
+ case w.Error != nil:
+ return
+ case err != nil:
+ w.Error = err
+ case len(data) > 0:
+ w.Buffer.AppendBytes(data)
+ default:
+ w.RawString("null")
+ }
+}
+
+// RawText encloses raw binary data in quotes and appends it to the buffer.
+// Useful for calling with results of MarshalText-like functions.
+func (w *Writer) RawText(data []byte, err error) {
+ switch {
+ case w.Error != nil:
+ return
+ case err != nil:
+ w.Error = err
+ case len(data) > 0:
+ w.String(string(data))
+ default:
+ w.RawString("null")
+ }
+}
+
+// Base64Bytes appends data to the buffer after base64 encoding it.
+func (w *Writer) Base64Bytes(data []byte) {
+ if data == nil {
+ w.Buffer.AppendString("null")
+ return
+ }
+ w.Buffer.AppendByte('"')
+ w.base64(data)
+ w.Buffer.AppendByte('"')
+}
+
+func (w *Writer) Uint8(n uint8) {
+ w.Buffer.EnsureSpace(3)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint16(n uint16) {
+ w.Buffer.EnsureSpace(5)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint32(n uint32) {
+ w.Buffer.EnsureSpace(10)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint(n uint) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint64(n uint64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Int8(n int8) {
+ w.Buffer.EnsureSpace(4)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int16(n int16) {
+ w.Buffer.EnsureSpace(6)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int32(n int32) {
+ w.Buffer.EnsureSpace(11)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int(n int) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int64(n int64) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Uint8Str(n uint8) {
+ w.Buffer.EnsureSpace(3)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint16Str(n uint16) {
+ w.Buffer.EnsureSpace(5)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint32Str(n uint32) {
+ w.Buffer.EnsureSpace(10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintStr(n uint) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint64Str(n uint64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintptrStr(n uintptr) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int8Str(n int8) {
+ w.Buffer.EnsureSpace(4)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int16Str(n int16) {
+ w.Buffer.EnsureSpace(6)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int32Str(n int32) {
+ w.Buffer.EnsureSpace(11)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) IntStr(n int) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int64Str(n int64) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Float32(n float32) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+}
+
+func (w *Writer) Float32Str(n float32) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Float64(n float64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
+}
+
+func (w *Writer) Float64Str(n float64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Bool(v bool) {
+ w.Buffer.EnsureSpace(5)
+ if v {
+ w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
+ } else {
+ w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
+ }
+}
+
+const chars = "0123456789abcdef"
+
+func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
+ // Note: might make sense to use a table if there are more chars to escape. With 4 chars
+ // it benchmarks the same.
+ if escapeHTML {
+ return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+ } else {
+ return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+ }
+}
+
+func (w *Writer) String(s string) {
+ w.Buffer.AppendByte('"')
+
+ // Portions of the string that contain no escapes are appended as
+ // byte slices.
+
+ p := 0 // last non-escape symbol
+
+ for i := 0; i < len(s); {
+ c := s[i]
+
+ if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
+ // single-width character, no escaping is required
+ i++
+ continue
+ } else if c < utf8.RuneSelf {
+ // single-width character, needs to be escaped
+ w.Buffer.AppendString(s[p:i])
+ switch c {
+ case '\t':
+ w.Buffer.AppendString(`\t`)
+ case '\r':
+ w.Buffer.AppendString(`\r`)
+ case '\n':
+ w.Buffer.AppendString(`\n`)
+ case '\\':
+ w.Buffer.AppendString(`\\`)
+ case '"':
+ w.Buffer.AppendString(`\"`)
+ default:
+ w.Buffer.AppendString(`\u00`)
+ w.Buffer.AppendByte(chars[c>>4])
+ w.Buffer.AppendByte(chars[c&0xf])
+ }
+
+ i++
+ p = i
+ continue
+ }
+
+ // broken UTF-8 sequence
+ runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
+ if runeValue == utf8.RuneError && runeWidth == 1 {
+ w.Buffer.AppendString(s[p:i])
+ w.Buffer.AppendString(`\ufffd`)
+ i++
+ p = i
+ continue
+ }
+
+ // JSONP-problematic characters: U+2028 (line separator) and U+2029 (paragraph separator)
+ if runeValue == '\u2028' || runeValue == '\u2029' {
+ w.Buffer.AppendString(s[p:i])
+ w.Buffer.AppendString(`\u202`)
+ w.Buffer.AppendByte(chars[runeValue&0xf])
+ i += runeWidth
+ p = i
+ continue
+ }
+ i += runeWidth
+ }
+ w.Buffer.AppendString(s[p:])
+ w.Buffer.AppendByte('"')
+}
+
+const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+const padChar = '='
+
+func (w *Writer) base64(in []byte) {
+
+ if len(in) == 0 {
+ return
+ }
+
+ w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
+
+ si := 0
+ n := (len(in) / 3) * 3
+
+ for si < n {
+ // Convert 3x 8bit source bytes into 4 bytes
+ val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
+
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
+
+ si += 3
+ }
+
+ remain := len(in) - si
+ if remain == 0 {
+ return
+ }
+
+ // Add the remaining small block
+ val := uint(in[si+0]) << 16
+ if remain == 2 {
+ val |= uint(in[si+1]) << 8
+ }
+
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
+
+ switch remain {
+ case 2:
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
+ case 1:
+ w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
+ }
+}
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 0000000..c67dad6
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+ The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 0000000..003e99f
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable for outputting text differences in a human-friendly way;
+// there are no guarantees that generated diffs are consumable by patch(1).
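+//
+// An illustrative usage sketch (not part of the original package documentation),
+// showing how a unified diff between two strings could be produced:
+//
+//	diff := difflib.UnifiedDiff{
+//		A:        difflib.SplitLines("one\ntwo\nthree\n"),
+//		B:        difflib.SplitLines("one\ntwo\nfour\n"),
+//		FromFile: "a.txt",
+//		ToFile:   "b.txt",
+//		Context:  3,
+//	}
+//	text, err := difflib.GetUnifiedDiffString(diff)
+//	if err == nil {
+//		fmt.Print(text)
+//	}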
+package difflib
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" .
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool) *SequenceMatcher {
+
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
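+//
+// For example (an illustrative sketch, not from the original source; the
+// reference and candidates variables are hypothetical):
+//
+//	m := difflib.NewMatcher(nil, nil)
+//	m.SetSeq2(reference) // cache information about the reference sequence once
+//	for _, candidate := range candidates {
+//		m.SetSeq1(candidate)
+//		fmt.Println(m.Ratio())
+//	}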
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s, _ := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s, _ := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s, _ := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
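+//
+// For example (an illustrative sketch, not from the original source), for
+// a = {"a", "b", "c"} and b = {"a", "d", "c"} the resulting opcodes are:
+//
+//	{'e', 0, 1, 0, 1} // "a" == "a"
+//	{'r', 1, 2, 1, 2} // "b" is replaced by "d"
+//	{'e', 2, 3, 2, 3} // "c" == "c"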
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n)})
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s] = m.fullBCount[s] + 1
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches += 1
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ if length <= 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ var diffErr error
+ wf := func(format string, args ...interface{}) {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+ ws := func(s string) {
+ _, err := buf.WriteString(s)
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ prefix := map[byte]string{
+ 'i': "+ ",
+ 'd': "- ",
+ 'r': "! ",
+ 'e': " ",
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ }
+ }
+
+ first, last := g[0], g[len(g)-1]
+ ws("***************" + diff.Eol)
+
+ range1 := formatRangeContext(first.I1, last.I2)
+ wf("*** %s ****%s", range1, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, cc := range g {
+ if cc.Tag == 'i' {
+ continue
+ }
+ for _, line := range diff.A[cc.I1:cc.I2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+
+ range2 := formatRangeContext(first.J1, last.J2)
+ wf("--- %s ----%s", range2, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, cc := range g {
+ if cc.Tag == 'd' {
+ continue
+ }
+ for _, line := range diff.B[cc.J1:cc.J2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+ }
+ return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteContextDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
diff --git a/vendor/github.com/radovskyb/watcher/.travis.yml b/vendor/github.com/radovskyb/watcher/.travis.yml
new file mode 100644
index 0000000..3f478f1
--- /dev/null
+++ b/vendor/github.com/radovskyb/watcher/.travis.yml
@@ -0,0 +1,4 @@
+language: go
+go:
+ - 1.7
+ - tip
\ No newline at end of file
diff --git a/vendor/github.com/radovskyb/watcher/LICENSE b/vendor/github.com/radovskyb/watcher/LICENSE
new file mode 100644
index 0000000..92ef0e9
--- /dev/null
+++ b/vendor/github.com/radovskyb/watcher/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2016, Benjamin Radovsky.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of watcher nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific prior
+ written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/radovskyb/watcher/README.md b/vendor/github.com/radovskyb/watcher/README.md
new file mode 100644
index 0000000..1c822e2
--- /dev/null
+++ b/vendor/github.com/radovskyb/watcher/README.md
@@ -0,0 +1,181 @@
+# watcher
+
+[![Build Status](https://travis-ci.org/radovskyb/watcher.svg?branch=master)](https://travis-ci.org/radovskyb/watcher)
+
+`watcher` is a Go package for watching for file or directory changes (recursively or non-recursively) without using filesystem events, which allows it to work consistently across platforms.
+
+`watcher` watches for changes and sends notifications over channels whenever an event or an error occurs.
+
+Events contain the `os.FileInfo` of the file or directory that the event is based on, the type of event, and the file or directory path.
+
+- [Installation](#installation)
+- [Features](#features)
+- [Example](#example)
+- [Contributing](#contributing)
+- [Watcher Command](#command)
+
+# Update
+- Event.OldPath has been added [Aug 17, 2019]
+- Added new file filter hooks (Including a built in regexp filtering hook) [Dec 12, 2018]
+- Event.Path for Rename and Move events is now returned in the format of `fromPath -> toPath`
+
+#### The Chmod event is not supported under Windows.
+
+# Installation
+
+```shell
+go get -u github.com/radovskyb/watcher/...
+```
+
+# Features
+
+- Customizable polling interval.
+- Filter Events.
+- Watch folders recursively or non-recursively.
+- Choose to ignore hidden files.
+- Choose to ignore specified files and folders.
+- Notifies the `os.FileInfo` of the file that the event is based on, e.g. `Name`, `ModTime`, `IsDir`, etc.
+- Notifies the full path of the file that the event is based on or the old and new paths if the event was a `Rename` or `Move` event.
+- Limit the number of events that can be received per watching cycle.
+- List the files being watched.
+- Trigger custom events.
+
+# Todo
+
+- Write more tests.
+- Write benchmarks.
+
+# Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/radovskyb/watcher"
+)
+
+func main() {
+ w := watcher.New()
+
+ // SetMaxEvents to 1 to allow at most 1 event to be received
+ // on the Event channel per watching cycle.
+ //
+ // If SetMaxEvents is not set, the default is to send all events.
+ w.SetMaxEvents(1)
+
+ // Only notify rename and move events.
+ w.FilterOps(watcher.Rename, watcher.Move)
+
+ // Only files that match the regular expression during file listings
+ // will be watched.
+ r := regexp.MustCompile("^abc$")
+ w.AddFilterHook(watcher.RegexFilterHook(r, false))
+
+ go func() {
+ for {
+ select {
+ case event := <-w.Event:
+ fmt.Println(event) // Print the event's info.
+ case err := <-w.Error:
+ log.Fatalln(err)
+ case <-w.Closed:
+ return
+ }
+ }
+ }()
+
+ // Watch this folder for changes.
+ if err := w.Add("."); err != nil {
+ log.Fatalln(err)
+ }
+
+ // Watch test_folder recursively for changes.
+ if err := w.AddRecursive("../test_folder"); err != nil {
+ log.Fatalln(err)
+ }
+
+ // Print a list of all of the files and folders currently
+ // being watched and their paths.
+ for path, f := range w.WatchedFiles() {
+ fmt.Printf("%s: %s\n", path, f.Name())
+ }
+
+ fmt.Println()
+
+ // Trigger 2 events after watcher started.
+ go func() {
+ w.Wait()
+ w.TriggerEvent(watcher.Create, nil)
+ w.TriggerEvent(watcher.Remove, nil)
+ }()
+
+ // Start the watching process - it'll check for changes every 100ms.
+ if err := w.Start(time.Millisecond * 100); err != nil {
+ log.Fatalln(err)
+ }
+}
+```
+
+# Contributing
+If you would like to contribute, simply submit a pull request.
+
+# Command
+
+`watcher` comes with a simple command which is installed when using the `go get` command from above.
+
+# Usage
+
+```
+Usage of watcher:
+ -cmd string
+ command to run when an event occurs
+ -dotfiles
+ watch dot files (default true)
+ -ignore string
+ comma separated list of paths to ignore
+ -interval string
+ watcher poll interval (default "100ms")
+ -keepalive
+ keep alive when a cmd returns code != 0
+ -list
+ list watched files on start
+ -pipe
+ pipe event's info to command's stdin
+ -recursive
+ watch folders recursively (default true)
+ -startcmd
+ run the command when watcher starts
+```
+
+All of the flags are optional and watcher can also be called by itself:
+```shell
+watcher
+```
+(watches the current directory recursively for changes and notifies any events that occur.)
+
+A more elaborate example using the `watcher` command:
+```shell
+watcher -dotfiles=false -recursive=false -cmd="./myscript" main.go ../
+```
+In this example, `watcher` will ignore dot files and folders and won't watch any of the specified folders recursively. It will also run the script `./myscript` anytime an event occurs while watching `main.go` or any files or folders in the parent directory (`../`).
+
+Using the `pipe` and `cmd` flags together will send the event's info to the command's stdin when changes are detected.
+
+First create a file called `script.py` with the following contents:
+```python
+import sys
+
+for line in sys.stdin:
+ print (line + " - python")
+```
+
+Next, start watcher with the `pipe` and `cmd` flags enabled:
+```shell
+watcher -cmd="python script.py" -pipe=true
+```
+
+Now when changes are detected, the event's info will be output from the running python script.
diff --git a/vendor/github.com/radovskyb/watcher/ishidden.go b/vendor/github.com/radovskyb/watcher/ishidden.go
new file mode 100644
index 0000000..0f242e8
--- /dev/null
+++ b/vendor/github.com/radovskyb/watcher/ishidden.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package watcher
+
+import (
+ "path/filepath"
+ "strings"
+)
+
+func isHiddenFile(path string) (bool, error) {
+ return strings.HasPrefix(filepath.Base(path), "."), nil
+}
diff --git a/vendor/github.com/radovskyb/watcher/ishidden_windows.go b/vendor/github.com/radovskyb/watcher/ishidden_windows.go
new file mode 100644
index 0000000..306c6b7
--- /dev/null
+++ b/vendor/github.com/radovskyb/watcher/ishidden_windows.go
@@ -0,0 +1,21 @@
+// +build windows
+
+package watcher
+
+import (
+ "syscall"
+)
+
+func isHiddenFile(path string) (bool, error) {
+ pointer, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return false, err
+ }
+
+ attributes, err := syscall.GetFileAttributes(pointer)
+ if err != nil {
+ return false, err
+ }
+
+ return attributes&syscall.FILE_ATTRIBUTE_HIDDEN != 0, nil
+}
diff --git a/vendor/github.com/radovskyb/watcher/samefile.go b/vendor/github.com/radovskyb/watcher/samefile.go
new file mode 100644
index 0000000..5968bb6
--- /dev/null
+++ b/vendor/github.com/radovskyb/watcher/samefile.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package watcher
+
+import "os"
+
+func sameFile(fi1, fi2 os.FileInfo) bool {
+ return os.SameFile(fi1, fi2)
+}
diff --git a/vendor/github.com/radovskyb/watcher/samefile_windows.go b/vendor/github.com/radovskyb/watcher/samefile_windows.go
new file mode 100644
index 0000000..7785a12
--- /dev/null
+++ b/vendor/github.com/radovskyb/watcher/samefile_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package watcher
+
+import "os"
+
+func sameFile(fi1, fi2 os.FileInfo) bool {
+ return fi1.ModTime() == fi2.ModTime() &&
+ fi1.Size() == fi2.Size() &&
+ fi1.Mode() == fi2.Mode() &&
+ fi1.IsDir() == fi2.IsDir()
+}
diff --git a/vendor/github.com/radovskyb/watcher/watcher.go b/vendor/github.com/radovskyb/watcher/watcher.go
new file mode 100644
index 0000000..4da4dfe
--- /dev/null
+++ b/vendor/github.com/radovskyb/watcher/watcher.go
@@ -0,0 +1,715 @@
+package watcher
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ // ErrDurationTooShort occurs when calling the watcher's Start
+ // method with a duration that's less than 1 nanosecond.
+ ErrDurationTooShort = errors.New("error: duration is less than 1ns")
+
+ // ErrWatcherRunning occurs when trying to call the watcher's
+ // Start method while the polling cycle is already running
+ // from a previous call to Start without a corresponding Close.
+ ErrWatcherRunning = errors.New("error: watcher is already running")
+
+ // ErrWatchedFileDeleted is an error that occurs when a file or folder that was
+ // being watched has been deleted.
+ ErrWatchedFileDeleted = errors.New("error: watched file or folder deleted")
+
+ // ErrSkip is less of an error and more of a way for path hooks to skip a file or
+ // directory.
+ ErrSkip = errors.New("error: skipping file")
+)
+
+// An Op is a type that is used to describe what type
+// of event has occurred during the watching process.
+type Op uint32
+
+// Ops
+const (
+ Create Op = iota
+ Write
+ Remove
+ Rename
+ Chmod
+ Move
+)
+
+var ops = map[Op]string{
+ Create: "CREATE",
+ Write: "WRITE",
+ Remove: "REMOVE",
+ Rename: "RENAME",
+ Chmod: "CHMOD",
+ Move: "MOVE",
+}
+
+// String prints the string version of the Op consts
+func (e Op) String() string {
+ if op, found := ops[e]; found {
+ return op
+ }
+ return "???"
+}
+
+// An Event describes an event that is received when file or directory
+// changes occur. It includes the os.FileInfo of the changed file or
+// directory, the type of event that occurred, and the full path of the file.
+type Event struct {
+ Op
+ Path string
+ OldPath string
+ os.FileInfo
+}
+
+// String returns a string depending on what type of event occurred and the
+// file name associated with the event.
+func (e Event) String() string {
+ if e.FileInfo == nil {
+ return "???"
+ }
+
+ pathType := "FILE"
+ if e.IsDir() {
+ pathType = "DIRECTORY"
+ }
+ return fmt.Sprintf("%s %q %s [%s]", pathType, e.Name(), e.Op, e.Path)
+}
+
+// FilterFileHookFunc is a function that is called to filter files during listings.
+// If a file is ok to be listed, nil is returned otherwise ErrSkip is returned.
+type FilterFileHookFunc func(info os.FileInfo, fullPath string) error
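+
+// An illustrative sketch (not part of the original package): a custom
+// FilterFileHookFunc that skips any file larger than a given size. It assumes
+// w is a *watcher.Watcher created with watcher.New().
+//
+//	sizeFilter := func(info os.FileInfo, fullPath string) error {
+//		if !info.IsDir() && info.Size() > 1<<20 { // skip files over 1 MiB
+//			return watcher.ErrSkip
+//		}
+//		return nil
+//	}
+//	w.AddFilterHook(sizeFilter)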
+
+// RegexFilterHook is a function that accepts or rejects a file
+// for listing based on whether its filename or full path matches
+// a regular expression.
+func RegexFilterHook(r *regexp.Regexp, useFullPath bool) FilterFileHookFunc {
+ return func(info os.FileInfo, fullPath string) error {
+ str := info.Name()
+
+ if useFullPath {
+ str = fullPath
+ }
+
+ // Match
+ if r.MatchString(str) {
+ return nil
+ }
+
+ // No match.
+ return ErrSkip
+ }
+}
+
+// Watcher describes a process that watches files for changes.
+type Watcher struct {
+ Event chan Event
+ Error chan error
+ Closed chan struct{}
+ close chan struct{}
+ wg *sync.WaitGroup
+
+ // mu protects the following.
+ mu *sync.Mutex
+ ffh []FilterFileHookFunc
+ running bool
+ names map[string]bool // bool for recursive or not.
+ files map[string]os.FileInfo // map of files.
+ ignored map[string]struct{} // ignored files or directories.
+ ops map[Op]struct{} // Op filtering.
+ ignoreHidden bool // ignore hidden files or not.
+ maxEvents int // max sent events per cycle
+}
+
+// New creates a new Watcher.
+func New() *Watcher {
+ // Set up the WaitGroup for w.Wait().
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ return &Watcher{
+ Event: make(chan Event),
+ Error: make(chan error),
+ Closed: make(chan struct{}),
+ close: make(chan struct{}),
+ mu: new(sync.Mutex),
+ wg: &wg,
+ files: make(map[string]os.FileInfo),
+ ignored: make(map[string]struct{}),
+ names: make(map[string]bool),
+ }
+}
+
+// SetMaxEvents controls the maximum number of events that are sent on
+// the Event channel per watching cycle. If max events is less than 1, there is
+// no limit, which is the default.
+func (w *Watcher) SetMaxEvents(delta int) {
+ w.mu.Lock()
+ w.maxEvents = delta
+ w.mu.Unlock()
+}
+
+// AddFilterHook adds a file filter hook to the watcher.
+func (w *Watcher) AddFilterHook(f FilterFileHookFunc) {
+ w.mu.Lock()
+ w.ffh = append(w.ffh, f)
+ w.mu.Unlock()
+}
+
+// IgnoreHiddenFiles sets the watcher to ignore any file or directory
+// that starts with a dot.
+func (w *Watcher) IgnoreHiddenFiles(ignore bool) {
+ w.mu.Lock()
+ w.ignoreHidden = ignore
+ w.mu.Unlock()
+}
+
+// FilterOps filters which event op types should be returned
+// when an event occurs.
+func (w *Watcher) FilterOps(ops ...Op) {
+ w.mu.Lock()
+ w.ops = make(map[Op]struct{})
+ for _, op := range ops {
+ w.ops[op] = struct{}{}
+ }
+ w.mu.Unlock()
+}
+
+// Add adds either a single file or directory to the file list.
+func (w *Watcher) Add(name string) (err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ name, err = filepath.Abs(name)
+ if err != nil {
+ return err
+ }
+
+ // If name is on the ignored list or if hidden files are
+ // ignored and name is a hidden file or directory, simply return.
+ _, ignored := w.ignored[name]
+
+ isHidden, err := isHiddenFile(name)
+ if err != nil {
+ return err
+ }
+
+ if ignored || (w.ignoreHidden && isHidden) {
+ return nil
+ }
+
+ // Add the directory's contents to the files list.
+ fileList, err := w.list(name)
+ if err != nil {
+ return err
+ }
+ for k, v := range fileList {
+ w.files[k] = v
+ }
+
+ // Add the name to the names list.
+ w.names[name] = false
+
+ return nil
+}
+
+func (w *Watcher) list(name string) (map[string]os.FileInfo, error) {
+ fileList := make(map[string]os.FileInfo)
+
+ // Make sure name exists.
+ stat, err := os.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+
+ fileList[name] = stat
+
+ // If it's not a directory, just return.
+ if !stat.IsDir() {
+ return fileList, nil
+ }
+
+ // It's a directory.
+ fInfoList, err := ioutil.ReadDir(name)
+ if err != nil {
+ return nil, err
+ }
+ // Add all of the files in the directory to the file list as long
+ // as they aren't on the ignored list or are hidden files if ignoreHidden
+ // is set to true.
+outer:
+ for _, fInfo := range fInfoList {
+ path := filepath.Join(name, fInfo.Name())
+ _, ignored := w.ignored[path]
+
+ isHidden, err := isHiddenFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ if ignored || (w.ignoreHidden && isHidden) {
+ continue
+ }
+
+ for _, f := range w.ffh {
+ err := f(fInfo, path)
+ if err == ErrSkip {
+ continue outer
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ fileList[path] = fInfo
+ }
+ return fileList, nil
+}
+
+// AddRecursive adds either a single file or directory recursively to the file list.
+func (w *Watcher) AddRecursive(name string) (err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ name, err = filepath.Abs(name)
+ if err != nil {
+ return err
+ }
+
+ fileList, err := w.listRecursive(name)
+ if err != nil {
+ return err
+ }
+ for k, v := range fileList {
+ w.files[k] = v
+ }
+
+ // Add the name to the names list.
+ w.names[name] = true
+
+ return nil
+}
+
+func (w *Watcher) listRecursive(name string) (map[string]os.FileInfo, error) {
+ fileList := make(map[string]os.FileInfo)
+
+ return fileList, filepath.Walk(name, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ for _, f := range w.ffh {
+ err := f(info, path)
+ if err == ErrSkip {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ // If path is ignored and it's a directory, skip the directory. If it's
+ // ignored and it's a single file, skip the file.
+ _, ignored := w.ignored[path]
+
+ isHidden, err := isHiddenFile(path)
+ if err != nil {
+ return err
+ }
+
+ if ignored || (w.ignoreHidden && isHidden) {
+ if info.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ // Add the path and its info to the file list.
+ fileList[path] = info
+ return nil
+ })
+}
+
+// Remove removes either a single file or directory from the watcher's file list.
+func (w *Watcher) Remove(name string) (err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ name, err = filepath.Abs(name)
+ if err != nil {
+ return err
+ }
+
+ // Remove the name from w's names list.
+ delete(w.names, name)
+
+ // If name is a single file, remove it and return.
+ info, found := w.files[name]
+ if !found {
+ return nil // Doesn't exist, just return.
+ }
+ if !info.IsDir() {
+ delete(w.files, name)
+ return nil
+ }
+
+ // Delete the actual directory from w.files
+ delete(w.files, name)
+
+ // If it's a directory, delete all of its contents from w.files.
+ for path := range w.files {
+ if filepath.Dir(path) == name {
+ delete(w.files, path)
+ }
+ }
+ return nil
+}
+
+// RemoveRecursive removes either a single file or a directory recursively from
+// the watcher's file list.
+func (w *Watcher) RemoveRecursive(name string) (err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ name, err = filepath.Abs(name)
+ if err != nil {
+ return err
+ }
+
+ // Remove the name from w's names list.
+ delete(w.names, name)
+
+ // If name is a single file, remove it and return.
+ info, found := w.files[name]
+ if !found {
+ return nil // Doesn't exist, just return.
+ }
+ if !info.IsDir() {
+ delete(w.files, name)
+ return nil
+ }
+
+ // If it's a directory, delete all of its contents recursively
+ // from w.files.
+ for path := range w.files {
+ if strings.HasPrefix(path, name) {
+ delete(w.files, path)
+ }
+ }
+ return nil
+}
+
+// Ignore adds paths that should be ignored.
+//
+// For files that are already added, Ignore removes them.
+func (w *Watcher) Ignore(paths ...string) (err error) {
+ for _, path := range paths {
+ path, err = filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+ // Remove any of the paths that were already added.
+ if err := w.RemoveRecursive(path); err != nil {
+ return err
+ }
+ w.mu.Lock()
+ w.ignored[path] = struct{}{}
+ w.mu.Unlock()
+ }
+ return nil
+}
+
+// WatchedFiles returns a map of files added to a Watcher.
+func (w *Watcher) WatchedFiles() map[string]os.FileInfo {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ files := make(map[string]os.FileInfo)
+ for k, v := range w.files {
+ files[k] = v
+ }
+
+ return files
+}
+
+// fileInfo is an implementation of os.FileInfo that can be used
+// as a mocked os.FileInfo when triggering an event when the specified
+// os.FileInfo is nil.
+type fileInfo struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+ sys interface{}
+ dir bool
+}
+
+func (fs *fileInfo) IsDir() bool {
+ return fs.dir
+}
+func (fs *fileInfo) ModTime() time.Time {
+ return fs.modTime
+}
+func (fs *fileInfo) Mode() os.FileMode {
+ return fs.mode
+}
+func (fs *fileInfo) Name() string {
+ return fs.name
+}
+func (fs *fileInfo) Size() int64 {
+ return fs.size
+}
+func (fs *fileInfo) Sys() interface{} {
+ return fs.sys
+}
+
+// TriggerEvent is a method that can be used to trigger an event, separate to
+// the file watching process.
+func (w *Watcher) TriggerEvent(eventType Op, file os.FileInfo) {
+ w.Wait()
+ if file == nil {
+ file = &fileInfo{name: "triggered event", modTime: time.Now()}
+ }
+ w.Event <- Event{Op: eventType, Path: "-", FileInfo: file}
+}
+
+func (w *Watcher) retrieveFileList() map[string]os.FileInfo {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ fileList := make(map[string]os.FileInfo)
+
+ var list map[string]os.FileInfo
+ var err error
+
+ for name, recursive := range w.names {
+ if recursive {
+ list, err = w.listRecursive(name)
+ if err != nil {
+ if os.IsNotExist(err) {
+ w.mu.Unlock()
+ if name == err.(*os.PathError).Path {
+ w.Error <- ErrWatchedFileDeleted
+ w.RemoveRecursive(name)
+ }
+ w.mu.Lock()
+ } else {
+ w.Error <- err
+ }
+ }
+ } else {
+ list, err = w.list(name)
+ if err != nil {
+ if os.IsNotExist(err) {
+ w.mu.Unlock()
+ if name == err.(*os.PathError).Path {
+ w.Error <- ErrWatchedFileDeleted
+ w.Remove(name)
+ }
+ w.mu.Lock()
+ } else {
+ w.Error <- err
+ }
+ }
+ }
+ // Add the files to the file list.
+ for k, v := range list {
+ fileList[k] = v
+ }
+ }
+
+ return fileList
+}
+
+// Start begins the polling cycle which repeats every specified
+// duration until Close is called.
+func (w *Watcher) Start(d time.Duration) error {
+ // Return an error if d is less than 1 nanosecond.
+ if d < time.Nanosecond {
+ return ErrDurationTooShort
+ }
+
+ // Make sure the Watcher is not already running.
+ w.mu.Lock()
+ if w.running {
+ w.mu.Unlock()
+ return ErrWatcherRunning
+ }
+ w.running = true
+ w.mu.Unlock()
+
+ // Unblock w.Wait().
+ w.wg.Done()
+
+ for {
+ // done lets the inner polling cycle loop know when the
+ // current cycle's method has finished executing.
+ done := make(chan struct{})
+
+ // Any events that are found are first piped to evt before
+ // being sent to the main Event channel.
+ evt := make(chan Event)
+
+ // Retrieve the file list for all watched files and dirs.
+ fileList := w.retrieveFileList()
+
+ // cancel can be used to cancel the current event polling function.
+ cancel := make(chan struct{})
+
+ // Look for events.
+ go func() {
+ w.pollEvents(fileList, evt, cancel)
+ done <- struct{}{}
+ }()
+
+ // numEvents holds the number of events for the current cycle.
+ numEvents := 0
+
+ inner:
+ for {
+ select {
+ case <-w.close:
+ close(cancel)
+ close(w.Closed)
+ return nil
+ case event := <-evt:
+ if len(w.ops) > 0 { // Filter Ops.
+ _, found := w.ops[event.Op]
+ if !found {
+ continue
+ }
+ }
+ numEvents++
+ if w.maxEvents > 0 && numEvents > w.maxEvents {
+ close(cancel)
+ break inner
+ }
+ w.Event <- event
+ case <-done: // Current cycle is finished.
+ break inner
+ }
+ }
+
+ // Update the watcher's file list.
+ w.mu.Lock()
+ w.files = fileList
+ w.mu.Unlock()
+
+ // Sleep and then continue to the next loop iteration.
+ time.Sleep(d)
+ }
+}
+
+func (w *Watcher) pollEvents(files map[string]os.FileInfo, evt chan Event,
+ cancel chan struct{}) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ // Store create and remove events for use to check for rename events.
+ creates := make(map[string]os.FileInfo)
+ removes := make(map[string]os.FileInfo)
+
+ // Check for removed files.
+ for path, info := range w.files {
+ if _, found := files[path]; !found {
+ removes[path] = info
+ }
+ }
+
+ // Check for created files, writes and chmods.
+ for path, info := range files {
+ oldInfo, found := w.files[path]
+ if !found {
+ // A file was created.
+ creates[path] = info
+ continue
+ }
+ if oldInfo.ModTime() != info.ModTime() {
+ select {
+ case <-cancel:
+ return
+ case evt <- Event{Write, path, path, info}:
+ }
+ }
+ if oldInfo.Mode() != info.Mode() {
+ select {
+ case <-cancel:
+ return
+ case evt <- Event{Chmod, path, path, info}:
+ }
+ }
+ }
+
+ // Check for renames and moves.
+ for path1, info1 := range removes {
+ for path2, info2 := range creates {
+ if sameFile(info1, info2) {
+ e := Event{
+ Op: Move,
+ Path: path2,
+ OldPath: path1,
+ FileInfo: info1,
+ }
+ // If they are from the same directory, it's a rename
+ // instead of a move event.
+ if filepath.Dir(path1) == filepath.Dir(path2) {
+ e.Op = Rename
+ }
+
+ delete(removes, path1)
+ delete(creates, path2)
+
+ select {
+ case <-cancel:
+ return
+ case evt <- e:
+ }
+ }
+ }
+ }
+
+ // Send all the remaining create and remove events.
+ for path, info := range creates {
+ select {
+ case <-cancel:
+ return
+ case evt <- Event{Create, path, "", info}:
+ }
+ }
+ for path, info := range removes {
+ select {
+ case <-cancel:
+ return
+ case evt <- Event{Remove, path, path, info}:
+ }
+ }
+}
+
+// Wait blocks until the watcher is started.
+func (w *Watcher) Wait() {
+ w.wg.Wait()
+}
+
+// Close stops a Watcher and unlocks its mutex, then sends a close signal.
+func (w *Watcher) Close() {
+ w.mu.Lock()
+ if !w.running {
+ w.mu.Unlock()
+ return
+ }
+ w.running = false
+ w.files = make(map[string]os.FileInfo)
+ w.names = make(map[string]bool)
+ w.mu.Unlock()
+ // Send a close signal to the Start method.
+ w.close <- struct{}{}
+}
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 0000000..c7b459e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,39 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+cobra.test
+bin
+
+.idea/
+*.iml
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 0000000..94ec530
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia
+Bjørn Erik Pedersen
+Fabiano Franz
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
new file mode 100644
index 0000000..a9bd4e5
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -0,0 +1,29 @@
+language: go
+
+stages:
+ - diff
+ - test
+ - build
+
+go:
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+before_install:
+ - go get -u github.com/kyoh86/richgo
+ - go get -u github.com/mitchellh/gox
+
+matrix:
+ allow_failures:
+ - go: tip
+ include:
+ - stage: diff
+ go: 1.13.x
+ script: make fmt
+ - stage: build
+ go: 1.13.x
+ script: make cobra_generator
+
+script:
+ - make test
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 0000000..298f0e2
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
new file mode 100644
index 0000000..e9740d1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -0,0 +1,36 @@
+BIN="./bin"
+SRC=$(shell find . -name "*.go")
+
+ifeq (, $(shell which richgo))
+$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
+endif
+
+.PHONY: fmt vet test cobra_generator install_deps clean
+
+default: all
+
+all: fmt vet test cobra_generator
+
+fmt:
+ $(info ******************** checking formatting ********************)
+ @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
+
+test: install_deps vet
+ $(info ******************** running tests ********************)
+ richgo test -v ./...
+
+cobra_generator: install_deps
+ $(info ******************** building generator ********************)
+ mkdir -p $(BIN)
+ make -C cobra all
+
+install_deps:
+ $(info ******************** downloading dependencies ********************)
+ go get -v ./...
+
+vet:
+ $(info ******************** vetting ********************)
+ go vet ./...
+
+clean:
+ rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 0000000..9d79934
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,770 @@
+![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+
+Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
+
+Many of the most widely used Go projects are built using Cobra, such as:
+[Kubernetes](http://kubernetes.io/),
+[Hugo](http://gohugo.io),
+[rkt](https://github.com/coreos/rkt),
+[etcd](https://github.com/coreos/etcd),
+[Moby (former Docker)](https://github.com/moby/moby),
+[Docker (distribution)](https://github.com/docker/distribution),
+[OpenShift](https://www.openshift.com/),
+[Delve](https://github.com/derekparker/delve),
+[GopherJS](http://www.gopherjs.org/),
+[CockroachDB](http://www.cockroachlabs.com/),
+[Bleve](http://www.blevesearch.com/),
+[ProjectAtomic (enterprise)](http://www.projectatomic.io/),
+[Giant Swarm's gsctl](https://github.com/giantswarm/gsctl),
+[Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack),
+[rclone](http://rclone.org/),
+[nehm](https://github.com/bogem/nehm),
+[Pouch](https://github.com/alibaba/pouch),
+[Istio](https://istio.io),
+[Prototool](https://github.com/uber/prototool),
+[mattermost-server](https://github.com/mattermost/mattermost-server),
+[Gardener](https://github.com/gardener/gardenctl),
+[Linkerd](https://linkerd.io/),
+[Github CLI](https://github.com/cli/cli)
+etc.
+
+[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
+[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+ * [Commands](#commands)
+ * [Flags](#flags)
+- [Installing](#installing)
+- [Getting Started](#getting-started)
+ * [Using the Cobra Generator](#using-the-cobra-generator)
+ * [Using the Cobra Library](#using-the-cobra-library)
+ * [Working with Flags](#working-with-flags)
+ * [Positional and Custom Arguments](#positional-and-custom-arguments)
+ * [Example](#example)
+ * [Help Command](#help-command)
+ * [Usage Message](#usage-message)
+ * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
+ * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
+ * [Generating documentation for your command](#generating-documentation-for-your-command)
+ * [Generating bash completions](#generating-bash-completions)
+ * [Generating zsh completions](#generating-zsh-completions)
+- [Contributing](#contributing)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful, modern CLI
+applications similar to the git and go tools.
+
+Cobra is also an application that generates scaffolding so you can rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated bash autocomplete for your application
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications will read like sentences when used. Users will know how
+to use the application because the commands themselves read naturally.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+or
+`APPNAME COMMAND ARG --FLAG`.
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command, we are telling Git to clone the URL bare:
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+ go get -u github.com/spf13/cobra/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Getting Started
+
+While you are welcome to provide your own organization, typically a Cobra-based
+application will follow this organizational structure:
+
+```
+ ▾ appName/
+ ▾ cmd/
+ add.go
+ your.go
+ commands.go
+ here.go
+ main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
+ love by spf13 and friends in Go.
+ Complete documentation is available at http://hugo.spf13.com`,
+ Run: func(cmd *cobra.Command, args []string) {
+ // Do Stuff Here
+ },
+}
+
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+package cmd
+
+import (
+ "fmt"
+ "os"
+
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+var (
+ // Used for flags.
+ cfgFile string
+ userLicense string
+
+ rootCmd = &cobra.Command{
+ Use: "cobra",
+ Short: "A generator for Cobra based Applications",
+ Long: `Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+ }
+)
+
+// Execute executes the root command.
+func Execute() error {
+ return rootCmd.Execute()
+}
+
+func init() {
+ cobra.OnInitialize(initConfig)
+
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
+ rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE ")
+ viper.SetDefault("license", "apache")
+
+ rootCmd.AddCommand(addCmd)
+ rootCmd.AddCommand(initCmd)
+}
+
+func er(msg interface{}) {
+ fmt.Println("Error:", msg)
+ os.Exit(1)
+}
+
+func initConfig() {
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ if err != nil {
+ er(err)
+ }
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".cobra")
+ }
+
+ viper.AutomaticEnv()
+
+ if err := viper.ReadInConfig(); err == nil {
+ fmt.Println("Using config file:", viper.ConfigFileUsed())
+ }
+}
+```
+
+### Create your main.go
+
+With the root command you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
+
+```go
+package main
+
+import (
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and typically are each given their own file
+inside of the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+ Use: "version",
+ Short: "Print the version number of Hugo",
+ Long: `All software has versions. This is Hugo's`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+ },
+}
+```
+
+## Working with Flags
+
+Flags provide modifiers to control how the action command operates.
+
+### Assign flags to a command
+
+Since flags are defined and used in different locations, we need to define a
+variable with the appropriate scope outside the command so the flag has
+somewhere to store its value.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assign a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent' meaning that this flag will be available to the
+command it's assigned to as well as every command under that command. For
+global flags, assign a flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally which will only apply to that specific command.
+
+```go
+localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command; any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+ Use: "print [OPTIONS] [COMMANDS]",
+ TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+ rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example, the persistent flag `author` is bound to `viper`.
+**Note:** the variable `author` will not be set to the value from the config file
+when the `--author` flag is not provided by the user.
+
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
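+
+For example, a command that must receive exactly one positional argument can rely on a built-in validator. This is a minimal sketch; the `greetCmd` name is made up for illustration:
+
+```go
+var greetCmd = &cobra.Command{
+  Use:   "greet [name]",
+  Short: "Greet exactly one person",
+  Args:  cobra.ExactArgs(1),
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hello, " + args[0])
+  },
+}
+```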
+
+An example of setting the custom validator:
+
+```go
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: func(cmd *cobra.Command, args []string) error {
+ if len(args) < 1 {
+ return errors.New("requires a color argument")
+ }
+ if myapp.IsValidColor(args[0]) {
+ return nil
+ }
+ return fmt.Errorf("invalid color specified: %s", args[0])
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+ var echoTimes int
+
+ var cmdPrint = &cobra.Command{
+ Use: "print [string to print]",
+ Short: "Print anything to the screen",
+ Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdEcho = &cobra.Command{
+ Use: "echo [string to echo]",
+ Short: "Echo anything to the screen",
+ Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdTimes = &cobra.Command{
+ Use: "times [string to echo]",
+ Short: "Echo anything to the screen more times",
+ Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ for i := 0; i < echoTimes; i++ {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ }
+ },
+ }
+
+ cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+ var rootCmd = &cobra.Command{Use: "app"}
+ rootCmd.AddCommand(cmdPrint, cmdEcho)
+ cmdEcho.AddCommand(cmdTimes)
+ rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will also
+support all other commands as input. Say, for instance, you have a command called
+'create' without any additional configuration; Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+ $ cobra help
+
+ Cobra is a CLI library for Go that empowers applications.
+ This application is a tool to generate the needed files
+ to quickly create a Cobra application.
+
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with the following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any child commands.
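+
+As a minimal sketch, a custom help template can simply rearrange the fields Cobra exposes to the template engine (the template below is illustrative, not the built-in default):
+
+```go
+rootCmd.SetHelpTemplate(`{{.Long}}
+
+Usage:
+  {{.UseLine}}
+
+Run "{{.CommandPath}} [command] --help" for more information about a command.
+`)
+```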
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+ $ cobra --invalid
+ Error: unknown flag: --invalid
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
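+
+A minimal sketch of both pieces (the application name and version string are hypothetical):
+
+```go
+var rootCmd = &cobra.Command{
+  Use:     "app",
+  Short:   "An example application",
+  Version: "1.0.0",
+}
+
+func init() {
+  // Optional: customize what `app --version` prints.
+  rootCmd.SetVersionTemplate("{{.Name}} version {{.Version}}\n")
+}
+```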
+
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+
+ var rootCmd = &cobra.Command{
+ Use: "root [sub]",
+ Short: "My root command",
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+ },
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ var subCmd = &cobra.Command{
+ Use: "sub [no options!]",
+ Short: "My subcommand",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ rootCmd.AddCommand(subCmd)
+
+ rootCmd.SetArgs([]string{""})
+ rootCmd.Execute()
+ fmt.Println()
+ rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+ rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+ server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but make sense in your set of commands, and for which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+ delete
+
+Run 'kubectl help' for usage.
+```
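+
+A sketch of how such a command might declare its suggestions (the `deleteCmd` name and the suggested strings are hypothetical):
+
+```go
+var deleteCmd = &cobra.Command{
+  Use:        "delete",
+  Short:      "Delete a resource",
+  SuggestFor: []string{"remove", "rm"},
+  Run: func(cmd *cobra.Command, args []string) {
+    // ...
+  },
+}
+```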
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. in the following formats:
+
+- [Markdown](doc/md_docs.md)
+- [ReStructured Text](doc/rest_docs.md)
+- [Man Page](doc/man_docs.md)
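+
+As a rough sketch, assuming the `github.com/spf13/cobra/doc` sub-package and a hypothetical `app` root command, Markdown documentation can be generated like this:
+
+```go
+package main
+
+import (
+  "log"
+
+  "github.com/spf13/cobra"
+  "github.com/spf13/cobra/doc"
+)
+
+func main() {
+  rootCmd := &cobra.Command{Use: "app", Short: "An example application"}
+  // Writes one Markdown file per command into ./docs; the directory must already exist.
+  if err := doc.GenMarkdownTree(rootCmd, "./docs"); err != nil {
+    log.Fatal(err)
+  }
+}
+```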
+
+## Generating bash completions
+
+Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
+
+## Generating zsh completions
+
+Cobra can generate a zsh-completion file. Read more about it in
+[Zsh Completions](zsh_completions.md).
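+
+As a minimal sketch, assuming a `rootCmd` as in the earlier examples and the usual `os`/`log` imports, the script can be written to stdout from inside a command's Run function:
+
+```go
+// Inside a hypothetical "completion" subcommand's Run function:
+if err := rootCmd.GenZshCompletion(os.Stdout); err != nil {
+  log.Fatal(err)
+}
+```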
+
+# Contributing
+
+1. Fork it
+2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Make changes and add them (`git add .`)
+5. Commit your changes (`git commit -m 'Add some feature'`)
+6. Push to the branch (`git push origin my-new-feature`)
+7. Create new pull request
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 0000000..70e9b26
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,109 @@
+package cobra
+
+import (
+ "fmt"
+ "strings"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// Legacy arg validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ // Remove any description that may be included in ValidArgs.
+ // A description is following a tab character.
+ var validArgs []string
+ for _, v := range cmd.ValidArgs {
+ validArgs = append(validArgs, strings.Split(v, "\t")[0])
+ }
+
+ for _, v := range args {
+ if !stringInSlice(v, validArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there is not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactValidArgs returns an error if
+// there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`
+func ExactValidArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if err := ExactArgs(n)(cmd, args); err != nil {
+ return err
+ }
+ return OnlyValidArgs(cmd, args)
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 0000000..1e27188
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,641 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf *bytes.Buffer, name string) {
+ buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ buf.WriteString(fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_go_custom_completion()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+
+ local out requestComp lastParam lastChar comp directive args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows to handle aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is not directive specified
+ directive=0
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+
+ if [ $((directive & %[3]d)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ return
+ else
+ if [ $((directive & %[4]d)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ compopt -o nospace
+ fi
+ fi
+ if [ $((directive & %[5]d)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ compopt +o default
+ fi
+ fi
+
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${out[*]}" -- "$cur")
+ fi
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ local comp
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${allflags[*]}" -- "$cur")
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+ return 0;
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions=("${must_have_one_noun[@]}")
+ elif [[ -n "${has_completion_function}" ]]; then
+ # if a go completion function is provided, defer to that function
+ completions=()
+ __%[1]s_handle_go_custom_completion
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name, ShellCompNoDescRequestCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp))
+}
+
+func writePostscript(buf *bytes.Buffer, name string) {
+ name = strings.Replace(name, ":", "__", -1)
+ buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
+ buf.WriteString(fmt.Sprintf(`{
+ local cur prev words cword
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local has_completion_function
+ local last_command
+ local nouns=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c == cmd.helpCommand {
+ continue
+ }
+ buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ buf.WriteString("\n")
+}
+
+func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ buf.WriteString(" flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ if len(flag.NoOptDefVal) == 0 {
+ format = " two_word_flags+=(\"--%s\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ }
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+}
+
+// Setup annotations for go completions for registered flags
+func prepareCustomAnnotationsForFlags(cmd *Command) {
+ for flag := range flagCompletionFunctions {
+ // Make sure the completion script calls the __*_go_custom_completion function for
+ // every registered flag. We need to do this here (and not when the flag was registered
+ // for completion) so that we can know the root command name for the prefix
+ // of ___go_custom_completion
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
+ }
+}
+
+func writeFlags(buf *bytes.Buffer, cmd *Command) {
+ prepareCustomAnnotationsForFlags(cmd)
+ buf.WriteString(` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ if localNonPersistentFlags.Lookup(flag.Name) != nil {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ buf.WriteString("\n")
+}
+
+func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ for key := range flag.Annotations {
+ switch key {
+ case BashCompOneRequiredFlag:
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
+ }
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" must_have_one_noun=()\n")
+ sort.Sort(sort.StringSlice(cmd.ValidArgs))
+ for _, value := range cmd.ValidArgs {
+ // Remove any description that may be included following a tab character.
+ // Descriptions are not supported by bash completion.
+ value = strings.Split(value, "\t")[0]
+ buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+ if cmd.ValidArgsFunction != nil {
+ buf.WriteString(" has_completion_function=1\n")
+ }
+}
+
+func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Sort(sort.StringSlice(cmd.Aliases))
+
+ buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ buf.WriteString(` fi`)
+ buf.WriteString("\n")
+}
+func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" noun_aliases=()\n")
+ sort.Sort(sort.StringSlice(cmd.ArgAliases))
+ for _, value := range cmd.ArgAliases {
+ buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf *bytes.Buffer, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c == cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.Replace(commandName, " ", "_", -1)
+ commandName = strings.Replace(commandName, ":", "__", -1)
+
+ if cmd.Root() == cmd {
+ buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
+ buf.WriteString("\n")
+ buf.WriteString(" command_aliases=()\n")
+ buf.WriteString("\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ buf.WriteString("}\n\n")
+}
+
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
new file mode 100644
index 0000000..e61a3a6
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -0,0 +1,383 @@
+# Generating Bash Completions For Your Own cobra.Command
+
+If you are using the generator, you can create a completion command by running:
+
+```bash
+cobra add completion
+```
+
+Update the help text to show how to install the bash completion on Linux; the [Kubectl docs show mac options](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion).
+
+Writing the shell script to stdout allows the most flexible use.
+
+```go
+// completionCmd represents the completion command
+var completionCmd = &cobra.Command{
+ Use: "completion",
+ Short: "Generates bash completion scripts",
+ Long: `To load completion run
+
+. <(bitbucket completion)
+
+To configure your bash shell to load completions for each session add to your bashrc
+
+# ~/.bashrc or ~/.profile
+. <(bitbucket completion)
+`,
+ Run: func(cmd *cobra.Command, args []string) {
+ rootCmd.GenBashCompletion(os.Stdout);
+ },
+}
+```
+
+**Note:** The cobra generator may include messages printed to stdout (for example, if the config file is loaded); this will break the autocomplete script.
+
+
+## Example from kubectl
+
+Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows:
+
+```go
+package main
+
+import (
+ "io/ioutil"
+ "os"
+
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
+ "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+)
+
+func main() {
+ kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+ kubectl.GenBashCompletionFile("out.sh")
+}
+```
+
+`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
+
+## Have the completions code complete your 'nouns'
+
+### Static completion of nouns
+
+This method allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field.
+For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+ Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+ Short: "Display one or many resources",
+ Long: get_long,
+ Example: get_example,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := RunGet(f, out, cmd, args)
+ util.CheckErr(err)
+ },
+ ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+### Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+ ...
+ ValidArgs: validArgs,
+ ArgAliases: argAliases
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+### Dynamic completion of nouns
+
+In some cases it is not possible to provide a list of possible completions in advance. Instead, the list of completions must be determined at execution time. Cobra provides two ways of defining such dynamic completion of nouns. Note that both these methods can be used alongside each other as long as they are not both used for the same command.
+
+**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion. It is therefore recommended to use *Custom Completions written in Go*.
+
+#### 1. Custom completions of nouns written in Go
+
+In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
+Simplified code from `helm status` looks like:
+
+```go
+cmd := &cobra.Command{
+ Use: "status RELEASE_NAME",
+ Short: "Display the status of the named release",
+ Long: status_long,
+  RunE: func(cmd *cobra.Command, args []string) error {
+    return RunGet(args[0])
+  },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+}
+```
+Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
+Notice we put the `ValidArgsFunction` on the `status` subcommand. Let's assume the Helm releases on the cluster are `harbor`, `notary`, `rook` and `thanos`; then this dynamic completion will give results like
+
+```bash
+# helm status [tab][tab]
+harbor notary rook thanos
+```
+You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that allow you to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator, such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`:
+```go
+// Indicates an error occurred and completions should be ignored.
+ShellCompDirectiveError
+// Indicates that the shell should not add a space after the completion,
+// even if there is a single completion provided.
+ShellCompDirectiveNoSpace
+// Indicates that the shell should not provide file completion even when
+// no completion is provided.
+// This currently does not work for zsh or bash < 4
+ShellCompDirectiveNoFileComp
+// Indicates that the shell will perform its default behavior after completions
+// have been provided (this implies !ShellCompDirectiveNoSpace && !ShellCompDirectiveNoFileComp).
+ShellCompDirectiveDefault
+```
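+
+As an illustration (a minimal sketch, not taken from helm; `getPathPrefixes` is a hypothetical helper), a completion function whose suggestions are meant to be typed further could combine two directives:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+    // Keep the cursor right after the inserted suggestion and suppress file
+    // completion, since the user is expected to continue typing.
+    return getPathPrefixes(toComplete), cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
+},
+```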
+
+When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
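+
+For example (a sketch, not taken from helm, assuming the command defines a string flag named `namespace` and a hypothetical helper `getReleasesInNamespace`), the registered function can simply read the already-parsed flag value:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+    // Cobra has already parsed --namespace by the time this function runs.
+    namespace, _ := cmd.Flags().GetString("namespace")
+    return getReleasesInNamespace(namespace, toComplete), cobra.ShellCompDirectiveNoFileComp
+},
+```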
+
+##### Debugging
+
+Cobra achieves dynamic completions written in Go through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly:
+```bash
+# helm __complete status har
+harbor
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** If the noun to complete is empty, you must pass an empty parameter to the `__complete` command:
+```bash
+# helm __complete status ""
+harbor
+notary
+rook
+thanos
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
+```go
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and optionally prints to stderr.
+cobra.CompDebug(msg string, printToStdErr bool)
+cobra.CompDebugln(msg string, printToStdErr bool)
+
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and to stderr.
+cobra.CompError(msg string)
+cobra.CompErrorln(msg string)
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code, as they will be interpreted as completion choices by the completion script. Instead, use the Cobra-provided debugging functions mentioned above.
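+
+For instance, a minimal sketch of instrumenting the earlier `ValidArgsFunction` with a debug trace (the second argument also echoes the message to stderr):
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+    // Written to BASH_COMP_DEBUG_FILE (if set) and, because of 'true', to stderr.
+    cobra.CompDebugln("completing release names, prefix: "+toComplete, true)
+    return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+},
+```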
+
+#### 2. Custom completions of nouns written in Bash
+
+This method allows you to inject bash functions into the completion script. Those functions are then responsible for providing the completion choices for your nouns.
+
+Here is some more concrete code that works in kubernetes:
+
+```bash
+const (
+ bash_completion_func = `__kubectl_parse_get()
+{
+ local kubectl_output out
+ if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
+ out=($(echo "${kubectl_output}" | awk '{print $1}'))
+ COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
+ fi
+}
+
+__kubectl_get_resource()
+{
+ if [[ ${#nouns[@]} -eq 0 ]]; then
+ return 1
+ fi
+ __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
+ if [[ $? -eq 0 ]]; then
+ return 0
+ fi
+}
+
+__kubectl_custom_func() {
+ case ${last_command} in
+ kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+ __kubectl_get_resource
+ return
+ ;;
+ *)
+ ;;
+ esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+ Use: "kubectl",
+ Short: "kubectl controls the Kubernetes cluster manager",
+ Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+ Run: runHelp,
+ BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built-in processor is unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]`, `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper, `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
+
+## Mark flags as required
+
+Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+# kubectl exec [tab][tab]
+-c --container= -p --pod=
+```
+
+# Specify valid filename extensions for flags that take a filename
+
+In this example we use `--filename=` and expect to get a JSON or YAML file as the argument. To make this easier we annotate the `--filename` flag with the valid filename extensions.
+
+```go
+ annotations := []string{"json", "yaml", "yml"}
+ annotation := make(map[string][]string)
+ annotation[cobra.BashCompFilenameExt] = annotations
+
+ flag := &pflag.Flag{
+ Name: "filename",
+ Shorthand: "f",
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ Annotations: annotation,
+ }
+ cmd.Flags().AddFlag(flag)
+```
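+
+If the version of Cobra vendored here provides it, the `MarkFlagFilename` helper is a shorter way to set the same annotation; a sketch, reusing the `usage` variable from above:
+```go
+ cmd.Flags().StringP("filename", "f", "", usage)
+ // Equivalent to setting the cobra.BashCompFilenameExt annotation by hand.
+ cmd.MarkFlagFilename("filename", "json", "yaml", "yml")
+```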
+
+Now when you run a command with this filename flag you'll get something like
+
+```bash
+# kubectl create -f [tab][tab]
+test/ example/ rpmbuild/
+hello.yml test.json
+```
+
+So while there are many other files in the current working directory, it only shows subdirectories and files with the valid extensions.
+
+# Specify custom flag completion
+
+As for nouns, Cobra provides two ways of defining dynamic completion of flags. Note that both methods can be used alongside each other as long as they are not both used for the same flag.
+
+**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion. It is therefore recommended to use *Custom Completions written in Go*.
+
+## 1. Custom completions of flags written in Go
+
+To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function in the following manner:
+
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault
+})
+```
+Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so:
+
+```bash
+# helm status --output [tab][tab]
+json table yaml
+```
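+
+A slightly fuller sketch (not from helm; it assumes the `strings` package is imported) that filters the suggestions by the prefix the user has already typed:
+```go
+cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+    var completions []string
+    for _, format := range []string{"json", "table", "yaml"} {
+        // Only suggest output formats matching what has been typed so far.
+        if strings.HasPrefix(format, toComplete) {
+            completions = append(completions, format)
+        }
+    }
+    return completions, cobra.ShellCompDirectiveNoFileComp
+})
+```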
+
+### Debugging
+
+You can also easily debug your Go completion code for flags:
+```bash
+# helm __complete status --output ""
+json
+table
+yaml
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code, as they will be interpreted as completion choices by the completion script. Instead, use the Cobra-provided debugging functions mentioned in the section above.
+
+## 2. Custom completions of flags written in Bash
+
+Alternatively, you can use bash code for flag custom completion. Similar to the filename
+completion and filtering using `cobra.BashCompFilenameExt`, you can specify
+a custom flag completion bash function with `cobra.BashCompCustom`:
+
+```go
+ annotation := make(map[string][]string)
+ annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+ flag := &pflag.Flag{
+ Name: "namespace",
+ Usage: usage,
+ Annotations: annotation,
+ }
+ cmd.Flags().AddFlag(flag)
+```
+
+In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+ local template
+ template="{{ range .items }}{{ .metadata.name }} {{ end }}"
+ local kubectl_out
+ if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+ COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
+ fi
+}
+```
+# Using bash aliases for commands
+
+You can also configure bash aliases for your commands, and the aliases will support completions as well.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname [tab][tab]
+completion firstcommand secondcommand
+```
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 0000000..d01becc
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,207 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to automatically enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration = 5 * time.Second
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ template := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(template, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ }
+ for i := range d {
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 0000000..88e6ed7
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1634 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+// FParseErrWhitelist configures Flag parse errors to be ignored
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example is examples of how to use the command.
+ Example string
+
+ // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
+ ValidArgs []string
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion.
+ // It is a dynamic version of using ValidArgs.
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+
+ // Expected arguments
+ Args PositionalArgs
+
+ // ArgAliases is List of aliases for ValidArgs.
+ // These are not suggested to the user in the bash completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+ // BashCompletionFunction holds custom bash functions used by the bash autocompletion generator.
+ BashCompletionFunction string
+
+ // Deprecated, if non-empty, marks this command as deprecated; the string is printed when the command is used.
+ Deprecated string
+
+ // Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ // command does not define one.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // SilenceErrors is an option to quiet errors down stream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+ // will be printed when generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+ // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // FParseErrWhitelist flag parse errors to be ignored
+ FParseErrWhitelist FParseErrWhitelist
+
+ ctx context.Context
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Max lengths of commands' string lengths for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+ // commandsAreSorted defines whether the command slice is sorted or not.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and children commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate string
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate string
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // versionTemplate is the version template defined by user.
+ versionTemplate string
+
+ // inReader is a reader defined by the user that replaces stdin
+ inReader io.Reader
+ // outWriter is a writer defined by the user that replaces stdout
+ outWriter io.Writer
+ // errWriter is a writer defined by the user that replaces stderr
+ errWriter io.Writer
+}
+
+// Context returns underlying command context. If command wasn't
+// executed with ExecuteContext Context returns Background context.
+func (c *Command) Context() context.Context {
+ return c.ctx
+}
+
+// SetArgs sets arguments for the command. It is set to os.Args[1:] by default; overriding it is
+// particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+// Deprecated: Use SetOut and/or SetErr instead
+func (c *Command) SetOutput(output io.Writer) {
+ c.outWriter = output
+ c.errWriter = output
+}
+
+// SetOut sets the destination for usage messages.
+// If newOut is nil, os.Stdout is used.
+func (c *Command) SetOut(newOut io.Writer) {
+ c.outWriter = newOut
+}
+
+// SetErr sets the destination for error messages.
+// If newErr is nil, os.Stderr is used.
+func (c *Command) SetErr(newErr io.Writer) {
+ c.errWriter = newErr
+}
+
+// SetIn sets the source for input data
+// If newIn is nil, os.Stdin is used.
+func (c *Command) SetIn(newIn io.Reader) {
+ c.inReader = newIn
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ c.versionTemplate = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+// ErrOrStderr returns output to stderr
+func (c *Command) ErrOrStderr() io.Writer {
+ return c.getErr(os.Stderr)
+}
+
+// InOrStdin returns input to stdin
+func (c *Command) InOrStdin() io.Reader {
+ return c.getIn(os.Stdin)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.outWriter != nil {
+ return c.outWriter
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+func (c *Command) getErr(def io.Writer) io.Writer {
+ if c.errWriter != nil {
+ return c.errWriter
+ }
+ if c.HasParent() {
+ return c.parent.getErr(def)
+ }
+ return def
+}
+
+func (c *Command) getIn(def io.Reader) io.Reader {
+ if c.inReader != nil {
+ return c.inReader
+ }
+ if c.HasParent() {
+ return c.parent.getIn(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ // The help should be sent to stdout
+ // See https://github.com/spf13/cobra/issues/1002
+ err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ }
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString returns usage string.
+func (c *Command) UsageString() string {
+ // Storing normal writers
+ tmpOutput := c.outWriter
+ tmpErr := c.errWriter
+
+ bb := new(bytes.Buffer)
+ c.outWriter = bb
+ c.errWriter = bb
+
+ c.Usage()
+
+ // Setting things back to normal
+ c.outWriter = tmpOutput
+ c.errWriter = tmpErr
+
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns padding for the usage.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns padding for the command path.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != "" {
+ return c.usageTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate returns the help template for the command.
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != "" {
+ return c.helpTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != "" {
+ return c.versionTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
+
+// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]).
+func argsMinusFirstX(args []string, x string) []string {
+ for i, y := range args {
+ if x == y {
+ ret := []string{}
+ ret = append(ret, args[:i]...)
+ ret = append(ret, args[i+1:]...)
+ return ret
+ }
+ }
+ return args
+}
+
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[1] == '-') ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree
+// Meant to be run on the highest node. Only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ suggestionsString := ""
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ suggestionsString += "\n\nDid you mean this?\n"
+ for _, s := range suggestions {
+ suggestionsString += fmt.Sprintf("\t%v\n", s)
+ }
+ }
+ return suggestionsString
+}
+
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if cmd.Name() == next || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ if len(matches) == 1 {
+ return matches[0]
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("Called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is called, regardless of other flags, return we want help.
+ // Also say we need help if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool("help")
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ break
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.validateRequiredFlags(); err != nil {
+ return err
+ }
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ break
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+// ExecuteContext is the same as Execute(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
+func (c *Command) ExecuteContext(ctx context.Context) error {
+ c.ctx = ctx
+ return c.Execute()
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and run through the command tree finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ if c.ctx == nil {
+ c.ctx = context.Background()
+ }
+
+ // Regardless of what command execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help as the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpCmd()
+
+ args := c.args
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ }
+
+ // initialize the hidden command to be used for bash completion
+ c.initCompleteCmd(args)
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If found parse to a subcommand and then failed, talk about the subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.Println("Error:", err.Error())
+ c.Printf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ // We have to pass global context to children command
+ // if context is present on the parent command.
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if err == flag.ErrHelp {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If root command has SilentErrors flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.Println("Error:", err.Error())
+ }
+
+ // If root command has SilentUsage flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return nil
+ }
+ return c.Args(c, args)
+}
+
+func (c *Command) validateRequiredFlags() error {
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
+
+// InitDefaultHelpFlag adds default help flag to c.
+// It is called automatically by executing the c or by calling help and usage.
+// If c already has help flag, it will do nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("help") == nil {
+ usage := "help for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().BoolP("help", "h", false, usage)
+ }
+}
+
+// InitDefaultVersionFlag adds default version flag to c.
+// It is called automatically by executing the c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ if c.Flags().ShorthandLookup("v") == nil {
+ c.Flags().BoolP("version", "v", false, usage)
+ } else {
+ c.Flags().Bool("version", false, usage)
+ }
+ }
+}
+
+// InitDefaultHelpCmd adds default help command to c.
+// It is called automatically by executing the c or by calling help and usage.
+// If c already has help command or c has no subcommands, it will do nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ c.Root().Usage()
+ } else {
+ cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+ cmd.Help()
+ }
+ },
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if it already sorted or sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErr(i ...interface{}) {
+ fmt.Fprint(c.ErrOrStderr(), i...)
+}
+
+// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrln(i ...interface{}) {
+ c.PrintErr(fmt.Sprintln(i...))
+}
+
+// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrf(format string, i ...interface{}) {
+ c.PrintErr(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + c.Use
+ } else {
+ useline = c.Use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if a == s {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of aliases start
+// with prefix
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns a list of the command name and all aliases
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for a matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find a matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ // do it here after merging all flags and just before parse
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it creates a new one.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 0000000..6159c1c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 0000000..8768b17
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,26 @@
+// +build windows
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ if MousetrapDisplayDuration > 0 {
+ time.Sleep(MousetrapDisplayDuration)
+ } else {
+ c.Println("Press return to continue...")
+ fmt.Scanln()
+ }
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go
new file mode 100644
index 0000000..ba57327
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/custom_completions.go
@@ -0,0 +1,384 @@
+package cobra
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ // ShellCompRequestCmd is the name of the hidden command that is used to request
+ // completion results from the program. It is used by the shell completion scripts.
+ ShellCompRequestCmd = "__complete"
+ // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
+ // completion results without their description. It is used by the shell completion scripts.
+ ShellCompNoDescRequestCmd = "__completeNoDesc"
+)
+
+// Global map of flag completion functions.
+var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+
+// ShellCompDirective is a bit map representing the different behaviors the shell
+// can be instructed to have once completions have been provided.
+type ShellCompDirective int
+
+const (
+ // ShellCompDirectiveError indicates an error occurred and completions should be ignored.
+ ShellCompDirectiveError ShellCompDirective = 1 << iota
+
+ // ShellCompDirectiveNoSpace indicates that the shell should not add a space
+ // after the completion even if there is a single completion provided.
+ ShellCompDirectiveNoSpace
+
+ // ShellCompDirectiveNoFileComp indicates that the shell should not provide
+ // file completion even when no completion is provided.
+ // This currently does not work for zsh or bash < 4
+ ShellCompDirectiveNoFileComp
+
+ // ShellCompDirectiveDefault indicates to let the shell perform its default
+ // behavior after completions have been provided.
+ ShellCompDirectiveDefault ShellCompDirective = 0
+)
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+ }
+ if _, exists := flagCompletionFunctions[flag]; exists {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+ }
+ flagCompletionFunctions[flag] = f
+ return nil
+}
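+
+// Illustrative sketch, not part of the upstream API: registering a completion
+// function for a hypothetical --region flag. The flag name and the candidate
+// values below are assumptions made purely for this example.
+//
+//    cmd.Flags().String("region", "", "cloud region to target")
+//    _ = cmd.RegisterFlagCompletionFunc("region",
+//        func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+//            candidates := []string{"us-east-1", "eu-west-1", "ap-south-1"}
+//            var comps []string
+//            for _, r := range candidates {
+//                if strings.HasPrefix(r, toComplete) {
+//                    comps = append(comps, r)
+//                }
+//            }
+//            // Suppress file completion; only the listed regions make sense here.
+//            return comps, ShellCompDirectiveNoFileComp
+//        })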
+
+// Returns a string listing the different directives enabled in the specified parameter.
+func (d ShellCompDirective) string() string {
+ var directives []string
+ if d&ShellCompDirectiveError != 0 {
+ directives = append(directives, "ShellCompDirectiveError")
+ }
+ if d&ShellCompDirectiveNoSpace != 0 {
+ directives = append(directives, "ShellCompDirectiveNoSpace")
+ }
+ if d&ShellCompDirectiveNoFileComp != 0 {
+ directives = append(directives, "ShellCompDirectiveNoFileComp")
+ }
+ if len(directives) == 0 {
+ directives = append(directives, "ShellCompDirectiveDefault")
+ }
+
+ if d > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp {
+ return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+ }
+ return strings.Join(directives, ", ")
+}
+
+// Adds a special hidden command that can be used to request custom completions.
+func (c *Command) initCompleteCmd(args []string) {
+ completeCmd := &Command{
+ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+ Aliases: []string{ShellCompNoDescRequestCmd},
+ DisableFlagsInUseLine: true,
+ Hidden: true,
+ DisableFlagParsing: true,
+ Args: MinimumNArgs(1),
+ Short: "Request shell completion choices for the specified command-line",
+ Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+ "to request completion choices for the specified command-line.", ShellCompRequestCmd),
+ Run: func(cmd *Command, args []string) {
+ finalCmd, completions, directive, err := cmd.getCompletions(args)
+ if err != nil {
+ CompErrorln(err.Error())
+ // Keep going for multiple reasons:
+ // 1- There could be some valid completions even though there was an error
+ // 2- Even without completions, we need to print the directive
+ }
+
+ noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
+ for _, comp := range completions {
+ if noDescriptions {
+ // Remove any description that may be included following a tab character.
+ comp = strings.Split(comp, "\t")[0]
+ }
+ // Print each possible completion to stdout for the completion script to consume.
+ fmt.Fprintln(finalCmd.OutOrStdout(), comp)
+ }
+
+ if directive > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp {
+ directive = ShellCompDirectiveDefault
+ }
+
+ // As the last printout, print the completion directive for the completion script to parse.
+ // The directive integer must be the last character following a single colon (:).
+ // The completion script expects :<directive>
+ fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
+
+ // Print some helpful info to stderr for the user to understand.
+ // Output from stderr must be ignored by the completion script.
+ fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+ },
+ }
+ c.AddCommand(completeCmd)
+ subCmd, _, err := c.Find(args)
+ if err != nil || subCmd.Name() != ShellCompRequestCmd {
+ // Only create this special command if it is actually being called.
+ // This reduces possible side-effects of creating such a command;
+ // for example, having this command would cause problems to a
+ // cobra program that only consists of the root command, since this
+ // command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd)
+ }
+}
+
+func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
+ var completions []string
+
+ // The last argument, which is not completely typed by the user,
+ // should not be part of the list of arguments
+ toComplete := args[len(args)-1]
+ trimmedArgs := args[:len(args)-1]
+
+ // Find the real command for which completion must be performed
+ finalCmd, finalArgs, err := c.Root().Find(trimmedArgs)
+ if err != nil {
+ // Unable to find the real command. E.g., someInvalidCmd
+ return c, completions, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
+ }
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
+ // the flag to be complete
+ if len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") {
+ // We are completing a flag name
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ })
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ })
+
+ directive := ShellCompDirectiveDefault
+ if len(completions) > 0 {
+ if strings.HasSuffix(completions[0], "=") {
+ directive = ShellCompDirectiveNoSpace
+ }
+ }
+ return finalCmd, completions, directive, nil
+ }
+
+ var flag *pflag.Flag
+ if !finalCmd.DisableFlagParsing {
+ // We only do flag completion if we are allowed to parse flags
+ // This is important for commands which have requested to do their own flag completion.
+ flag, finalArgs, toComplete, err = checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+ if err != nil {
+ // Error while attempting to parse flags
+ return finalCmd, completions, ShellCompDirectiveDefault, err
+ }
+ }
+
+ if flag == nil {
+ // Complete subcommand names
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() && strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ }
+
+ if len(finalCmd.ValidArgs) > 0 {
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
+ }
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, ShellCompDirectiveNoFileComp, nil
+ }
+
+ // Always let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
+ }
+
+ // Parse the flags and extract the arguments to prepare for calling the completion function
+ if err = finalCmd.ParseFlags(finalArgs); err != nil {
+ return finalCmd, completions, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ // Find the completion function for the flag or command
+ var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+ if flag != nil {
+ completionFn = flagCompletionFunctions[flag]
+ } else {
+ completionFn = finalCmd.ValidArgsFunction
+ }
+ if completionFn == nil {
+ // Go custom completion not supported/needed for this flag or command
+ return finalCmd, completions, ShellCompDirectiveDefault, nil
+ }
+
+ // Call the registered completion function to get the completions
+ comps, directive := completionFn(finalCmd, finalArgs, toComplete)
+ completions = append(completions, comps...)
+ return finalCmd, completions, directive, nil
+}
+
+func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
+ if nonCompletableFlag(flag) {
+ return []string{}
+ }
+
+ var completions []string
+ flagName := "--" + flag.Name
+ if strings.HasPrefix(flagName, toComplete) {
+ // Flag without the =
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+
+ if len(flag.NoOptDefVal) == 0 {
+ // Flag requires a value, so it can be suffixed with =
+ flagName += "="
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+ }
+
+ flagName = "-" + flag.Shorthand
+ if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+
+ return completions
+}
+
+func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
+ var flagName string
+ trimmedArgs := args
+ flagWithEqual := false
+ if isFlagArg(lastArg) {
+ if index := strings.Index(lastArg, "="); index >= 0 {
+ flagName = strings.TrimLeft(lastArg[:index], "-")
+ lastArg = lastArg[index+1:]
+ flagWithEqual = true
+ } else {
+ return nil, nil, "", errors.New("Unexpected completion request for flag")
+ }
+ }
+
+ if len(flagName) == 0 {
+ if len(args) > 0 {
+ prevArg := args[len(args)-1]
+ if isFlagArg(prevArg) {
+ // Only consider the case where the flag does not contain an =.
+ // If the flag contains an = it means it has already been fully processed,
+ // so we don't need to deal with it here.
+ if index := strings.Index(prevArg, "="); index < 0 {
+ flagName = strings.TrimLeft(prevArg, "-")
+
+ // Remove the uncompleted flag or else there could be an error created
+ // for an invalid value for that flag
+ trimmedArgs = args[:len(args)-1]
+ }
+ }
+ }
+ }
+
+ if len(flagName) == 0 {
+ // Not doing flag completion
+ return nil, trimmedArgs, lastArg, nil
+ }
+
+ flag := findFlag(finalCmd, flagName)
+ if flag == nil {
+ // Flag not supported by this command, nothing to complete
+ err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName)
+ return nil, nil, "", err
+ }
+
+ if !flagWithEqual {
+ if len(flag.NoOptDefVal) != 0 {
+ // We had assumed we were dealing with a two-word flag, but the flag is a boolean flag.
+ // In that case, there is no value following it, so we are not really doing flag completion.
+ // Reset everything to do noun completion.
+ trimmedArgs = args
+ flag = nil
+ }
+ }
+
+ return flag, trimmedArgs, lastArg, nil
+}
+
+func findFlag(cmd *Command, name string) *pflag.Flag {
+ flagSet := cmd.Flags()
+ if len(name) == 1 {
+ // First convert the short flag into a long flag
+ // as the cmd.Flag() search only accepts long flags
+ if short := flagSet.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ set := cmd.InheritedFlags()
+ if short = set.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ return nil
+ }
+ }
+ }
+ return cmd.Flag(name)
+}
+
+// CompDebug prints the specified string to the same file as where the
+// completion script prints its logs.
+// Note that completion printouts should never be on stdout as they would
+// be wrongly interpreted as actual completion choices by the completion script.
+func CompDebug(msg string, printToStdErr bool) {
+ msg = fmt.Sprintf("[Debug] %s", msg)
+
+ // Such logs are only printed when the user has set the environment
+ // variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+ if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
+ f, err := os.OpenFile(path,
+ os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err == nil {
+ defer f.Close()
+ f.WriteString(msg)
+ }
+ }
+
+ if printToStdErr {
+ // Must print to stderr for this not to be read by the completion script.
+ // Use Fprint rather than Fprintf so a '%' in msg is not treated as a format verb.
+ fmt.Fprint(os.Stderr, msg)
+ }
+}
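+
+// Illustrative sketch (the variable names toComplete and comps are assumptions
+// for the example): a custom completion function can trace its decisions with
+// these helpers; the output is only written once the user has pointed the
+// BASH_COMP_DEBUG_FILE environment variable at a file.
+//
+//    CompDebugln(fmt.Sprintf("completing %q with %d candidates", toComplete, len(comps)), false)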
+
+// CompDebugln prints the specified string with a newline at the end
+// to the same file as where the completion script prints its logs.
+// Such logs are only printed when the user has set the environment
+// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+func CompDebugln(msg string, printToStdErr bool) {
+ CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
+}
+
+// CompError prints the specified completion message to stderr.
+func CompError(msg string) {
+ msg = fmt.Sprintf("[Error] %s", msg)
+ CompDebug(msg, true)
+}
+
+// CompErrorln prints the specified completion message to stderr with a newline at the end.
+func CompErrorln(msg string) {
+ CompError(fmt.Sprintf("%s\n", msg))
+}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
new file mode 100644
index 0000000..c83609c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -0,0 +1,172 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ buf.WriteString(fmt.Sprintf(`
+function __%[1]s_debug
+ set file "$BASH_COMP_DEBUG_FILE"
+ if test -n "$file"
+ echo "$argv" >> $file
+ end
+end
+
+function __%[1]s_perform_completion
+ __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv"
+
+ set args (string split -- " " "$argv")
+ set lastArg "$args[-1]"
+
+ __%[1]s_debug "args: $args"
+ __%[1]s_debug "last arg: $lastArg"
+
+ set emptyArg ""
+ if test -z "$lastArg"
+ __%[1]s_debug "Setting emptyArg"
+ set emptyArg \"\"
+ end
+ __%[1]s_debug "emptyArg: $emptyArg"
+
+ set requestComp "$args[1] %[2]s $args[2..-1] $emptyArg"
+ __%[1]s_debug "Calling $requestComp"
+
+ set results (eval $requestComp 2> /dev/null)
+ set comps $results[1..-2]
+ set directiveLine $results[-1]
+
+ # For Fish, when completing a flag with an = (e.g., -n=)
+ # completions must be prefixed with the flag
+ set flagPrefix (string match -r -- '-.*=' "$lastArg")
+
+ __%[1]s_debug "Comps: $comps"
+ __%[1]s_debug "DirectiveLine: $directiveLine"
+ __%[1]s_debug "flagPrefix: $flagPrefix"
+
+ for comp in $comps
+ printf "%%s%%s\n" "$flagPrefix" "$comp"
+ end
+
+ printf "%%s\n" "$directiveLine"
+end
+
+# This function does three things:
+# 1- Obtain the completions and store them in the global __%[1]s_comp_results
+# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed
+# and unset it otherwise
+# 3- Return true if the completion results are not empty
+function __%[1]s_prepare_completions
+ # Start fresh
+ set --erase __%[1]s_comp_do_file_comp
+ set --erase __%[1]s_comp_results
+
+ # Check if the command-line is already provided. This is useful for testing.
+ if not set --query __%[1]s_comp_commandLine
+ set __%[1]s_comp_commandLine (commandline)
+ end
+ __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine"
+
+ set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine")
+ set --erase __%[1]s_comp_commandLine
+ __%[1]s_debug "Completion results: $results"
+
+ if test -z "$results"
+ __%[1]s_debug "No completion, probably due to a failure"
+ # Might as well do file completion, in case it helps
+ set --global __%[1]s_comp_do_file_comp 1
+ return 0
+ end
+
+ set directive (string sub --start 2 $results[-1])
+ set --global __%[1]s_comp_results $results[1..-2]
+
+ __%[1]s_debug "Completions are: $__%[1]s_comp_results"
+ __%[1]s_debug "Directive is: $directive"
+
+ if test -z "$directive"
+ set directive 0
+ end
+
+ set compErr (math (math --scale 0 $directive / %[3]d) %% 2)
+ if test $compErr -eq 1
+ __%[1]s_debug "Received error directive: aborting."
+ # Might as well do file completion, in case it helps
+ set --global __%[1]s_comp_do_file_comp 1
+ return 0
+ end
+
+ set nospace (math (math --scale 0 $directive / %[4]d) %% 2)
+ set nofiles (math (math --scale 0 $directive / %[5]d) %% 2)
+
+ __%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
+
+ # Important not to quote the variable for count to work
+ set numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # To support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set --append __%[1]s_comp_results $__%[1]s_comp_results[1].
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ __%[1]s_debug "Requesting file completion"
+ set --global __%[1]s_comp_do_file_comp 1
+ end
+
+ # If we don't want file completion, we must return true even if there
+ # are no completions found. This is because fish will perform the last
+ # completion command, even if its condition is false, if no other
+ # completion command was triggered
+ return (not set --query __%[1]s_comp_do_file_comp)
+end
+
+# Remove any pre-existing completions for the program since we will be handling all of them
+# TODO this cleanup is not sufficient. Fish completions are only loaded once the user triggers
+# them, so the below deletion will not work as it is run too early. What else can we do?
+complete -c %[1]s -e
+
+# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions
+# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable.
+#
+# This completion will be run second as complete commands are added FILO.
+# It triggers file completion choices when __%[1]s_comp_do_file_comp is set.
+complete -c %[1]s -n 'set --query __%[1]s_comp_do_file_comp'
+
+# This completion will be run first as complete commands are added FILO.
+# The call to __%[1]s_prepare_completions will set up both __%[1]s_comp_results and __%[1]s_comp_do_file_comp.
+# It provides the program's completion choices.
+complete -c %[1]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+
+`, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp))
+}
+
+// GenFishCompletion generates fish completion file and writes to the passed writer.
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genFishComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenFishCompletionFile generates fish completion file.
+func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenFishCompletion(outFile, includeDesc)
+}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md
new file mode 100644
index 0000000..6bfe5f8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.md
@@ -0,0 +1,7 @@
+## Generating Fish Completions for your own cobra.Command
+
+Cobra supports native Fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
+
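+As a minimal sketch (the program name `myprog` and writing the script to stdout are assumptions made for this example, not requirements), a program could emit its fish completion script like this:
+
+```go
+package main
+
+import (
+    "log"
+    "os"
+
+    "github.com/spf13/cobra"
+)
+
+func main() {
+    rootCmd := &cobra.Command{Use: "myprog", Short: "demo program"}
+
+    // true: annotate each completion with a description taken from usage info.
+    if err := rootCmd.GenFishCompletion(os.Stdout, true); err != nil {
+        log.Fatal(err)
+    }
+}
+```
+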
+### Limitations
+
+* Custom completions implemented in Go using `ValidArgsFunction` and `RegisterFlagCompletionFunc()` are supported automatically, but custom completions implemented in Bash scripting are not.
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
new file mode 100644
index 0000000..dea1030
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -0,0 +1,12 @@
+module github.com/spf13/cobra
+
+go 1.12
+
+require (
+ github.com/cpuguy83/go-md2man/v2 v2.0.0
+ github.com/inconshreveable/mousetrap v1.0.0
+ github.com/mitchellh/go-homedir v1.1.0
+ github.com/spf13/pflag v1.0.3
+ github.com/spf13/viper v1.4.0
+ gopkg.in/yaml.v2 v2.2.2
+)
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
new file mode 100644
index 0000000..3aaa2ac
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -0,0 +1,149 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 0000000..756c61b
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,100 @@
+// PowerShell completions are based on the amazing work from clap:
+// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs
+//
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
+// can be downloaded separately for Windows 7 or 8.1).
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+var powerShellCompletionTemplate = `using namespace System.Management.Automation
+using namespace System.Management.Automation.Language
+Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock {
+ param($wordToComplete, $commandAst, $cursorPosition)
+ $commandElements = $commandAst.CommandElements
+ $command = @(
+ '%s'
+ for ($i = 1; $i -lt $commandElements.Count; $i++) {
+ $element = $commandElements[$i]
+ if ($element -isnot [StringConstantExpressionAst] -or
+ $element.StringConstantType -ne [StringConstantType]::BareWord -or
+ $element.Value.StartsWith('-')) {
+ break
+ }
+ $element.Value
+ }
+ ) -join ';'
+ $completions = @(switch ($command) {%s
+ })
+ $completions.Where{ $_.CompletionText -like "$wordToComplete*" } |
+ Sort-Object -Property ListItemText
+}`
+
+func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) {
+ var cmdName string
+ if previousCommandName == "" {
+ cmdName = cmd.Name()
+ } else {
+ cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name())
+ }
+
+ fmt.Fprintf(out, "\n '%s' {", cmdName)
+
+ cmd.Flags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ usage := escapeStringForPowerShell(flag.Usage)
+ if len(flag.Shorthand) > 0 {
+ fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage)
+ }
+ fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage)
+ })
+
+ for _, subCmd := range cmd.Commands() {
+ usage := escapeStringForPowerShell(subCmd.Short)
+ fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage)
+ }
+
+ fmt.Fprint(out, "\n break\n }")
+
+ for _, subCmd := range cmd.Commands() {
+ generatePowerShellSubcommandCases(out, subCmd, cmdName)
+ }
+}
+
+func escapeStringForPowerShell(s string) string {
+ return strings.Replace(s, "'", "''", -1)
+}
+
+// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+
+ var subCommandCases bytes.Buffer
+ generatePowerShellSubcommandCases(&subCommandCases, c, "")
+ fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenPowerShellCompletionFile generates PowerShell completion file.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenPowerShellCompletion(outFile)
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md
new file mode 100644
index 0000000..afed802
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.md
@@ -0,0 +1,14 @@
+# Generating PowerShell Completions For Your Own cobra.Command
+
+Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` automatic variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
+
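+As a rough sketch (the program name `myprog` and the output file name are assumptions made for illustration), the script can be generated from the root command like this:
+
+```go
+package main
+
+import (
+    "log"
+
+    "github.com/spf13/cobra"
+)
+
+func main() {
+    rootCmd := &cobra.Command{Use: "myprog", Short: "demo program"}
+
+    // Writes a PowerShell completion script covering rootCmd and its subcommands.
+    if err := rootCmd.GenPowerShellCompletionFile("myprog.ps1"); err != nil {
+        log.Fatal(err)
+    }
+}
+```
+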
+## What's supported
+
+- Completion for subcommands using their `.Short` description
+- Completion for non-hidden flags using their `.Name` and `.Shorthand`
+
+## What's not yet supported
+
+- Command aliases
+- Required, filename or custom flags (they will work like normal flags)
+- Custom completion scripts
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 0000000..ba0af9c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,85 @@
+package cobra
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
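+
+// Illustrative sketch (the flag name and extensions are assumptions for the
+// example): a command can combine these helpers so that shell completion only
+// proposes matching files and the command reports an error when the flag is
+// omitted.
+//
+//    cmd.Flags().String("config", "", "path to the configuration file")
+//    _ = cmd.MarkFlagRequired("config")
+//    _ = cmd.MarkFlagFilename("config", "yaml", "yml", "json")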
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for this persistent flag to the
+// specified extensions (patterns).
+//
+// Shell Completion compatibility matrix: bash, zsh
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for this flag to the specified extensions (patterns).
+//
+// Shell Completion compatibility matrix: bash, zsh
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom instructs the various shell completion implementations to
+// limit completions for this flag to the specified extensions (patterns).
+//
+// Shell Completion compatibility matrix: bash, zsh
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// complete only directories with this named flag.
+//
+// Shell Completion compatibility matrix: zsh
+func (c *Command) MarkFlagDirname(name string) error {
+ return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to complete only directories with this persistent named flag.
+//
+// Shell Completion compatibility matrix: zsh
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+ return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// complete only directories with this specified flag.
+//
+// Shell Completion compatibility matrix: zsh
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+ zshPattern := "-(/)"
+ return flags.SetAnnotation(name, zshCompDirname, []string{zshPattern})
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 0000000..1275548
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,336 @@
+package cobra
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+ "text/template"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ zshCompArgumentAnnotation = "cobra_annotations_zsh_completion_argument_annotation"
+ zshCompArgumentFilenameComp = "cobra_annotations_zsh_completion_argument_file_completion"
+ zshCompArgumentWordComp = "cobra_annotations_zsh_completion_argument_word_completion"
+ zshCompDirname = "cobra_annotations_zsh_dirname"
+)
+
+var (
+ zshCompFuncMap = template.FuncMap{
+ "genZshFuncName": zshCompGenFuncName,
+ "extractFlags": zshCompExtractFlag,
+ "genFlagEntryForZshArguments": zshCompGenFlagEntryForArguments,
+ "extractArgsCompletions": zshCompExtractArgumentCompletionHintsForRendering,
+ }
+ zshCompletionText = `
+{{/* should accept Command (that contains subcommands) as parameter */}}
+{{define "argumentsC" -}}
+{{ $cmdPath := genZshFuncName .}}
+function {{$cmdPath}} {
+ local -a commands
+
+ _arguments -C \{{- range extractFlags .}}
+ {{genFlagEntryForZshArguments .}} \{{- end}}
+ "1: :->cmnds" \
+ "*::arg:->args"
+
+ case $state in
+ cmnds)
+ commands=({{range .Commands}}{{if not .Hidden}}
+ "{{.Name}}:{{.Short}}"{{end}}{{end}}
+ )
+ _describe "command" commands
+ ;;
+ esac
+
+ case "$words[1]" in {{- range .Commands}}{{if not .Hidden}}
+ {{.Name}})
+ {{$cmdPath}}_{{.Name}}
+ ;;{{end}}{{end}}
+ esac
+}
+{{range .Commands}}{{if not .Hidden}}
+{{template "selectCmdTemplate" .}}
+{{- end}}{{end}}
+{{- end}}
+
+{{/* should accept Command without subcommands as parameter */}}
+{{define "arguments" -}}
+function {{genZshFuncName .}} {
+{{" _arguments"}}{{range extractFlags .}} \
+ {{genFlagEntryForZshArguments . -}}
+{{end}}{{range extractArgsCompletions .}} \
+ {{.}}{{end}}
+}
+{{end}}
+
+{{/* dispatcher for commands with or without subcommands */}}
+{{define "selectCmdTemplate" -}}
+{{if .Hidden}}{{/* ignore hidden*/}}{{else -}}
+{{if .Commands}}{{template "argumentsC" .}}{{else}}{{template "arguments" .}}{{end}}
+{{- end}}
+{{- end}}
+
+{{/* template entry point */}}
+{{define "Main" -}}
+#compdef _{{.Name}} {{.Name}}
+
+{{template "selectCmdTemplate" .}}
+{{end}}
+`
+)
+
+// zshCompArgsAnnotation is used to encode/decode zsh completion for
+// arguments to/from Command.Annotations.
+type zshCompArgsAnnotation map[int]zshCompArgHint
+
+type zshCompArgHint struct {
+ // Indicates the type of the completion to use. One of:
+ // zshCompArgumentFilenameComp or zshCompArgumentWordComp
+ Tipe string `json:"type"`
+
+ // A value for the type above (globs for file completion or words)
+ Options []string `json:"options"`
+}
+
+// GenZshCompletionFile generates zsh completion file.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenZshCompletion(outFile)
+}
+
+// GenZshCompletion generates a zsh completion file and writes to the passed
+// writer. The completion always runs on the root command regardless of the
+// command it was called from.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ tmpl, err := template.New("Main").Funcs(zshCompFuncMap).Parse(zshCompletionText)
+ if err != nil {
+ return fmt.Errorf("error creating zsh completion template: %v", err)
+ }
+ return tmpl.Execute(w, c.Root())
+}
+
+// MarkZshCompPositionalArgumentFile marks the specified argument (first
+// argument is 1) as completed by file selection. Patterns (e.g. "*.txt") are
+// optional; if none are provided, the completion will match all files.
+func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
+ if argPosition < 1 {
+ return fmt.Errorf("Invalid argument position (%d)", argPosition)
+ }
+ annotation, err := c.zshCompGetArgsAnnotations()
+ if err != nil {
+ return err
+ }
+ if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) {
+ return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition)
+ }
+ annotation[argPosition] = zshCompArgHint{
+ Tipe: zshCompArgumentFilenameComp,
+ Options: patterns,
+ }
+ return c.zshCompSetArgsAnnotations(annotation)
+}
+
+// MarkZshCompPositionalArgumentWords marks the specified positional argument
+// (first argument is 1) as completed by the provided words. At least one word
+// must be provided; words containing spaces will be offered as completions in
+// the form "word\ word".
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
+ if argPosition < 1 {
+ return fmt.Errorf("Invalid argument position (%d)", argPosition)
+ }
+ if len(words) == 0 {
+ return fmt.Errorf("Trying to set empty word list for positional argument %d", argPosition)
+ }
+ annotation, err := c.zshCompGetArgsAnnotations()
+ if err != nil {
+ return err
+ }
+ if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) {
+ return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition)
+ }
+ annotation[argPosition] = zshCompArgHint{
+ Tipe: zshCompArgumentWordComp,
+ Options: words,
+ }
+ return c.zshCompSetArgsAnnotations(annotation)
+}
+
+func zshCompExtractArgumentCompletionHintsForRendering(c *Command) ([]string, error) {
+ var result []string
+ annotation, err := c.zshCompGetArgsAnnotations()
+ if err != nil {
+ return nil, err
+ }
+ for k, v := range annotation {
+ s, err := zshCompRenderZshCompArgHint(k, v)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, s)
+ }
+ if len(c.ValidArgs) > 0 {
+ if _, positionOneExists := annotation[1]; !positionOneExists {
+ s, err := zshCompRenderZshCompArgHint(1, zshCompArgHint{
+ Tipe: zshCompArgumentWordComp,
+ Options: c.ValidArgs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, s)
+ }
+ }
+ sort.Strings(result)
+ return result, nil
+}
+
+func zshCompRenderZshCompArgHint(i int, z zshCompArgHint) (string, error) {
+ switch t := z.Tipe; t {
+ case zshCompArgumentFilenameComp:
+ var globs []string
+ for _, g := range z.Options {
+ globs = append(globs, fmt.Sprintf(`-g "%s"`, g))
+ }
+ return fmt.Sprintf(`'%d: :_files %s'`, i, strings.Join(globs, " ")), nil
+ case zshCompArgumentWordComp:
+ var words []string
+ for _, w := range z.Options {
+ words = append(words, fmt.Sprintf("%q", w))
+ }
+ return fmt.Sprintf(`'%d: :(%s)'`, i, strings.Join(words, " ")), nil
+ default:
+ return "", fmt.Errorf("Invalid zsh argument completion annotation: %s", t)
+ }
+}
+
+func (c *Command) zshcompArgsAnnotationnIsDuplicatePosition(annotation zshCompArgsAnnotation, position int) bool {
+ _, dup := annotation[position]
+ return dup
+}
+
+func (c *Command) zshCompGetArgsAnnotations() (zshCompArgsAnnotation, error) {
+ annotation := make(zshCompArgsAnnotation)
+ annotationString, ok := c.Annotations[zshCompArgumentAnnotation]
+ if !ok {
+ return annotation, nil
+ }
+ err := json.Unmarshal([]byte(annotationString), &annotation)
+ if err != nil {
+ return annotation, fmt.Errorf("Error unmarshaling zsh argument annotation: %v", err)
+ }
+ return annotation, nil
+}
+
+func (c *Command) zshCompSetArgsAnnotations(annotation zshCompArgsAnnotation) error {
+ jsn, err := json.Marshal(annotation)
+ if err != nil {
+ return fmt.Errorf("Error marshaling zsh argument annotation: %v", err)
+ }
+ if c.Annotations == nil {
+ c.Annotations = make(map[string]string)
+ }
+ c.Annotations[zshCompArgumentAnnotation] = string(jsn)
+ return nil
+}
+
+func zshCompGenFuncName(c *Command) string {
+ if c.HasParent() {
+ return zshCompGenFuncName(c.Parent()) + "_" + c.Name()
+ }
+ return "_" + c.Name()
+}
+
+func zshCompExtractFlag(c *Command) []*pflag.Flag {
+ var flags []*pflag.Flag
+ c.LocalFlags().VisitAll(func(f *pflag.Flag) {
+ if !f.Hidden {
+ flags = append(flags, f)
+ }
+ })
+ c.InheritedFlags().VisitAll(func(f *pflag.Flag) {
+ if !f.Hidden {
+ flags = append(flags, f)
+ }
+ })
+ return flags
+}
+
+// zshCompGenFlagEntryForArguments returns an entry that matches _arguments
+// zsh-completion parameters. It's too complicated to generate in a template.
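+// As an illustrative sketch (not copied from generated output): a flag
+// --file/-f with usage "file to read", an empty NoOptDefVal and no
+// annotations yields an entry roughly like:
+//   '(-f --file)'{-f,--file}'[file to read]:'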
+func zshCompGenFlagEntryForArguments(f *pflag.Flag) string {
+ if f.Name == "" || f.Shorthand == "" {
+ return zshCompGenFlagEntryForSingleOptionFlag(f)
+ }
+ return zshCompGenFlagEntryForMultiOptionFlag(f)
+}
+
+func zshCompGenFlagEntryForSingleOptionFlag(f *pflag.Flag) string {
+ var option, multiMark, extras string
+
+ if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) {
+ multiMark = "*"
+ }
+
+ option = "--" + f.Name
+ if option == "--" {
+ option = "-" + f.Shorthand
+ }
+ extras = zshCompGenFlagEntryExtras(f)
+
+ return fmt.Sprintf(`'%s%s[%s]%s'`, multiMark, option, zshCompQuoteFlagDescription(f.Usage), extras)
+}
+
+func zshCompGenFlagEntryForMultiOptionFlag(f *pflag.Flag) string {
+ var options, parenMultiMark, curlyMultiMark, extras string
+
+ if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) {
+ parenMultiMark = "*"
+ curlyMultiMark = "\\*"
+ }
+
+ options = fmt.Sprintf(`'(%s-%s %s--%s)'{%s-%s,%s--%s}`,
+ parenMultiMark, f.Shorthand, parenMultiMark, f.Name, curlyMultiMark, f.Shorthand, curlyMultiMark, f.Name)
+ extras = zshCompGenFlagEntryExtras(f)
+
+ return fmt.Sprintf(`%s'[%s]%s'`, options, zshCompQuoteFlagDescription(f.Usage), extras)
+}
+
+func zshCompGenFlagEntryExtras(f *pflag.Flag) string {
+ if f.NoOptDefVal != "" {
+ return ""
+ }
+
+ extras := ":" // allow options for flag (even without assistance)
+ for key, values := range f.Annotations {
+ switch key {
+ case zshCompDirname:
+ extras = fmt.Sprintf(":filename:_files -g %q", values[0])
+ case BashCompFilenameExt:
+ extras = ":filename:_files"
+ for _, pattern := range values {
+ extras = extras + fmt.Sprintf(` -g "%s"`, pattern)
+ }
+ }
+ }
+
+ return extras
+}
+
+func zshCompFlagCouldBeSpecifiedMoreThenOnce(f *pflag.Flag) bool {
+ return strings.Contains(f.Value.Type(), "Slice") ||
+ strings.Contains(f.Value.Type(), "Array")
+}
+
+func zshCompQuoteFlagDescription(s string) string {
+ return strings.Replace(s, "'", `'\''`, -1)
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md
new file mode 100644
index 0000000..df9c2ea
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.md
@@ -0,0 +1,39 @@
+## Generating Zsh Completion for your cobra.Command
+
+Cobra supports native Zsh completion generated from the root `cobra.Command`.
+The generated completion script should be put somewhere in your `$fpath` and
+named `_<your-command>` (for example `_myapp`).
+
+### What's Supported
+
+* Completion for all non-hidden subcommands using their `.Short` description.
+* Completion for all non-hidden flags using the following rules:
+  * Filename completion works by marking the flag with one of the
+    `cmd.MarkFlagFilename...` family of commands.
+  * Whether the flag requires an argument is decided by the `.NoOptDefVal`
+    flag value - if it is empty, then completion will expect an argument.
+  * Flags of one of the various `*Array` and `*Slice` types support multiple
+    specifications (with or without an argument, depending on the specific type).
+* Completion of positional arguments using the following rules:
+  * Argument position for all options below starts at `1`. If argument position
+    `0` is requested, an error is returned.
+ * Use `command.MarkZshCompPositionalArgumentFile` to complete filenames. Glob
+ patterns (e.g. `"*.log"`) are optional - if not specified it will offer to
+ complete all file types.
+ * Use `command.MarkZshCompPositionalArgumentWords` to offer specific words for
+ completion. At least one word is required.
+  * It's possible to specify completion for some arguments and leave others
+    unspecified (e.g. offer words for the second argument but nothing for the
+    first). In that case the first argument gets no completion while the second
+    still offers the words (see the sketch after this list).
+  * If no completion was specified for the 1st argument (even if one was
+    specified for the 2nd) and the command has `ValidArgs`, those are used as
+    completion options for the 1st argument.
+  * Argument completions are only offered for commands with no subcommands.
+
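+Below is a minimal sketch of how these markers can be combined. The command and
+flag names here (`myapp`, `--config`, the words, the glob) are illustrative
+only; `MarkFlagFilename`, `MarkZshCompPositionalArgumentWords`,
+`MarkZshCompPositionalArgumentFile` and `GenZshCompletionFile` are the APIs
+described above.
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	rootCmd := &cobra.Command{Use: "myapp", Short: "example command"}
+
+	// Complete the --config flag with *.yaml files.
+	rootCmd.Flags().StringP("config", "c", "", "config file")
+	rootCmd.MarkFlagFilename("config", "yaml")
+
+	// Positional argument 1: offer fixed words; argument 2: offer *.log files.
+	if err := rootCmd.MarkZshCompPositionalArgumentWords(1, "start", "stop", "status"); err != nil {
+		log.Fatal(err)
+	}
+	if err := rootCmd.MarkZshCompPositionalArgumentFile(2, "*.log"); err != nil {
+		log.Fatal(err)
+	}
+
+	// Write the completion script; install it somewhere in $fpath as _myapp.
+	if err := rootCmd.GenZshCompletionFile("_myapp"); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+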
+### What's not yet Supported
+
+* Custom completion scripts are not supported yet (we should probably create a
+  zsh-specific mechanism; it doesn't make sense to re-use the bash one as the
+  functions will be different).
+* Whatever other feature you're looking for that doesn't exist yet :)
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 0000000..c3da290
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 0000000..00d04cb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,22 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+install:
+ - go get golang.org/x/lint/golint
+ - export PATH=$GOPATH/bin:$PATH
+ - go install ./...
+
+script:
+ - verify/all.sh -v
+ - go test ./...
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 0000000..63ed1cf
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 0000000..7eacc5b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,296 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if you have a FlagSet but find
+it difficult to keep up with all of the pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
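+For example, a small sketch (assuming `flag.Parse()` has been called as above
+and `fmt` is imported):
+
+``` go
+// print all positional (non-flag) arguments
+for i := 0; i < flag.NArg(); i++ {
+	fmt.Println(flag.Arg(i))
+}
+```
+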
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
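+For instance, with the definitions above, the following hypothetical
+invocations are equivalent ways of setting both flags (`prog` stands in for
+your binary):
+
+```
+prog --boolname --flagname=7
+prog -b -f 7
+prog -bf 7   // -b is boolean, so it can be combined; -f consumes the value
+```
+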
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
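+As a sketch of how an independent FlagSet might back a hypothetical `serve`
+subcommand (the names here are illustrative, assuming `os` and `fmt` are
+imported):
+
+``` go
+serveFlags := flag.NewFlagSet("serve", flag.ExitOnError)
+port := serveFlags.IntP("port", "p", 8080, "port to listen on")
+
+// parse only the arguments that follow the subcommand name
+serveFlags.Parse(os.Args[2:])
+fmt.Println("serving on port", *port)
+```
+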
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+a value, the flag will be set to the NoOptDefVal. For example, given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+This would result in something like:
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be a boolean flag
+or a flag with a no-option default value.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ break
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here; it should not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal, but it will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage messages.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+ -v, --verbose verbose output
+ --coolflag string it's really cool flag (default "yeaah")
+ --usefulflag int sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+ goflag "flag"
+ flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+ flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+ flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..c4c5c0b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 0000000..3731370
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,185 @@
+package pflag
+
+import (
+ "io"
+ "strconv"
+ "strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+ value *[]bool
+ changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+ bsv := new(boolSliceValue)
+ bsv.value = p
+ *bsv.value = val
+ return bsv
+}
+
+// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse boolean values into slice
+ out := make([]bool, 0, len(boolStrSlice))
+ for _, boolStr := range boolStrSlice {
+ b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+ if err != nil {
+ return err
+ }
+ out = append(out, b)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+ return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+ boolStrSlice := make([]string, len(*s.value))
+ for i, b := range *s.value {
+ boolStrSlice[i] = strconv.FormatBool(b)
+ }
+
+ out, _ := writeAsCSV(boolStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *boolSliceValue) fromString(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+func (s *boolSliceValue) toString(val bool) string {
+ return strconv.FormatBool(val)
+}
+
+func (s *boolSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *boolSliceValue) Replace(val []string) error {
+ out := make([]bool, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *boolSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []bool{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]bool, len(ss))
+ for i, t := range ss {
+ var err error
+ out[i], err = strconv.ParseBool(t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+ val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+ if err != nil {
+ return []bool{}, err
+ }
+ return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 0000000..67d5304
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "strings"
+)
+
+// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+ return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+ bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesHex = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+ return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+ *p = val
+ return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+ bin, err := hex.DecodeString(sval)
+
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..a0b2679
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ // "+1" means that no specific value was passed, so increment
+ if s == "+1" {
+ *i = countValue(*i + 1)
+ return nil
+ }
+ v, err := strconv.ParseInt(s, 0, 0)
+ *i = countValue(v)
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+ flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like the FlagSet's CountVar, but the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
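+// As an illustrative sketch: with v := fs.CountP("verbose", "v", "verbosity"),
+// an invocation containing -vvv should leave *v == 3.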
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 0000000..badadda
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+ value *[]time.Duration
+ changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+ dsv := new(durationSliceValue)
+ dsv.value = p
+ *dsv.value = val
+ return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *durationSliceValue) Type() string {
+ return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%s", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *durationSliceValue) fromString(val string) (time.Duration, error) {
+ return time.ParseDuration(val)
+}
+
+func (s *durationSliceValue) toString(val time.Duration) string {
+ return fmt.Sprintf("%s", val)
+}
+
+func (s *durationSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *durationSliceValue) Replace(val []string) error {
+ out := make([]time.Duration, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *durationSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []time.Duration{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+ val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+ if err != nil {
+ return []time.Duration{}, err
+ }
+ return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a []time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..24a5036
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1239 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagval, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ goflag "flag"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ // ContinueOnError will return an err from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+	// UnknownFlags will ignore unknown flag errors and continue parsing the rest of the flags
+ UnknownFlags bool
+}
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+	// SortFlags indicates whether the user wants flags sorted in
+	// help/usage messages.
+ SortFlags bool
+
+ // ParseErrorsWhitelist is used to configure a whitelist of errors
+ ParseErrorsWhitelist ParseErrorsWhitelist
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ orderedActual []*Flag
+ sortedActual []*Flag
+ formal map[NormalizedName]*Flag
+ orderedFormal []*Flag
+ sortedFormal []*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+ addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // If the user set the value (or if left to default)
+ NoOptDefVal string // default value (as text); if the flag is on the command line without any options
+	Deprecated string // If this flag is deprecated, this string is the usage message to print (e.g. what to use instead)
+ Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the usage message to print (e.g. what to use instead)
+	Annotations map[string][]string // used by cobra.Command bash autocomplete code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// SliceValue is a secondary interface to all flags which hold a list
+// of values. This allows full control over the value of list flags,
+// and avoids complicated marshalling and unmarshalling to csv.
+type SliceValue interface {
+ // Append adds the specified value to the end of the flag value list.
+ Append(string) error
+ // Replace will fully overwrite any data currently in the flag value list.
+ Replace([]string) error
+ // GetSlice returns the flag value list as an array of strings.
+ GetSlice() []string
+}
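+
+// Concrete slice flag types added later in this package (for example
+// float32SliceValue and float64SliceValue) implement SliceValue in addition
+// to Value.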
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated and then when anything tries to
+// look up the flag that will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ f.sortedFormal = f.sortedFormal[:0]
+ for fname, flag := range f.formal {
+ nname := f.normalizeFlagName(flag.Name)
+ if fname == nname {
+ continue
+ }
+ flag.Name = string(nname)
+ delete(f.formal, fname)
+ f.formal[nname] = flag
+ if _, set := f.actual[fname]; set {
+ delete(f.actual, fname)
+ f.actual[nname] = flag
+ }
+ }
+}
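+
+// Illustrative example (helper name is hypothetical, not part of the upstream
+// source): a normalize function that treats '-' and '_' as equivalent could
+// be written as
+//
+//	func wordSepNormalize(f *FlagSet, name string) NormalizedName {
+//		return NormalizedName(strings.Replace(name, "_", "-", -1))
+//	}
+//
+// and installed with fs.SetNormalizeFunc(wordSepNormalize).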
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// which does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ if len(f.formal) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.formal) != len(f.sortedFormal) {
+ f.sortedFormal = sortFlags(f.formal)
+ }
+ flags = f.sortedFormal
+ } else {
+ flags = f.orderedFormal
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+ for _, flag := range f.formal {
+ if !flag.Hidden {
+ return true
+ }
+ }
+ return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ if len(f.actual) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.actual) != len(f.sortedActual) {
+ f.sortedActual = sortFlags(f.actual)
+ }
+ flags = f.sortedActual
+ } else {
+ flags = f.orderedActual
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+ if name == "" {
+ return nil
+ }
+ if len(name) > 1 {
+ msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg)
+ }
+ c := name[0]
+ return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// func to return a given type for a given flag name
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ flag.Hidden = true
+ return nil
+}
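+
+// Illustrative usage (flag names are hypothetical): after defining a flag
+// named "old-flag",
+//
+//	fs.MarkDeprecated("old-flag", "use --new-flag instead")
+//
+// hides it from help output and prints the message whenever it is used.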
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+ return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+
+ err := flag.Value.Set(value)
+ if err != nil {
+ var flagName string
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ flagName = fmt.Sprintf("--%s", flag.Name)
+ }
+ return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+ }
+
+ if !flag.Changed {
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
+
+ flag.Changed = true
+ }
+
+ if flag.Deprecated != "" {
+ fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed....
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
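+
+// For example (hypothetical flag name), fs.Changed("verbose") reports whether
+// --verbose appeared on the command line, even if it was explicitly set to
+// its default value.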
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+ switch f.Value.(type) {
+ case boolFlag:
+ return f.DefValue == "false"
+ case *durationValue:
+ // Beginning in Go 1.7, duration zero values are "0s"
+ return f.DefValue == "0" || f.DefValue == "0s"
+ case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+ return f.DefValue == "0"
+ case *stringValue:
+ return f.DefValue == ""
+ case *ipValue, *ipMaskValue, *ipNetValue:
+ return f.DefValue == ""
+ case *intSliceValue, *stringSliceValue, *stringArrayValue:
+ return f.DefValue == "[]"
+ default:
+ switch f.Value.String() {
+ case "false":
+ return true
+ case "":
+ return true
+ case "":
+ return true
+ case "0":
+ return true
+ }
+ return false
+ }
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+ // Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name = usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break // Only one back quote; use type name.
+ }
+ }
+
+ name = flag.Value.Type()
+ switch name {
+ case "bool":
+ name = ""
+ case "float64":
+ name = "float"
+ case "int64":
+ name = "int"
+ case "uint64":
+ name = "uint"
+ case "stringSlice":
+ name = "strings"
+ case "intSlice":
+ name = "ints"
+ case "uintSlice":
+ name = "uints"
+ case "boolSlice":
+ name = "bools"
+ }
+
+ return
+}
+
+// wrapN splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+ if i+slop > len(s) {
+ return s, ""
+ }
+
+ w := strings.LastIndexAny(s[:i], " \t\n")
+ if w <= 0 {
+ return s, ""
+ }
+ nlPos := strings.LastIndex(s[:i], "\n")
+ if nlPos > 0 && nlPos < w {
+ return s[:nlPos], s[nlPos+1:]
+ }
+ return s[:w], s[w+1:]
+}
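+
+// For example, wrapN(10, 5, "lorem ipsum dolor") returns ("lorem", "ipsum dolor"):
+// the split happens at the last space before index i.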
+
+// wrap wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by the
+// caller). Pass `w` == 0 to do no wrapping.
+func wrap(i, w int, s string) string {
+ if w == 0 {
+ return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ // space between indent i and end of line width w into which
+ // we should wrap the text.
+ wrap := w - i
+
+ var r, l string
+
+ // Not enough space for sensible wrapping. Wrap as a block on
+ // the next line instead.
+ if wrap < 24 {
+ i = 16
+ wrap = w - i
+ r += "\n" + strings.Repeat(" ", i)
+ }
+ // If still not enough space then don't even try to wrap.
+ if wrap < 24 {
+ return strings.Replace(s, "\n", r, -1)
+ }
+
+ // Try to avoid short orphan words on the final line, by
+ // allowing wrapN to go a bit over if that would fit in the
+ // remainder of the line.
+ slop := 5
+ wrap = wrap - slop
+
+ // Handle first line, which is indented by the caller (or the
+ // special case above)
+ l, s = wrapN(wrap, slop, s)
+ r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+ // Now wrap the rest
+ for s != "" {
+ var t string
+
+ t, s = wrapN(wrap, slop, s)
+ r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+ buf := new(bytes.Buffer)
+
+ lines := make([]string, 0, len(f.formal))
+
+ maxlen := 0
+ f.VisitAll(func(flag *Flag) {
+ if flag.Hidden {
+ return
+ }
+
+ line := ""
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ line = fmt.Sprintf(" --%s", flag.Name)
+ }
+
+ varname, usage := UnquoteUsage(flag)
+ if varname != "" {
+ line += " " + varname
+ }
+ if flag.NoOptDefVal != "" {
+ switch flag.Value.Type() {
+ case "string":
+ line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+ case "bool":
+ if flag.NoOptDefVal != "true" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ case "count":
+ if flag.NoOptDefVal != "+1" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ default:
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ }
+
+ // This special character will be replaced with spacing once the
+ // correct alignment is calculated
+ line += "\x00"
+ if len(line) > maxlen {
+ maxlen = len(line)
+ }
+
+ line += usage
+ if !flag.defaultIsZeroValue() {
+ if flag.Value.Type() == "string" {
+ line += fmt.Sprintf(" (default %q)", flag.DefValue)
+ } else {
+ line += fmt.Sprintf(" (default %s)", flag.DefValue)
+ }
+ }
+ if len(flag.Deprecated) != 0 {
+ line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+ }
+
+ lines = append(lines, line)
+ })
+
+ for _, line := range lines {
+ sidx := strings.Index(line, "\x00")
+ spacing := strings.Repeat(" ", maxlen-sidx)
+ // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+ fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+ }
+
+ return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ return f.FlagUsagesWrapped(0)
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
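+
+// As a sketch of the pattern described above (the type and flag names are
+// hypothetical, not part of the upstream source), a comma-separated list flag
+// could be implemented as
+//
+//	type csvValue []string
+//
+//	func (c *csvValue) String() string     { return strings.Join(*c, ",") }
+//	func (c *csvValue) Set(s string) error { *c = strings.Split(s, ","); return nil }
+//	func (c *csvValue) Type() string       { return "csv" }
+//
+// and registered with fs.Var(&csvValue{}, "tags", "comma-separated tags").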
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadyThere := f.formal[normalizedFlagName]
+ if alreadyThere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+ f.orderedFormal = append(f.orderedFormal, flag)
+
+ if flag.Shorthand == "" {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+ msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg)
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ used, alreadyThere := f.shorthands[c]
+ if alreadyThere {
+ msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+ fmt.Fprintln(f.out(), msg)
+ panic(msg)
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ if f.errorHandling != ContinueOnError {
+ fmt.Fprintln(f.out(), err)
+ f.usage()
+ }
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+//--unknown (args will be empty)
+//--unknown --next-flag ... (args will be --next-flag ...)
+//--unknown arg ... (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+ if len(args) == 0 {
+ //--unknown
+ return args
+ }
+
+ first := args[0]
+ if len(first) > 0 && first[0] == '-' {
+ //--unknown --next-flag ...
+ return args
+ }
+
+ //--unknown arg ... (args will be arg ...)
+ if len(args) > 1 {
+ return args[1:]
+ }
+ return nil
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, exists := f.formal[f.normalizeFlagName(name)]
+
+ if !exists {
+ switch {
+ case name == "help":
+ f.usage()
+ return a, ErrHelp
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // --unknown=unknownval arg ...
+ // we do not want to lose arg in this case
+ if len(split) >= 2 {
+ return a, nil
+ }
+
+ return stripUnknownFlagValue(a), nil
+ default:
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ }
+
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if flag.NoOptDefVal != "" {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf("%v", err)
+ }
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
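+ // Shorthand strings beginning with "test." (e.g. -test.v, injected by
+ // "go test") are skipped rather than treated as unknown flags.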
+ if strings.HasPrefix(shorthands, "test.") {
+ return
+ }
+
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, exists := f.shorthands[c]
+ if !exists {
+ switch {
+ case c == 'h':
+ f.usage()
+ err = ErrHelp
+ return
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // '-f=arg arg ...'
+ // we do not want to lose arg in this case
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ outShorts = ""
+ return
+ }
+
+ outArgs = stripUnknownFlagValue(outArgs)
+ return
+ default:
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
+ }
+
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ // '-f=arg'
+ value = shorthands[2:]
+ outShorts = ""
+ } else if flag.NoOptDefVal != "" {
+ // '-f' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ // '-farg'
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ // '-f arg'
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ // '-f' (arg was required)
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+
+ if flag.ShorthandDeprecated != "" {
+ fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf("%v", err)
+ }
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args, fn)
+ } else {
+ args, err = f.parseShortArg(s, args, fn)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ if f.addedGoFlagSets != nil {
+ for _, goFlagSet := range f.addedGoFlagSets {
+ goFlagSet.Parse(nil)
+ }
+ }
+ f.parsed = true
+
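+ // NOTE: len never returns a negative value, so this guard can never trigger.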
+ if len(arguments) < 0 {
+ return nil
+ }
+
+ f.args = make([]string, 0, len(arguments))
+
+ set := func(flag *Flag, value string) error {
+ return f.Set(flag.Name, value)
+ }
+
+ err := f.parseArgs(arguments, set)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ fmt.Println(err)
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ err := f.parseArgs(arguments, fn)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ SortFlags: true,
+ }
+ return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 0000000..a243f81
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go
new file mode 100644
index 0000000..caa3527
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float32Slice Value
+type float32SliceValue struct {
+ value *[]float32
+ changed bool
+}
+
+func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue {
+ isv := new(float32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = float32(temp64)
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float32SliceValue) Type() string {
+ return "float32Slice"
+}
+
+func (s *float32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float32SliceValue) fromString(val string) (float32, error) {
+ t64, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(t64), nil
+}
+
+func (s *float32SliceValue) toString(val float32) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float32SliceValue) Replace(val []string) error {
+ out := make([]float32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = float32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetFloat32Slice returns the []float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) {
+ val, err := f.getFlagType(name, "float32Slice", float32SliceConv)
+ if err != nil {
+ return []float32{}, err
+ }
+ return val.([]float32), nil
+}
+
+// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32SliceVar defines a []float32 flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func Float32Slice(name string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, "", value, usage)
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 0000000..04b5492
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go
new file mode 100644
index 0000000..85bf307
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float64Slice Value
+type float64SliceValue struct {
+ value *[]float64
+ changed bool
+}
+
+func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue {
+ isv := new(float64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float64SliceValue) Type() string {
+ return "float64Slice"
+}
+
+func (s *float64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float64SliceValue) fromString(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+func (s *float64SliceValue) toString(val float64) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float64SliceValue) Replace(val []string) error {
+ out := make([]float64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetFloat64Slice returns the []float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) {
+ val, err := f.getFlagType(name, "float64Slice", float64SliceConv)
+ if err != nil {
+ return []float64{}, err
+ }
+ return val.([]float64), nil
+}
+
+// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64SliceVar defines a []float64 flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func Float64Slice(name string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, "", value, usage)
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod
new file mode 100644
index 0000000..b2287ee
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/go.mod
@@ -0,0 +1,3 @@
+module github.com/spf13/pflag
+
+go 1.12
diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..d3dd72b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "reflect"
+ "strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ // Ex: if the golang flag was -v, allow both -v and --v to work
+ if len(flag.Name) == 1 {
+ flag.Shorthand = flag.Name
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+ if f.addedGoFlagSets == nil {
+ f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+ }
+ f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..1474b89
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 0000000..f1a01d0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+ *p = val
+ return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ *i = int16Value(v)
+ return err
+}
+
+func (i *int16Value) Type() string {
+ return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+ val, err := f.getFlagType(name, "int16", int16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 0000000..9b95944
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go
new file mode 100644
index 0000000..ff128ff
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int32Slice Value
+type int32SliceValue struct {
+ value *[]int32
+ changed bool
+}
+
+func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue {
+ isv := new(int32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = int32(temp64)
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int32SliceValue) Type() string {
+ return "int32Slice"
+}
+
+func (s *int32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int32SliceValue) fromString(val string) (int32, error) {
+ t64, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(t64), nil
+}
+
+func (s *int32SliceValue) toString(val int32) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int32SliceValue) Replace(val []string) error {
+ out := make([]int32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = int32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetInt32Slice returns the []int32 value of a flag with the given name
+func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) {
+ val, err := f.getFlagType(name, "int32Slice", int32SliceConv)
+ if err != nil {
+ return []int32{}, err
+ }
+ return val.([]int32), nil
+}
+
+// Int32SliceVar defines an int32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32SliceVar defines an int32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func Int32Slice(name string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, "", value, usage)
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, shorthand, value, usage)
+}
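
The slice-valued integer flags share the Set semantics visible above: the first use of the flag replaces the default, later uses append, and each value may itself be a comma-separated list. A minimal sketch (the flag name, default, and arguments are hypothetical, not part of this change):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

        // Hypothetical flag; the default is dropped as soon as the flag is set.
        ports := fs.Int32Slice("ports", []int32{80}, "ports to listen on")

        // The first --ports replaces the default, the second appends, and
        // each value may itself be a comma-separated list.
        if err := fs.Parse([]string{"--ports=8080,8443", "--ports=9090"}); err != nil {
            panic(err)
        }

        fmt.Println(*ports) // [8080 8443 9090]
    }
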
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 0000000..0026d78
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go
new file mode 100644
index 0000000..2546463
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int64Slice Value
+type int64SliceValue struct {
+ value *[]int64
+ changed bool
+}
+
+func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue {
+ isv := new(int64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int64SliceValue) Type() string {
+ return "int64Slice"
+}
+
+func (s *int64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int64SliceValue) fromString(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+func (s *int64SliceValue) toString(val int64) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int64SliceValue) Replace(val []string) error {
+ out := make([]int64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetInt64Slice returns the []int64 value of a flag with the given name
+func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) {
+ val, err := f.getFlagType(name, "int64Slice", int64SliceConv)
+ if err != nil {
+ return []int64{}, err
+ }
+ return val.([]int64), nil
+}
+
+// Int64SliceVar defines an int64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64SliceVar defines an int64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func Int64Slice(name string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, "", value, usage)
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 0000000..4da9222
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 0000000..e71c39d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,158 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *intSliceValue) Append(val string) error {
+ i, err := strconv.Atoi(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *intSliceValue) Replace(val []string) error {
+ out := make([]int, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *intSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = strconv.Itoa(d)
+ }
+ return out
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 0000000..3d414ba
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,94 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
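
A short sketch of the net.IP flag defined above; the flag name, default, and argument are illustrative assumptions, not taken from this patch:

    package main

    import (
        "fmt"
        "net"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

        // Hypothetical flag with a default of 127.0.0.1.
        bind := fs.IP("bind", net.ParseIP("127.0.0.1"), "address to bind to")

        // Whitespace around the value is trimmed; anything net.ParseIP
        // rejects is reported as a parse error.
        if err := fs.Parse([]string{"--bind", " 192.168.1.10 "}); err != nil {
            panic(err)
        }

        fmt.Println(bind.String()) // 192.168.1.10
    }
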
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 0000000..775faae
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,186 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+ value *[]net.IP
+ changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+ ipsv := new(ipSliceValue)
+ ipsv.value = p
+ *ipsv.value = val
+ return ipsv
+}
+
+// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IP, 0, len(ipStrSlice))
+ for _, ipStr := range ipStrSlice {
+ ip := net.ParseIP(strings.TrimSpace(ipStr))
+ if ip == nil {
+ return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+ }
+ out = append(out, ip)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+ return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+ ipStrSlice := make([]string, len(*s.value))
+ for i, ip := range *s.value {
+ ipStrSlice[i] = ip.String()
+ }
+
+ out, _ := writeAsCSV(ipStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *ipSliceValue) fromString(val string) (net.IP, error) {
+ return net.ParseIP(strings.TrimSpace(val)), nil
+}
+
+func (s *ipSliceValue) toString(val net.IP) string {
+ return val.String()
+}
+
+func (s *ipSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *ipSliceValue) Replace(val []string) error {
+ out := make([]net.IP, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *ipSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IP{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IP, len(ss))
+ for i, sval := range ss {
+ ip := net.ParseIP(strings.TrimSpace(sval))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+ }
+ out[i] = ip
+ }
+ return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+ val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+ if err != nil {
+ return []net.IP{}, err
+ }
+ return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
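
The IP-slice flag reads its value through the CSV reader after stripping quote characters, so quoted and unquoted comma-separated forms both work, and repeated uses append. A hypothetical sketch, not part of this change:

    package main

    import (
        "fmt"
        "net"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

        // Hypothetical flag with an empty default.
        peers := fs.IPSlice("peers", []net.IP{}, "peer addresses")

        // Quote characters are removed before CSV parsing, so both forms
        // below contribute to the same slice.
        args := []string{`--peers="10.0.0.1","10.0.0.2"`, "--peers=192.168.0.1"}
        if err := fs.Parse(args); err != nil {
            panic(err)
        }

        fmt.Println(*peers) // [10.0.0.1 10.0.0.2 192.168.0.1]
    }
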
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 0000000..5bd44bd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
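
ParseIPv4Mask accepts either the dotted-quad form or the eight-hex-digit form that net.IPMask.String produces, and returns nil for anything else. A standalone sketch, not part of the patch:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        // Dotted-quad form.
        m1 := pflag.ParseIPv4Mask("255.255.255.0")

        // The ffffff00 form that net.IPMask.String() emits round-trips too.
        m2 := pflag.ParseIPv4Mask("ffffff00")

        fmt.Println(m1.String(), m2.String()) // ffffff00 ffffff00

        // Anything else comes back nil.
        fmt.Println(pflag.ParseIPv4Mask("not-a-mask") == nil) // true
    }
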
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 0000000..e2c1b8b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
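
The net.IPNet flag parses its value with net.ParseCIDR, so host bits are masked off. A brief illustrative sketch; the flag name and addresses are assumptions:

    package main

    import (
        "fmt"
        "net"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

        // Hypothetical flag; the default must already be a parsed net.IPNet.
        _, defNet, _ := net.ParseCIDR("10.0.0.0/8")
        subnet := fs.IPNet("subnet", *defNet, "allowed subnet")

        // The value is parsed with net.ParseCIDR; host bits are dropped.
        if err := fs.Parse([]string{"--subnet=192.168.1.17/24"}); err != nil {
            panic(err)
        }

        fmt.Println(subnet.String()) // 192.168.1.0/24
    }
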
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..04e0a26
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 0000000..4894af8
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,129 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+ ssv := new(stringArrayValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+ if !s.changed {
+ *s.value = []string{val}
+ s.changed = true
+ } else {
+ *s.value = append(*s.value, val)
+ }
+ return nil
+}
+
+func (s *stringArrayValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringArrayValue) Replace(val []string) error {
+ out := make([]string, len(val))
+ copy(out, val)
+ *s.value = out
+ return nil
+}
+
+func (s *stringArrayValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = d
+ }
+ return out
+}
+
+func (s *stringArrayValue) Type() string {
+ return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause an array with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..3cb2e69
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,163 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+ b := &bytes.Buffer{}
+ w := csv.NewWriter(b)
+ err := w.Write(vals)
+ if err != nil {
+ return "", err
+ }
+ w.Flush()
+ return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ v, err := readAsCSV(val)
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func (s *stringSliceValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringSliceValue) Replace(val []string) error {
+ *s.value = val
+ return nil
+}
+
+func (s *stringSliceValue) GetSlice() []string {
+ return *s.value
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
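
The comments above describe the difference between StringSlice and StringArray: a StringSlice splits each argument on commas via the CSV reader, while a StringArray keeps each argument verbatim. A small sketch contrasting the two, with hypothetical flag names:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

        // Hypothetical flags illustrating the difference described above.
        ss := fs.StringSlice("ss", nil, "comma-separated values are split")
        sa := fs.StringArray("sa", nil, "each value is kept verbatim")

        args := []string{"--ss=v1,v2", "--ss=v3", "--sa=v1,v2", "--sa=v3"}
        if err := fs.Parse(args); err != nil {
            panic(err)
        }

        fmt.Printf("%q\n", *ss) // ["v1" "v2" "v3"]
        fmt.Printf("%q\n", *sa) // ["v1,v2" "v3"]
    }
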
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 0000000..5ceda39
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+ value *map[string]int
+ changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+ ssv := new(stringToIntValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToIntValue) Type() string {
+ return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+ val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+ if err != nil {
+ return map[string]int{}, err
+ }
+ return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
+// A single argument may contain multiple comma-separated key=value pairs.
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs.
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs.
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs.
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
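A hedged usage sketch for the map-of-int flag defined above; the FlagSet and flag names below are illustrative only. Note that repeated occurrences merge keys into the map rather than replacing it.

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        fs.StringToInt("limits", map[string]int{}, "per-queue limits, e.g. high=10,low=2")
        // After the first Set the flag is marked as changed, so the second
        // occurrence merges into the existing map instead of replacing it.
        _ = fs.Parse([]string{"--limits=high=10,low=2", "--limits=batch=1"})
        limits, _ := fs.GetStringToInt("limits")
        fmt.Println(limits) // map[batch:1 high:10 low:2]
    }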
diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go
new file mode 100644
index 0000000..a807a04
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int64.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt64 Value
+type stringToInt64Value struct {
+ value *map[string]int64
+ changed bool
+}
+
+func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value {
+ ssv := new(stringToInt64Value)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToInt64Value) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToInt64Value) Type() string {
+ return "stringToInt64"
+}
+
+func (s *stringToInt64Value) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.FormatInt(v, 10))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToInt64Conv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt64 returns the map[string]int64 value of a flag with the given name
+func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) {
+ val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv)
+ if err != nil {
+ return map[string]int64{}, err
+ }
+ return val.(map[string]int64), nil
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the values of the multiple flags.
+// A single argument may contain multiple comma-separated key=value pairs.
+func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs.
+func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs.
+func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs.
+func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, "", value, usage)
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 0000000..890a01a
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+ value *map[string]string
+ changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+ ssv := new(stringToStringValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+ var ss []string
+ n := strings.Count(val, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", val)
+ case 1:
+ ss = append(ss, strings.Trim(val, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(val))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToStringValue) Type() string {
+ return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+ records := make([]string, 0, len(*s.value)>>1)
+ for k, v := range *s.value {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]string{}, nil
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+ val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+ if err != nil {
+ return map[string]string{}, err
+ }
+ return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
+// A single argument may contain multiple comma-separated key=value pairs; values containing commas can be CSV-quoted.
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs; values containing commas can be CSV-quoted.
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs; values containing commas can be CSV-quoted.
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// A single argument may contain multiple comma-separated key=value pairs; values containing commas can be CSV-quoted.
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
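Because Set falls back to encoding/csv whenever an argument contains more than one "=", a pair whose value itself contains commas can be passed by CSV-quoting that pair. A small hypothetical sketch (flag and key names invented for illustration):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        labels := fs.StringToString("label", nil, "key=value labels")
        // The second pair is CSV-quoted, so the comma stays inside the value.
        _ = fs.Parse([]string{`--label=env=prod,"team=core,infra"`})
        fmt.Println(*labels) // map[env:prod team:core,infra]
    }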
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..dcbc2b7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
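The uint8/uint16/uint32/uint64 files that follow mirror this one; only the bit size handed to strconv.ParseUint changes. A hypothetical sketch of the shared usage pattern:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        // Shorthand "-w" works because UintP registers the flag with a
        // single-letter alias; "--workers" remains available as well.
        workers := pflag.UintP("workers", "w", 4, "number of worker goroutines")
        pflag.Parse()
        fmt.Println(*workers)
    }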
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..7e9914e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..d802453
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..f62240f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..bb0e83c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 0000000..5fa9248
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,168 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+ value *[]uint
+ changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+ uisv := new(uintSliceValue)
+ uisv.value = p
+ *uisv.value = val
+ return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return err
+ }
+ out[i] = uint(u)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *uintSliceValue) Type() string {
+ return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *uintSliceValue) fromString(val string) (uint, error) {
+ t, err := strconv.ParseUint(val, 10, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(t), nil
+}
+
+func (s *uintSliceValue) toString(val uint) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *uintSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *uintSliceValue) Replace(val []string) error {
+ out := make([]uint, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *uintSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []uint{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = uint(u)
+ }
+ return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+ val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+ if err != nil {
+ return []uint{}, err
+ }
+ return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
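A brief, hypothetical sketch of the slice-of-uint flag above; unlike the map flags, repeated occurrences append to the slice.

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        fs.UintSlice("ports", []uint{8080}, "ports to listen on")
        // The default is dropped on first use; later uses append, so this
        // parses to [80 443 9090].
        _ = fs.Parse([]string{"--ports=80,443", "--ports=9090"})
        ports, _ := fs.GetUintSlice("ports")
        fmt.Println(ports)
    }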
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
new file mode 100644
index 0000000..4b0421c
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
new file mode 100644
index 0000000..41649d2
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -0,0 +1,394 @@
+package assert
+
+import (
+ "fmt"
+ "reflect"
+)
+
+type CompareType int
+
+const (
+ compareLess CompareType = iota - 1
+ compareEqual
+ compareGreater
+)
+
+var (
+ intType = reflect.TypeOf(int(1))
+ int8Type = reflect.TypeOf(int8(1))
+ int16Type = reflect.TypeOf(int16(1))
+ int32Type = reflect.TypeOf(int32(1))
+ int64Type = reflect.TypeOf(int64(1))
+
+ uintType = reflect.TypeOf(uint(1))
+ uint8Type = reflect.TypeOf(uint8(1))
+ uint16Type = reflect.TypeOf(uint16(1))
+ uint32Type = reflect.TypeOf(uint32(1))
+ uint64Type = reflect.TypeOf(uint64(1))
+
+ float32Type = reflect.TypeOf(float32(1))
+ float64Type = reflect.TypeOf(float64(1))
+
+ stringType = reflect.TypeOf("")
+)
+
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+ obj1Value := reflect.ValueOf(obj1)
+ obj2Value := reflect.ValueOf(obj2)
+
+ // throughout this switch we try and avoid calling .Convert() if possible,
+ // as this has a pretty big performance impact
+ switch kind {
+ case reflect.Int:
+ {
+ intobj1, ok := obj1.(int)
+ if !ok {
+ intobj1 = obj1Value.Convert(intType).Interface().(int)
+ }
+ intobj2, ok := obj2.(int)
+ if !ok {
+ intobj2 = obj2Value.Convert(intType).Interface().(int)
+ }
+ if intobj1 > intobj2 {
+ return compareGreater, true
+ }
+ if intobj1 == intobj2 {
+ return compareEqual, true
+ }
+ if intobj1 < intobj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int8:
+ {
+ int8obj1, ok := obj1.(int8)
+ if !ok {
+ int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
+ }
+ int8obj2, ok := obj2.(int8)
+ if !ok {
+ int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
+ }
+ if int8obj1 > int8obj2 {
+ return compareGreater, true
+ }
+ if int8obj1 == int8obj2 {
+ return compareEqual, true
+ }
+ if int8obj1 < int8obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int16:
+ {
+ int16obj1, ok := obj1.(int16)
+ if !ok {
+ int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
+ }
+ int16obj2, ok := obj2.(int16)
+ if !ok {
+ int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
+ }
+ if int16obj1 > int16obj2 {
+ return compareGreater, true
+ }
+ if int16obj1 == int16obj2 {
+ return compareEqual, true
+ }
+ if int16obj1 < int16obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int32:
+ {
+ int32obj1, ok := obj1.(int32)
+ if !ok {
+ int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
+ }
+ int32obj2, ok := obj2.(int32)
+ if !ok {
+ int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
+ }
+ if int32obj1 > int32obj2 {
+ return compareGreater, true
+ }
+ if int32obj1 == int32obj2 {
+ return compareEqual, true
+ }
+ if int32obj1 < int32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int64:
+ {
+ int64obj1, ok := obj1.(int64)
+ if !ok {
+ int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
+ }
+ int64obj2, ok := obj2.(int64)
+ if !ok {
+ int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
+ }
+ if int64obj1 > int64obj2 {
+ return compareGreater, true
+ }
+ if int64obj1 == int64obj2 {
+ return compareEqual, true
+ }
+ if int64obj1 < int64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint:
+ {
+ uintobj1, ok := obj1.(uint)
+ if !ok {
+ uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
+ }
+ uintobj2, ok := obj2.(uint)
+ if !ok {
+ uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
+ }
+ if uintobj1 > uintobj2 {
+ return compareGreater, true
+ }
+ if uintobj1 == uintobj2 {
+ return compareEqual, true
+ }
+ if uintobj1 < uintobj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint8:
+ {
+ uint8obj1, ok := obj1.(uint8)
+ if !ok {
+ uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
+ }
+ uint8obj2, ok := obj2.(uint8)
+ if !ok {
+ uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
+ }
+ if uint8obj1 > uint8obj2 {
+ return compareGreater, true
+ }
+ if uint8obj1 == uint8obj2 {
+ return compareEqual, true
+ }
+ if uint8obj1 < uint8obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint16:
+ {
+ uint16obj1, ok := obj1.(uint16)
+ if !ok {
+ uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
+ }
+ uint16obj2, ok := obj2.(uint16)
+ if !ok {
+ uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
+ }
+ if uint16obj1 > uint16obj2 {
+ return compareGreater, true
+ }
+ if uint16obj1 == uint16obj2 {
+ return compareEqual, true
+ }
+ if uint16obj1 < uint16obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint32:
+ {
+ uint32obj1, ok := obj1.(uint32)
+ if !ok {
+ uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
+ }
+ uint32obj2, ok := obj2.(uint32)
+ if !ok {
+ uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
+ }
+ if uint32obj1 > uint32obj2 {
+ return compareGreater, true
+ }
+ if uint32obj1 == uint32obj2 {
+ return compareEqual, true
+ }
+ if uint32obj1 < uint32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint64:
+ {
+ uint64obj1, ok := obj1.(uint64)
+ if !ok {
+ uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
+ }
+ uint64obj2, ok := obj2.(uint64)
+ if !ok {
+ uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
+ }
+ if uint64obj1 > uint64obj2 {
+ return compareGreater, true
+ }
+ if uint64obj1 == uint64obj2 {
+ return compareEqual, true
+ }
+ if uint64obj1 < uint64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Float32:
+ {
+ float32obj1, ok := obj1.(float32)
+ if !ok {
+ float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
+ }
+ float32obj2, ok := obj2.(float32)
+ if !ok {
+ float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
+ }
+ if float32obj1 > float32obj2 {
+ return compareGreater, true
+ }
+ if float32obj1 == float32obj2 {
+ return compareEqual, true
+ }
+ if float32obj1 < float32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Float64:
+ {
+ float64obj1, ok := obj1.(float64)
+ if !ok {
+ float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
+ }
+ float64obj2, ok := obj2.(float64)
+ if !ok {
+ float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
+ }
+ if float64obj1 > float64obj2 {
+ return compareGreater, true
+ }
+ if float64obj1 == float64obj2 {
+ return compareEqual, true
+ }
+ if float64obj1 < float64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.String:
+ {
+ stringobj1, ok := obj1.(string)
+ if !ok {
+ stringobj1 = obj1Value.Convert(stringType).Interface().(string)
+ }
+ stringobj2, ok := obj2.(string)
+ if !ok {
+ stringobj2 = obj2Value.Convert(stringType).Interface().(string)
+ }
+ if stringobj1 > stringobj2 {
+ return compareGreater, true
+ }
+ if stringobj1 == stringobj2 {
+ return compareEqual, true
+ }
+ if stringobj1 < stringobj2 {
+ return compareLess, true
+ }
+ }
+ }
+
+ return compareEqual, false
+}
+
+// Greater asserts that the first element is greater than the second
+//
+// assert.Greater(t, 2, 1)
+// assert.Greater(t, float64(2), float64(1))
+// assert.Greater(t, "b", "a")
+func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// assert.GreaterOrEqual(t, 2, 1)
+// assert.GreaterOrEqual(t, 2, 2)
+// assert.GreaterOrEqual(t, "b", "a")
+// assert.GreaterOrEqual(t, "b", "b")
+func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+}
+
+// Less asserts that the first element is less than the second
+//
+// assert.Less(t, 1, 2)
+// assert.Less(t, float64(1), float64(2))
+// assert.Less(t, "a", "b")
+func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqual(t, 1, 2)
+// assert.LessOrEqual(t, 2, 2)
+// assert.LessOrEqual(t, "a", "b")
+// assert.LessOrEqual(t, "b", "b")
+func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+}
+
+// Positive asserts that the specified element is positive
+//
+// assert.Positive(t, 1)
+// assert.Positive(t, 1.23)
+func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ zero := reflect.Zero(reflect.TypeOf(e))
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+}
+
+// Negative asserts that the specified element is negative
+//
+// assert.Negative(t, -1)
+// assert.Negative(t, -1.23)
+func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ zero := reflect.Zero(reflect.TypeOf(e))
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+}
+
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ e1Kind := reflect.ValueOf(e1).Kind()
+ e2Kind := reflect.ValueOf(e2).Kind()
+ if e1Kind != e2Kind {
+ return Fail(t, "Elements should be the same type", msgAndArgs...)
+ }
+
+ compareResult, isComparable := compare(e1, e2, e1Kind)
+ if !isComparable {
+ return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+ }
+
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+ }
+
+ return true
+}
+
+func containsValue(values []CompareType, value CompareType) bool {
+ for _, v := range values {
+ if v == value {
+ return true
+ }
+ }
+
+ return false
+}
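For orientation, a small hypothetical test showing how the ordering assertions above behave; note that compareTwoValues requires both arguments to share the same kind, so a mixed int/int64 comparison fails with "Elements should be the same type" rather than being converted.

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestOrdering(t *testing.T) {
        assert.Greater(t, int64(10), int64(3))
        assert.LessOrEqual(t, "alpha", "beta") // strings compare lexicographically
        assert.Positive(t, 0.5)
        assert.Negative(t, -2)
    }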
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
new file mode 100644
index 0000000..4dfd122
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -0,0 +1,741 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// Eventuallyf asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick.
+//
+// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure through Fail.
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails the test immediately.
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
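+//
+// Illustrative usage (example added for clarity; the maps shown are hypothetical):
+//
+// assert.InDeltaMapValuesf(t, map[string]float64{"pi": 3.14}, map[string]float64{"pi": 3.1416}, 0.01, "error message %s", "formatted")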
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
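+//
+// Illustrative usage (example values are hypothetical; 101 is within 2% of 100):
+//
+// assert.InEpsilonf(t, 100.0, 101.0, 0.02, "error message %s", "formatted")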
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
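+//
+// Illustrative usage (MyStruct is a hypothetical type):
+//
+// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")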
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// assert.Lessf(t, 1, 2, "error message %s", "formatted")
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+// assert.Lessf(t, "a", "b", "error message %s", "formatted")
+func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// Negativef asserts that the specified element is negative
+//
+// assert.Negativef(t, -1, "error message %s", "formatted")
+// assert.Negativef(t, -1.23, "error message %s", "formatted")
+func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negative(t, e, append([]interface{}{msg}, args...)...)
+}
+
+// Neverf asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails only if the path points to an existing _directory_.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// only if the path points to an existing _file_.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
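+//
+// Illustrative usage (err and io.EOF stand in for whatever errors the test deals with):
+//
+// assert.NotErrorIsf(t, err, io.EOF, "error message %s", "formatted")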
+func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
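+//
+// Illustrative usage (the value 42 is an arbitrary non-zero example):
+//
+// assert.NotZerof(t, 42, "error message %s", "formatted")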
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Positivef asserts that the specified element is positive
+//
+// assert.Positivef(t, 1, "error message %s", "formatted")
+// assert.Positivef(t, 1.23, "error message %s", "formatted")
+func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positive(t, e, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
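+//
+// Illustrative usage (hypothetical YAML documents that differ only in key order):
+//
+// assert.YAMLEqf(t, "a: 1\nb: 2", "b: 2\na: 1", "error message %s", "formatted")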
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
new file mode 100644
index 0000000..d2bb0b8
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentFormat}}
+func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
+ if h, ok := t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
new file mode 100644
index 0000000..25337a6
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -0,0 +1,1470 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Conditionf(a.t, comp, msg, args...)
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
+// Empty asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Empty(obj)
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+// a.Equal(123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString)
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
+// }
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Error(a.t, err, msgAndArgs...)
+}
+
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
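+//
+// Illustrative usage (assumes err may wrap a *fs.PathError):
+//
+// var pathErr *fs.PathError
+// a.ErrorAsf(err, &pathErr, "error message %s", "formatted")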
+func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAsf(a.t, err, target, msg, args...)
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIsf(a.t, err, target, msg, args...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Errorf(a.t, err, msg, args...)
+}
+
+// Eventually asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Eventuallyf asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure with the given failure message.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test immediately.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure with the given failure message.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool)
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExistsf(a.t, path, msg, args...)
+}
+
+// Greater asserts that the first element is greater than the second
+//
+// a.Greater(2, 1)
+// a.Greater(float64(2), float64(1))
+// a.Greater("b", "a")
+func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greater(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqual(2, 1)
+// a.GreaterOrEqual(2, 2)
+// a.GreaterOrEqual("b", "a")
+// a.GreaterOrEqual("b", "b")
+func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// a.Greaterf(2, 1, "error message %s", "formatted")
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+// a.Greaterf("b", "a", "error message %s", "formatted")
+func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greaterf(a.t, e1, e2, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// a.InDelta(math.Pi, 22/7.0, 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsDecreasing asserts that the collection is decreasing
+//
+// a.IsDecreasing([]int{2, 1, 0})
+// a.IsDecreasing([]float64{2, 1})
+// a.IsDecreasing([]string{"b", "a"})
+func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
+// a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasingf(a.t, object, msg, args...)
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// a.IsIncreasing([]int{1, 2, 3})
+// a.IsIncreasing([]float64{1, 2})
+// a.IsIncreasing([]string{"a", "b"})
+func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
+// a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasingf(a.t, object, msg, args...)
+}
+
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// a.IsNonDecreasing([]int{1, 1, 2})
+// a.IsNonDecreasing([]float64{1, 2})
+// a.IsNonDecreasing([]string{"a", "b"})
+func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasingf(a.t, object, msg, args...)
+}
+
+// IsNonIncreasing asserts that the collection is not increasing
+//
+// a.IsNonIncreasing([]int{2, 1, 1})
+// a.IsNonIncreasing([]float64{2, 1})
+// a.IsNonIncreasing([]string{"b", "a"})
+func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasingf(a.t, object, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// a.Len(mySlice, 3)
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Lenf(a.t, object, length, msg, args...)
+}
+
+// Less asserts that the first element is less than the second
+//
+// a.Less(1, 2)
+// a.Less(float64(1), float64(2))
+// a.Less("a", "b")
+func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Less(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqual(1, 2)
+// a.LessOrEqual(2, 2)
+// a.LessOrEqual("a", "b")
+// a.LessOrEqual("b", "b")
+func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqualf(1, 2, "error message %s", "formatted")
+// a.LessOrEqualf(2, 2, "error message %s", "formatted")
+// a.LessOrEqualf("a", "b", "error message %s", "formatted")
+// a.LessOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// a.Lessf(1, 2, "error message %s", "formatted")
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+// a.Lessf("a", "b", "error message %s", "formatted")
+func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Lessf(a.t, e1, e2, msg, args...)
+}
+
+// Negative asserts that the specified element is negative
+//
+// a.Negative(-1)
+// a.Negative(-1.23)
+func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negative(a.t, e, msgAndArgs...)
+}
+
+// Negativef asserts that the specified element is negative
+//
+// a.Negativef(-1, "error message %s", "formatted")
+// a.Negativef(-1.23, "error message %s", "formatted")
+func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negativef(a.t, e, msg, args...)
+}
+
+// Never asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// a.Nil(err)
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nilf(a.t, object, msg, args...)
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExistsf(a.t, path, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoError(err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoErrorf(a.t, err, msg, args...)
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExistsf(a.t, path, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmpty(obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValues(obj1, obj2)
+func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIsf(a.t, err, target, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSamef(a.t, expected, actual, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panics(func(){ GoCrazy() })
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panicsf(a.t, f, msg, args...)
+}
+
+// Positive asserts that the specified element is positive
+//
+// a.Positive(1)
+// a.Positive(1.23)
+func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positive(a.t, e, msgAndArgs...)
+}
+
+// Positivef asserts that the specified element is positive
+//
+// a.Positivef(1, "error message %s", "formatted")
+// a.Positivef(1.23, "error message %s", "formatted")
+func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positivef(a.t, e, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// a.Same(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Same(a.t, expected, actual, msgAndArgs...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Samef(a.t, expected, actual, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+// a.True(myBool)
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
+// Zero asserts that i is the zero value for its type.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zero(a.t, i, msgAndArgs...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zerof(a.t, i, msg, args...)
+}
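+
+// Usage sketch (illustrative only; the test name and values are assumed for
+// this example, and assert.New is assumed as the package's *Assertions
+// constructor):
+//
+//	func TestValues(t *testing.T) {
+//	    a := assert.New(t)       // bind the forwarder to t once
+//	    a.Len([]int{1, 2, 3}, 3) // forwards to Len(a.t, ...)
+//	    a.NotNil(&struct{}{})    // forwards to NotNil(a.t, ...)
+//	    a.WithinDuration(time.Now(), time.Now(), time.Second)
+//	}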
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
new file mode 100644
index 0000000..188bb9e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
+ if h, ok := a.t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
new file mode 100644
index 0000000..1c3b471
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -0,0 +1,81 @@
+package assert
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// isOrdered checks that adjacent elements of the collection compare according to allowedComparesResults.
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+ objKind := reflect.TypeOf(object).Kind()
+ if objKind != reflect.Slice && objKind != reflect.Array {
+ return false
+ }
+
+ objValue := reflect.ValueOf(object)
+ objLen := objValue.Len()
+
+ if objLen <= 1 {
+ return true
+ }
+
+ value := objValue.Index(0)
+ valueInterface := value.Interface()
+ firstValueKind := value.Kind()
+
+ for i := 1; i < objLen; i++ {
+ prevValue := value
+ prevValueInterface := valueInterface
+
+ value = objValue.Index(i)
+ valueInterface = value.Interface()
+
+ compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
+
+ if !isComparable {
+ return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+ }
+
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// assert.IsIncreasing(t, []int{1, 2, 3})
+// assert.IsIncreasing(t, []float64{1, 2})
+// assert.IsIncreasing(t, []string{"a", "b"})
+func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+}
+
+// IsNonIncreasing asserts that the collection is not increasing
+//
+// assert.IsNonIncreasing(t, []int{2, 1, 1})
+// assert.IsNonIncreasing(t, []float{2, 1})
+// assert.IsNonIncreasing(t, []string{"b", "a"})
+func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+}
+
+// IsDecreasing asserts that the collection is decreasing
+//
+// assert.IsDecreasing(t, []int{2, 1, 0})
+// assert.IsDecreasing(t, []float64{2, 1})
+// assert.IsDecreasing(t, []string{"b", "a"})
+func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+}
+
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasing(t, []int{1, 1, 2})
+// assert.IsNonDecreasing(t, []float64{1, 2})
+// assert.IsNonDecreasing(t, []string{"a", "b"})
+func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+}
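+
+// Behaviour sketch (values assumed for illustration): isOrdered walks adjacent
+// pairs and requires every comparison result to be in the allowed set.
+//
+//	assert.IsIncreasing(t, []int{1, 2, 3})    // 1<2, 2<3           -> passes
+//	assert.IsNonDecreasing(t, []int{1, 1, 2}) // 1<=1, 1<=2         -> passes
+//	assert.IsDecreasing(t, []int{3, 2, 2})    // 2>2 is not allowed -> fails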
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 0000000..bcac440
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,1774 @@
+package assert
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/pmezard/go-difflib/difflib"
+ yaml "gopkg.in/yaml.v3"
+)
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+}
+
+// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
+// for table driven tests.
+type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool
+
+// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
+// for table driven tests.
+type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
+
+// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
+// for table driven tests.
+type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
+
+// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
+// for table driven tests.
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+
+// Comparison is a custom function that returns true on success and false on failure
+type Comparison func() (success bool)
+
+/*
+ Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+ if expected == nil || actual == nil {
+ return expected == actual
+ }
+
+ exp, ok := expected.([]byte)
+ if !ok {
+ return reflect.DeepEqual(expected, actual)
+ }
+
+ act, ok := actual.([]byte)
+ if !ok {
+ return false
+ }
+ if exp == nil || act == nil {
+ return exp == nil && act == nil
+ }
+ return bytes.Equal(exp, act)
+}
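+
+// Illustrative examples (assumed values, not part of the package's tests):
+//
+//	ObjectsAreEqual(123, 123)                     // true  (reflect.DeepEqual)
+//	ObjectsAreEqual(123, int64(123))              // false (different types)
+//	ObjectsAreEqual([]byte("abc"), []byte("abc")) // true  (bytes.Equal fast path)
+//	ObjectsAreEqual([]byte("abc"), "abc")         // false (actual is not []byte)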
+
+// ObjectsAreEqualValues reports whether two objects are equal, or whether their
+// values are equal after conversion to the same type.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+ if ObjectsAreEqual(expected, actual) {
+ return true
+ }
+
+ actualType := reflect.TypeOf(actual)
+ if actualType == nil {
+ return false
+ }
+ expectedValue := reflect.ValueOf(expected)
+ if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+ // Attempt comparison after type conversion
+ return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+ }
+
+ return false
+}
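+
+// Illustrative examples (assumed values): the expected value is converted to
+// the actual value's type before the deep comparison.
+//
+//	ObjectsAreEqualValues(uint32(10), int32(10)) // true  (convertible and equal)
+//	ObjectsAreEqualValues(uint32(10), int32(11)) // false (values differ)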
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+ var pc uintptr
+ var ok bool
+ var file string
+ var line int
+ var name string
+
+ callers := []string{}
+ for i := 0; ; i++ {
+ pc, file, line, ok = runtime.Caller(i)
+ if !ok {
+ // The breaks below failed to terminate the loop, and we ran off the
+ // end of the call stack.
+ break
+ }
+
+ // This is a huge edge case, but it will panic if this is the case, see #180
+ if file == "" {
+ break
+ }
+
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+
+ // testing.tRunner is the standard library function that calls
+ // tests. Subtests are called directly by tRunner, without going through
+ // the Test/Benchmark/Example function that contains the t.Run calls, so
+ // with subtests we should break when we hit tRunner, without adding it
+ // to the list of callers.
+ if name == "testing.tRunner" {
+ break
+ }
+
+ parts := strings.Split(file, "/")
+ file = parts[len(parts)-1]
+ if len(parts) > 1 {
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+ }
+
+ // Drop the package
+ segments := strings.Split(name, ".")
+ name = segments[len(segments)-1]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+ }
+
+ return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ r, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(r)
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+ if len(msgAndArgs) == 0 || msgAndArgs == nil {
+ return ""
+ }
+ if len(msgAndArgs) == 1 {
+ msg := msgAndArgs[0]
+ if msgAsStr, ok := msg.(string); ok {
+ return msgAsStr
+ }
+ return fmt.Sprintf("%+v", msg)
+ }
+ if len(msgAndArgs) > 1 {
+ return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+ }
+ return ""
+}
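+
+// Illustrative examples of the msgAndArgs convention used throughout this
+// package (values assumed for illustration):
+//
+//	messageFromMsgAndArgs()                    // ""
+//	messageFromMsgAndArgs("boom")              // "boom"
+//	messageFromMsgAndArgs("id %d missing", 42) // "id 42 missing"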
+
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
+ outBuf := new(bytes.Buffer)
+
+ for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ // no need to align first line because it starts at the correct location (after the label)
+ if i != 0 {
+ // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+ outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
+ }
+ outBuf.WriteString(scanner.Text())
+ }
+
+ return outBuf.String()
+}
+
+type failNower interface {
+ FailNow()
+}
+
+// FailNow fails the test
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, failureMessage, msgAndArgs...)
+
+ // We cannot extend TestingT with FailNow() and
+ // maintain backwards compatibility, so we fallback
+ // to panicking when FailNow is not available in
+ // TestingT.
+ // See issue #263
+
+ if t, ok := t.(failNower); ok {
+ t.FailNow()
+ } else {
+ panic("test failed and t is missing `FailNow()`")
+ }
+ return false
+}
+
+// Fail reports a failure through the TestingT's Errorf.
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ content := []labeledContent{
+ {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")},
+ {"Error", failureMessage},
+ }
+
+ // Add test name if the Go version supports it
+ if n, ok := t.(interface {
+ Name() string
+ }); ok {
+ content = append(content, labeledContent{"Test", n.Name()})
+ }
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ if len(message) > 0 {
+ content = append(content, labeledContent{"Messages", message})
+ }
+
+ t.Errorf("\n%s", ""+labeledOutput(content...))
+
+ return false
+}
+
+type labeledContent struct {
+ label string
+ content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+// \t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+ longestLabel := 0
+ for _, v := range content {
+ if len(v.label) > longestLabel {
+ longestLabel = len(v.label)
+ }
+ }
+ var output string
+ for _, v := range content {
+ output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+ }
+ return output
+}
+
+// Implements asserts that the specified object implements the given interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+ }
+ if !reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+ }
+
+ return true
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+ return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if !ObjectsAreEqual(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if expected == nil && actual == nil {
+ return nil
+ }
+
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// assert.Same(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf("Not same: \n"+
+ "expected: %p %#v\n"+
+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf(
+ "Expected and actual point to the same object: %p %#v",
+ expected, expected), msgAndArgs...)
+ }
+ return true
+}
+
+// samePointers compares two generic interface objects and returns whether
+// they point to the same object
+func samePointers(first, second interface{}) bool {
+ firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
+ if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
+ return false
+ }
+
+ firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
+ if firstType != secondType {
+ return false
+ }
+
+ // compare pointer addresses
+ return first == second
+}
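+
+// Illustrative sketch of Same versus Equal for pointers (assumed values):
+//
+//	x, y := 1, 1
+//	assert.Equal(t, &x, &y) // passes: referenced values are equal
+//	assert.Same(t, &x, &y)  // fails:  different addresses
+//	assert.Same(t, &x, &x)  // passes: identical pointer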
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parentheses similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+ if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
+ fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
+ }
+ switch expected.(type) {
+ case time.Duration:
+ return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
+ }
+ return truncatingFormat(expected), truncatingFormat(actual)
+}
+
+// truncatingFormat formats the data and truncates it if it's too long.
+//
+// This helps keep formatted error message lines from exceeding the
+// bufio.MaxScanTokenSize max line length that the go testing framework imposes.
+func truncatingFormat(data interface{}) string {
+ value := fmt.Sprintf("%#v", data)
+ max := bufio.MaxScanTokenSize - 100 // Give us some space for the type info too if needed.
+ if len(value) > max {
+ value = value[0:max] + "<... truncated>"
+ }
+ return value
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !ObjectsAreEqualValues(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123))
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+ }
+
+ return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err)
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if !isNil(object) {
+ return true
+ }
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// containsKind checks if a specified kind is in the slice of kinds.
+func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
+ for i := 0; i < len(kinds); i++ {
+ if kind == kinds[i] {
+ return true
+ }
+ }
+
+ return false
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+ if object == nil {
+ return true
+ }
+
+ value := reflect.ValueOf(object)
+ kind := value.Kind()
+ isNilableKind := containsKind(
+ []reflect.Kind{
+ reflect.Chan, reflect.Func,
+ reflect.Interface, reflect.Map,
+ reflect.Ptr, reflect.Slice},
+ kind)
+
+ if isNilableKind && value.IsNil() {
+ return true
+ }
+
+ return false
+}
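+
+// Illustrative note (assumed values): a typed nil stored in an interface does
+// not compare equal to the untyped nil literal, which is why isNil also
+// inspects the reflect.Kind.
+//
+//	var p *int
+//	p == nil              // true
+//	interface{}(p) == nil // false: the interface holds a (*int)(nil)
+//	isNil(p)              // true:  Ptr kind with IsNil() == true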
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err)
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if isNil(object) {
+ return true
+ }
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+ // get nil case out of the way
+ if object == nil {
+ return true
+ }
+
+ objValue := reflect.ValueOf(object)
+
+ switch objValue.Kind() {
+ // collection types are empty when they have no element
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ return objValue.Len() == 0
+ // pointers are empty if nil or if the value they point to is empty
+ case reflect.Ptr:
+ if objValue.IsNil() {
+ return true
+ }
+ deref := objValue.Elem().Interface()
+ return isEmpty(deref)
+ // for all other types, compare against the zero value
+ default:
+ zero := reflect.Zero(objValue.Type())
+ return reflect.DeepEqual(object, zero.Interface())
+ }
+}
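+
+// Illustrative examples of what isEmpty treats as empty (assumed values):
+//
+//	isEmpty(nil)                // true
+//	isEmpty("")                 // true  (zero value of string)
+//	isEmpty(0)                  // true  (zero value of int)
+//	isEmpty([]int{})            // true  (len == 0)
+//	isEmpty(&struct{ N int }{}) // true  (pointer to a zero value)
+//	isEmpty([]int{0})           // false (one element)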
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ pass := isEmpty(object)
+ if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ pass := !isEmpty(object)
+ if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if that is impossible.
+func getLen(x interface{}) (ok bool, length int) {
+ v := reflect.ValueOf(x)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+ return true, v.Len()
+}
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// assert.Len(t, mySlice, 3)
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ ok, l := getLen(object)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+ }
+
+ if l != length {
+ return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ }
+ return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+ if !value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Should be true", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool)
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+ if value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Should be false", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if ObjectsAreEqual(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValues(t, obj1, obj2)
+func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if ObjectsAreEqualValues(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+}
+
+// includeElement tries to loop over the list to check if it contains the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+ listValue := reflect.ValueOf(list)
+ listKind := reflect.TypeOf(list).Kind()
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ found = false
+ }
+ }()
+
+ if listKind == reflect.String {
+ elementValue := reflect.ValueOf(element)
+ return true, strings.Contains(listValue.String(), elementValue.String())
+ }
+
+ if listKind == reflect.Map {
+ mapKeys := listValue.MapKeys()
+ for i := 0; i < len(mapKeys); i++ {
+ if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+ }
+
+ for i := 0; i < listValue.Len(); i++ {
+ if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+
+}
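+
+// Illustrative examples of includeElement's semantics (assumed values):
+//
+//	includeElement("Hello World", "World")      // (true, true)   substring match
+//	includeElement(map[string]int{"a": 1}, "a") // (true, true)   matches keys, not values
+//	includeElement([]int{1, 2, 3}, 4)           // (true, false)  not found
+//	includeElement(42, 4)                       // (false, false) not iterable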
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if found {
+ return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if subset == nil {
+ return true // we consider nil to be equal to the nil set
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) contains not all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if subset == nil {
+ return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if isEmpty(listA) && isEmpty(listB) {
+ return true
+ }
+
+ if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
+ return false
+ }
+
+ extraA, extraB := diffLists(listA, listB)
+
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return true
+ }
+
+ return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
+}
+
+// isList checks that the provided value is array or slice.
+func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
+ kind := reflect.TypeOf(list).Kind()
+ if kind != reflect.Array && kind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
+ msgAndArgs...)
+ }
+ return true
+}
+
+// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
+// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
+// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
+func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
+ aValue := reflect.ValueOf(listA)
+ bValue := reflect.ValueOf(listB)
+
+ aLen := aValue.Len()
+ bLen := bValue.Len()
+
+ // Mark indexes in bValue that we already used
+ visited := make([]bool, bLen)
+ for i := 0; i < aLen; i++ {
+ element := aValue.Index(i).Interface()
+ found := false
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
+ visited[j] = true
+ found = true
+ break
+ }
+ }
+ if !found {
+ extraA = append(extraA, element)
+ }
+ }
+
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ extraB = append(extraB, bValue.Index(j).Interface())
+ }
+
+ return
+}
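+
+// Illustrative example of diffLists with duplicates (assumed values):
+//
+//	diffLists([]int{1, 2, 2}, []int{2, 1, 3})
+//	// extraA = [2]  (the second 2 in A has no unvisited match in B)
+//	// extraB = [3]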
+
+func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
+ var msg bytes.Buffer
+
+ msg.WriteString("elements differ")
+ if len(extraA) > 0 {
+ msg.WriteString("\n\nextra elements in list A:\n")
+ msg.WriteString(spewConfig.Sdump(extraA))
+ }
+ if len(extraB) > 0 {
+ msg.WriteString("\n\nextra elements in list B:\n")
+ msg.WriteString(spewConfig.Sdump(extraB))
+ }
+ msg.WriteString("\n\nlistA:\n")
+ msg.WriteString(spewConfig.Sdump(listA))
+ msg.WriteString("\n\nlistB:\n")
+ msg.WriteString(spewConfig.Sdump(listB))
+
+ return msg.String()
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ result := comp()
+ if !result {
+ Fail(t, "Condition failed!", msgAndArgs...)
+ }
+ return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic reports whether the function passed to it panics, along with the recovered panic value and the stack trace.
+func didPanic(f PanicTestFunc) (bool, interface{}, string) {
+
+ didPanic := false
+ var message interface{}
+ var stack string
+ func() {
+
+ defer func() {
+ if message = recover(); message != nil {
+ didPanic = true
+ stack = string(debug.Stack())
+ }
+ }()
+
+ // call the target function
+ f()
+
+ }()
+
+ return didPanic, message, stack
+
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(t, func(){ GoCrazy() })
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ if panicValue != expected {
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ panicErr, ok := panicValue.(error)
+ if !ok || panicErr.Error() != errString {
+ return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(t, func(){ RemainCalm() })
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ dt := expected.Sub(actual)
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+func toFloat(x interface{}) (float64, bool) {
+ var xf float64
+ xok := true
+
+ switch xn := x.(type) {
+ case uint:
+ xf = float64(xn)
+ case uint8:
+ xf = float64(xn)
+ case uint16:
+ xf = float64(xn)
+ case uint32:
+ xf = float64(xn)
+ case uint64:
+ xf = float64(xn)
+ case int:
+ xf = float64(xn)
+ case int8:
+ xf = float64(xn)
+ case int16:
+ xf = float64(xn)
+ case int32:
+ xf = float64(xn)
+ case int64:
+ xf = float64(xn)
+ case float32:
+ xf = float64(xn)
+ case float64:
+ xf = xn
+ case time.Duration:
+ xf = float64(xn)
+ default:
+ xok = false
+ }
+
+ return xf, xok
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+
+ if !aok || !bok {
+ return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+ }
+
+ if math.IsNaN(af) {
+ return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+ }
+
+ if math.IsNaN(bf) {
+ return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+ }
+
+ dt := af - bf
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Map ||
+ reflect.TypeOf(expected).Kind() != reflect.Map {
+ return Fail(t, "Arguments must be maps", msgAndArgs...)
+ }
+
+ expectedMap := reflect.ValueOf(expected)
+ actualMap := reflect.ValueOf(actual)
+
+ if expectedMap.Len() != actualMap.Len() {
+ return Fail(t, "Arguments must have the same number of keys", msgAndArgs...)
+ }
+
+ for _, k := range expectedMap.MapKeys() {
+ ev := expectedMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
+
+ if !ev.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
+ }
+
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
+ }
+
+ if !InDelta(
+ t,
+ ev.Interface(),
+ av.Interface(),
+ delta,
+ msgAndArgs...,
+ ) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+ af, aok := toFloat(expected)
+ if !aok {
+ return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+ }
+ if math.IsNaN(af) {
+ return 0, errors.New("expected value must not be NaN")
+ }
+ if af == 0 {
+ return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+ }
+ bf, bok := toFloat(actual)
+ if !bok {
+ return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
+ }
+ if math.IsNaN(bf) {
+ return 0, errors.New("actual value must not be NaN")
+ }
+
+ return math.Abs(af-bf) / math.Abs(af), nil
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if math.IsNaN(epsilon) {
+ return Fail(t, "epsilon must not be NaN")
+ }
+ actualEpsilon, err := calcRelativeError(expected, actual)
+ if err != nil {
+ return Fail(t, err.Error(), msgAndArgs...)
+ }
+ if actualEpsilon > epsilon {
+ return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+/*
+ Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if err != nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
+// }
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if err == nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "An error is expected but got nil.", msgAndArgs...)
+ }
+
+ return true
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualError(t, err, expectedErrorString)
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+ expected := errString
+ actual := theError.Error()
+ // don't need to use deep equals here, we know they are both strings
+ if expected != actual {
+ return Fail(t, fmt.Sprintf("Error message not equal:\n"+
+ "expected: %q\n"+
+ "actual : %q", expected, actual), msgAndArgs...)
+ }
+ return true
+}
+
+// matchRegexp returns true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+ var r *regexp.Regexp
+ if rr, ok := rx.(*regexp.Regexp); ok {
+ r = rr
+ } else {
+ r = regexp.MustCompile(fmt.Sprint(rx))
+ }
+
+ return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ match := matchRegexp(rx, str)
+
+ if !match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ match := matchRegexp(rx, str)
+
+ if match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return !match
+
+}
+
+// Zero asserts that i is the zero value for its type.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+ }
+ return true
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ return true
+ }
+ if info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if !info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+ }
+ return true
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ return true
+ }
+ if !info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+ if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
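+//
+// assert.YAMLEq(t, "a: 1\nb: 2", "b: 2\na: 1")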
+func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ var expectedYAMLAsInterface, actualYAMLAsInterface interface{}
+
+ if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...)
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+
+ if k == reflect.Ptr {
+ t = t.Elem()
+ k = t.Kind()
+ }
+ return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice, array or string. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+ if expected == nil || actual == nil {
+ return ""
+ }
+
+ et, ek := typeAndKind(expected)
+ at, _ := typeAndKind(actual)
+
+ if et != at {
+ return ""
+ }
+
+ if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
+ return ""
+ }
+
+ var e, a string
+ if et != reflect.TypeOf("") {
+ e = spewConfig.Sdump(expected)
+ a = spewConfig.Sdump(actual)
+ } else {
+ e = reflect.ValueOf(expected).String()
+ a = reflect.ValueOf(actual).String()
+ }
+
+ diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+ A: difflib.SplitLines(e),
+ B: difflib.SplitLines(a),
+ FromFile: "Expected",
+ FromDate: "",
+ ToFile: "Actual",
+ ToDate: "",
+ Context: 1,
+ })
+
+ return "\n\nDiff:\n" + diff
+}
+
+func isFunction(arg interface{}) bool {
+ if arg == nil {
+ return false
+ }
+ return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+ DisableMethods: true,
+ MaxDepth: 10,
+}
+
+type tHelper interface {
+ Helper()
+}
+
+// Eventually asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
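+ // Run the condition check in a goroutine so a slow condition cannot block
+ // the timeout case. While a check is in flight, tick is set to nil (receiving
+ // from a nil channel blocks forever), which disables the ticker case until
+ // the result arrives on ch.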
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return Fail(t, "Condition never satisfied", msgAndArgs...)
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return true
+ }
+ tick = ticker.C
+ }
+ }
+}
+
+// Never asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
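+ // Same nil-channel polling pattern as Eventually above, but inverted:
+ // reaching the timeout means the condition was never satisfied, which is
+ // the success case here.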
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return true
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return Fail(t, "Condition satisfied", msgAndArgs...)
+ }
+ tick = ticker.C
+ }
+ }
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
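+//
+// actualObj, err := SomeFunction()
+// assert.ErrorIs(t, err, expectedError)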
+func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if errors.Is(err, target) {
+ return true
+ }
+
+ var expectedText string
+ if target != nil {
+ expectedText = target.Error()
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
+ "expected: %q\n"+
+ "in chain: %s", expectedText, chain,
+ ), msgAndArgs...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !errors.Is(err, target) {
+ return true
+ }
+
+ var expectedText string
+ if target != nil {
+ expectedText = target.Error()
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+ "found: %q\n"+
+ "in chain: %s", expectedText, chain,
+ ), msgAndArgs...)
+}
+
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if errors.As(err, target) {
+ return true
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
+ "expected: %q\n"+
+ "in chain: %s", target, chain,
+ ), msgAndArgs...)
+}
+
+func buildErrorChainString(err error) string {
+ if err == nil {
+ return ""
+ }
+
+ e := errors.Unwrap(err)
+ chain := fmt.Sprintf("%q", err.Error())
+ for e != nil {
+ chain += fmt.Sprintf("\n\t%q", e.Error())
+ e = errors.Unwrap(e)
+ }
+ return chain
+}
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
new file mode 100644
index 0000000..c9dccc4
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -0,0 +1,45 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// If you assert many times, use the format below:
+//
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+// assert := assert.New(t)
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(a, b, "The two words should be the same.")
+// }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package assert
diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go
new file mode 100644
index 0000000..ac9dc9d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/errors.go
@@ -0,0 +1,10 @@
+package assert
+
+import (
+ "errors"
+)
+
+// AnError is an error instance useful for testing. If the code does not care
+// about error specifics, and only needs to return the error for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 0000000..df189d2
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+ t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+ return &Assertions{
+ t: t,
+ }
+}
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
new file mode 100644
index 0000000..4ed341d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -0,0 +1,162 @@
+package assert
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+)
+
+// httpCode is a helper that returns the HTTP status code of the response. It returns -1 and
+// an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return -1, err
+ }
+ req.URL.RawQuery = values.Encode()
+ handler(w, req)
+ return w.Code, nil
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+ if !isSuccessCode {
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isSuccessCode
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+ if !isRedirectCode {
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isRedirectCode
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ isErrorCode := code >= http.StatusBadRequest
+ if !isErrorCode {
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isErrorCode
+}
+
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ successful := code == statuscode
+ if !successful {
+ Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+ }
+
+ return successful
+}
+
+// HTTPBody is a helper that returns the HTTP body of the response. It returns
+// an empty string if building a new request fails.
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+ if err != nil {
+ return ""
+ }
+ handler(w, req)
+ return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if !contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return !contains
+}
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt
new file mode 100644
index 0000000..55ede8a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 xeipuuv
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md
new file mode 100644
index 0000000..a4f5f14
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md
@@ -0,0 +1,41 @@
+# gojsonpointer
+An implementation of JSON Pointer - Go language
+
+## Usage
+ jsonText := `{
+ "name": "Bobby B",
+ "occupation": {
+ "title" : "King",
+ "years" : 15,
+ "heir" : "Joffrey B"
+ }
+ }`
+
+ var jsonDocument map[string]interface{}
+ json.Unmarshal([]byte(jsonText), &jsonDocument)
+
+ //create a JSON pointer
+ pointerString := "/occupation/title"
+ pointer, _ := NewJsonPointer(pointerString)
+
+ //SET a new value for the "title" in the document
+ pointer.Set(jsonDocument, "Supreme Leader of Westeros")
+
+ //GET the new "title" from the document
+ title, _, _ := pointer.Get(jsonDocument)
+ fmt.Println(title) //outputs "Supreme Leader of Westeros"
+
+ //DELETE the "heir" from the document
+ deletePointer, _ := NewJsonPointer("/occupation/heir")
+ deletePointer.Delete(jsonDocument)
+
+ b, _ := json.Marshal(jsonDocument)
+ fmt.Println(string(b))
+ //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`
+
+
+## References
+https://tools.ietf.org/html/rfc6901
+
+### Note
+Section 4 (Evaluation) of the referenced RFC, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...', is not implemented.
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
new file mode 100644
index 0000000..798c1f1
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
@@ -0,0 +1,211 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonpointer
+// repository-desc An implementation of JSON Pointer - Go language
+//
+// description Main and only file.
+//
+// created 25-02-2013
+
+package gojsonpointer
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ const_empty_pointer = ``
+ const_pointer_separator = `/`
+
+ const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
+)
+
+type implStruct struct {
+ mode string // "SET" or "GET"
+
+ inDocument interface{}
+
+ setInValue interface{}
+
+ getOutNode interface{}
+ getOutKind reflect.Kind
+ outError error
+}
+
+type JsonPointer struct {
+ referenceTokens []string
+}
+
+// NewJsonPointer parses the given string JSON pointer and returns an object
+func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) {
+
+ // Pointer to the root of the document
+ if len(jsonPointerString) == 0 {
+ // Keep referenceTokens nil
+ return
+ }
+ if jsonPointerString[0] != '/' {
+ return p, errors.New(const_invalid_start)
+ }
+
+ p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator)
+ return
+}
+
+// Get uses the pointer to retrieve a value from a JSON document
+func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+
+ is := &implStruct{mode: "GET", inDocument: document}
+ p.implementation(is)
+ return is.getOutNode, is.getOutKind, is.outError
+
+}
+
+// Set uses the pointer to update a value in a JSON document
+func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {
+
+ is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
+ p.implementation(is)
+ return document, is.outError
+
+}
+
+// Delete uses the pointer to delete a value from a JSON document
+func (p *JsonPointer) Delete(document interface{}) (interface{}, error) {
+ is := &implStruct{mode: "DEL", inDocument: document}
+ p.implementation(is)
+ return document, is.outError
+}
+
+// Get, Set and Delete all share the same implementation to avoid code duplication
+func (p *JsonPointer) implementation(i *implStruct) {
+
+ kind := reflect.Invalid
+
+ // Full document when empty
+ if len(p.referenceTokens) == 0 {
+ i.getOutNode = i.inDocument
+ i.getOutKind = kind
+ i.outError = nil
+ return
+ }
+
+ node := i.inDocument
+
+ previousNodes := make([]interface{}, len(p.referenceTokens))
+ previousTokens := make([]string, len(p.referenceTokens))
+
+ for ti, token := range p.referenceTokens {
+
+ isLastToken := ti == len(p.referenceTokens)-1
+ previousNodes[ti] = node
+ previousTokens[ti] = token
+
+ switch v := node.(type) {
+
+ case map[string]interface{}:
+ decodedToken := decodeReferenceToken(token)
+ if _, ok := v[decodedToken]; ok {
+ node = v[decodedToken]
+ if isLastToken && i.mode == "SET" {
+ v[decodedToken] = i.setInValue
+ } else if isLastToken && i.mode == "DEL" {
+ delete(v, decodedToken)
+ }
+ } else if isLastToken && i.mode == "SET" {
+ v[decodedToken] = i.setInValue
+ } else {
+ i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
+ i.getOutKind = reflect.Map
+ i.getOutNode = nil
+ return
+ }
+
+ case []interface{}:
+ tokenIndex, err := strconv.Atoi(token)
+ if err != nil {
+ i.outError = fmt.Errorf("Invalid array index '%s'", token)
+ i.getOutKind = reflect.Slice
+ i.getOutNode = nil
+ return
+ }
+ if tokenIndex < 0 || tokenIndex >= len(v) {
+ i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex)
+ i.getOutKind = reflect.Slice
+ i.getOutNode = nil
+ return
+ }
+
+ node = v[tokenIndex]
+ if isLastToken && i.mode == "SET" {
+ v[tokenIndex] = i.setInValue
+ } else if isLastToken && i.mode == "DEL" {
+ v[tokenIndex] = v[len(v)-1]
+ v[len(v)-1] = nil
+ v = v[:len(v)-1]
+ previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v
+ }
+
+ default:
+ i.outError = fmt.Errorf("Invalid token reference '%s'", token)
+ i.getOutKind = reflect.ValueOf(node).Kind()
+ i.getOutNode = nil
+ return
+ }
+
+ }
+
+ i.getOutNode = node
+ i.getOutKind = reflect.ValueOf(node).Kind()
+ i.outError = nil
+}
+
+// String returns the string representation of the pointer
+func (p *JsonPointer) String() string {
+
+ if len(p.referenceTokens) == 0 {
+ return const_empty_pointer
+ }
+
+ pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)
+
+ return pointerString
+}
+
+// Specific JSON pointer encoding here
+// ~0 => ~
+// ~1 => /
+// ... and vice versa
+
+func decodeReferenceToken(token string) string {
+ step1 := strings.Replace(token, `~1`, `/`, -1)
+ step2 := strings.Replace(step1, `~0`, `~`, -1)
+ return step2
+}
+
+func encodeReferenceToken(token string) string {
+ step1 := strings.Replace(token, `~`, `~0`, -1)
+ step2 := strings.Replace(step1, `/`, `~1`, -1)
+ return step2
+}
diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt
new file mode 100644
index 0000000..55ede8a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 xeipuuv
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md
new file mode 100644
index 0000000..9ab6e1e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonreference/README.md
@@ -0,0 +1,10 @@
+# gojsonreference
+An implementation of JSON Reference - Go language
+
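+## Usage
+
+A minimal sketch of building and resolving references with this package (the example URL and JSON pointer fragment below are illustrative, and error handling is omitted for brevity):
+
+ parent, _ := gojsonreference.NewJsonReference("http://example.com/schemas/root.json")
+ child, _ := gojsonreference.NewJsonReference("#/definitions/address")
+
+ // Resolve the child reference against the parent
+ resolved, _ := parent.Inherits(child)
+ fmt.Println(resolved.String())
+ // outputs "http://example.com/schemas/root.json#/definitions/address"
+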
+## Dependencies
+https://github.com/xeipuuv/gojsonpointer
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go
new file mode 100644
index 0000000..6457291
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go
@@ -0,0 +1,147 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonreference
+// repository-desc An implementation of JSON Reference - Go language
+//
+// description Main and unique file.
+//
+// created 26-02-2013
+
+package gojsonreference
+
+import (
+ "errors"
+ "net/url"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/xeipuuv/gojsonpointer"
+)
+
+const (
+ const_fragment_char = `#`
+)
+
+func NewJsonReference(jsonReferenceString string) (JsonReference, error) {
+
+ var r JsonReference
+ err := r.parse(jsonReferenceString)
+ return r, err
+
+}
+
+type JsonReference struct {
+ referenceUrl *url.URL
+ referencePointer gojsonpointer.JsonPointer
+
+ HasFullUrl bool
+ HasUrlPathOnly bool
+ HasFragmentOnly bool
+ HasFileScheme bool
+ HasFullFilePath bool
+}
+
+func (r *JsonReference) GetUrl() *url.URL {
+ return r.referenceUrl
+}
+
+func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
+ return &r.referencePointer
+}
+
+func (r *JsonReference) String() string {
+
+ if r.referenceUrl != nil {
+ return r.referenceUrl.String()
+ }
+
+ if r.HasFragmentOnly {
+ return const_fragment_char + r.referencePointer.String()
+ }
+
+ return r.referencePointer.String()
+}
+
+func (r *JsonReference) IsCanonical() bool {
+ return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
+}
+
+// "Constructor", parses the given string JSON reference
+func (r *JsonReference) parse(jsonReferenceString string) (err error) {
+
+ r.referenceUrl, err = url.Parse(jsonReferenceString)
+ if err != nil {
+ return
+ }
+ refUrl := r.referenceUrl
+
+ if refUrl.Scheme != "" && refUrl.Host != "" {
+ r.HasFullUrl = true
+ } else {
+ if refUrl.Path != "" {
+ r.HasUrlPathOnly = true
+ } else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
+ r.HasFragmentOnly = true
+ }
+ }
+
+ r.HasFileScheme = refUrl.Scheme == "file"
+ if runtime.GOOS == "windows" {
+ // on Windows, a file URL may have an extra leading slash, and if it
+ // doesn't then its first component will be treated as the host by the
+ // Go runtime
+ if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
+ r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
+ } else {
+ r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
+ }
+ } else {
+ r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
+ }
+
+ // invalid json-pointer error means url has no json-pointer fragment. simply ignore error
+ r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)
+
+ return
+}
+
+// Creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
+ if child.GetUrl() == nil {
+ return nil, errors.New("childUrl is nil!")
+ }
+
+ if r.GetUrl() == nil {
+ return nil, errors.New("parentUrl is nil!")
+ }
+
+ // Get a copy of the parent url to make sure we do not modify the original.
+ // URL reference resolving fails if the fragment of the child is empty, but the parent's is not.
+ // The fragment of the child must be used, so the fragment of the parent is manually removed.
+ parentUrl := *r.GetUrl()
+ parentUrl.Fragment = ""
+
+ ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String())
+ if err != nil {
+ return nil, err
+ }
+ return &ref, err
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore
new file mode 100644
index 0000000..68e993c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/.gitignore
@@ -0,0 +1,3 @@
+*.sw[nop]
+*.iml
+.vscode/
diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml
new file mode 100644
index 0000000..3289001
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+go:
+ - "1.11"
+ - "1.12"
+ - "1.13"
+before_install:
+ - go get github.com/xeipuuv/gojsonreference
+ - go get github.com/xeipuuv/gojsonpointer
+ - go get github.com/stretchr/testify/assert
diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt
new file mode 100644
index 0000000..55ede8a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 xeipuuv
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md
new file mode 100644
index 0000000..758f26d
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/README.md
@@ -0,0 +1,466 @@
+[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema)
+[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema)
+[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema)
+
+# gojsonschema
+
+## Description
+
+An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07.
+
+References :
+
+* http://json-schema.org
+* http://json-schema.org/latest/json-schema-core.html
+* http://json-schema.org/latest/json-schema-validation.html
+
+## Installation
+
+```
+go get github.com/xeipuuv/gojsonschema
+```
+
+Dependencies :
+* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
+* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
+* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
+
+## Usage
+
+### Example
+
+```go
+
+package main
+
+import (
+ "fmt"
+ "github.com/xeipuuv/gojsonschema"
+)
+
+func main() {
+
+ schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+ documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
+
+ result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+ if err != nil {
+ panic(err.Error())
+ }
+
+ if result.Valid() {
+ fmt.Printf("The document is valid\n")
+ } else {
+ fmt.Printf("The document is not valid. see errors :\n")
+ for _, desc := range result.Errors() {
+ fmt.Printf("- %s\n", desc)
+ }
+ }
+}
+
+
+```
+
+#### Loaders
+
+There are various ways to load your JSON data.
+In order to load your schemas and documents,
+first declare an appropriate loader :
+
+* Web / HTTP, using a reference :
+
+```go
+loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
+```
+
+* Local file, using a reference :
+
+```go
+loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+```
+
+References use the URI scheme; the `file://` prefix and a full path to the file are required.
+
+* JSON strings :
+
+```go
+loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
+```
+
+* Custom Go types :
+
+```go
+m := map[string]interface{}{"type": "string"}
+loader := gojsonschema.NewGoLoader(m)
+```
+
+And
+
+```go
+type Root struct {
+ Users []User `json:"users"`
+}
+
+type User struct {
+ Name string `json:"name"`
+}
+
+...
+
+data := Root{}
+data.Users = append(data.Users, User{"John"})
+data.Users = append(data.Users, User{"Sophia"})
+data.Users = append(data.Users, User{"Bill"})
+
+loader := gojsonschema.NewGoLoader(data)
+```
+
+#### Validation
+
+Once the loaders are set, validation is easy :
+
+```go
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+```
+
+Alternatively, you might want to load a schema only once and reuse it for multiple validations :
+
+```go
+schema, err := gojsonschema.NewSchema(schemaLoader)
+...
+result1, err := schema.Validate(documentLoader1)
+...
+result2, err := schema.Validate(documentLoader2)
+...
+// etc ...
+```
+
+To check the result :
+
+```go
+ if result.Valid() {
+ fmt.Printf("The document is valid\n")
+ } else {
+ fmt.Printf("The document is not valid. see errors :\n")
+ for _, err := range result.Errors() {
+ // Err implements the ResultError interface
+ fmt.Printf("- %s\n", err)
+ }
+ }
+```
+
+
+## Loading local schemas
+
+By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
+
+```go
+ sl := gojsonschema.NewSchemaLoader()
+ loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
+ err := sl.AddSchema("http://some_host.com/string.json", loader1)
+```
+
+Alternatively, if your schema already has an `$id`, you can use the `AddSchemas` function:
+```go
+ loader2 := gojsonschema.NewStringLoader(`{
+ "$id" : "http://some_host.com/maxlength.json",
+ "maxLength" : 5
+ }`)
+ err = sl.AddSchemas(loader2)
+```
+
+The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
+```go
+ loader3 := gojsonschema.NewStringLoader(`{
+ "$id" : "http://some_host.com/main.json",
+ "allOf" : [
+ { "$ref" : "http://some_host.com/string.json" },
+ { "$ref" : "http://some_host.com/maxlength.json" }
+ ]
+ }`)
+
+ schema, err := sl.Compile(loader3)
+
+ documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
+
+ result, err := schema.Validate(documentLoader)
+```
+
+It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
+
+```go
+err = sl.AddSchemas(loader3)
+schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
+```
+
+Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
+
+## Using a specific draft
+By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
+
+Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Draft = gojsonschema.Draft7
+sl.AutoDetect = false
+```
+
+If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice versa, as long as `$schema` is specified in all schemas.
+
+## Meta-schema validation
+Schemas that are added using `AddSchema`, `AddSchemas` and `Compile` can be validated against their meta-schema by setting the `Validate` property.
+
+The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Validate = true
+err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
+ "$id" : "http://some_host.com/invalid.json",
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "multipleOf" : true
+}`))
+```
+
+Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
+
+Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
+
+
+## Working with Errors
+
+The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it
+```go
+gojsonschema.Locale = YourCustomLocale{}
+```
+
+However, each error contains additional contextual information.
+
+Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
+
+**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type check against the concrete error types; see the sketch after the list below.
+
+Note: An error of type RequiredError has an err.Type() return value of "required"
+
+ "required": RequiredError
+ "invalid_type": InvalidTypeError
+ "number_any_of": NumberAnyOfError
+ "number_one_of": NumberOneOfError
+ "number_all_of": NumberAllOfError
+ "number_not": NumberNotError
+ "missing_dependency": MissingDependencyError
+ "internal": InternalError
+ "const": ConstError
+ "enum": EnumError
+ "array_no_additional_items": ArrayNoAdditionalItemsError
+ "array_min_items": ArrayMinItemsError
+ "array_max_items": ArrayMaxItemsError
+ "unique": ItemsMustBeUniqueError
+ "contains" : ArrayContainsError
+ "array_min_properties": ArrayMinPropertiesError
+ "array_max_properties": ArrayMaxPropertiesError
+ "additional_property_not_allowed": AdditionalPropertyNotAllowedError
+ "invalid_property_pattern": InvalidPropertyPatternError
+ "invalid_property_name": InvalidPropertyNameError
+ "string_gte": StringLengthGTEError
+ "string_lte": StringLengthLTEError
+ "pattern": DoesNotMatchPatternError
+ "multiple_of": MultipleOfError
+ "number_gte": NumberGTEError
+ "number_gt": NumberGTError
+ "number_lte": NumberLTEError
+ "number_lt": NumberLTError
+ "condition_then" : ConditionThenError
+ "condition_else" : ConditionElseError
+
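+Instead of comparing the `err.Type()` strings above, you can also type-switch on the concrete error types. A minimal sketch, assuming `result` comes from an earlier `Validate` call and that `fmt` and `gojsonschema` are imported:
+
+```go
+for _, err := range result.Errors() {
+    switch err.(type) {
+    case *gojsonschema.RequiredError:
+        // a required property is missing; the property name is in the details
+        fmt.Printf("missing property: %v\n", err.Details()["property"])
+    case *gojsonschema.InvalidTypeError:
+        fmt.Printf("wrong type at %s\n", err.Field())
+    default:
+        fmt.Printf("- %s\n", err)
+    }
+}
+```
+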
+**err.Value()**: *interface{}* Returns the value given
+
+**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
+
+**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
+
+**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
+
+**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
+
+**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
+
+Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e.
+```
+{{.field}} must be greater than or equal to {{.min}}
+```
+
+The library allows you to specify custom template functions, should you require more complex error message handling.
+```go
+gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
+ "allcaps": func(s string) string {
+ return strings.ToUpper(s)
+ },
+}
+```
+
+Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
+```
+{{allcaps .field}} must be greater than or equal to {{.min}}
+```
+
+The above error message would then be rendered with the `field` value in capital letters. For example:
+```
+"PASSWORD must be greater than or equal to 8"
+```
+
+Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
+
+## Formats
+JSON Schema allows for an optional "format" property to validate instances against well-known formats. gojsonschema ships with checkers for most of the formats defined in the spec, which you can use like this:
+
+````json
+{"type": "string", "format": "email"}
+````
+
+Not all formats defined in draft-07 are available. Implemented formats are:
+
+* `date`
+* `time`
+* `date-time`
+* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames.
+* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
+* `idn-email`. Same caveat as `email`.
+* `ipv4`
+* `ipv6`
+* `uri`. Includes unicode support.
+* `uri-reference`. Includes unicode support.
+* `iri`
+* `iri-reference`
+* `uri-template`
+* `uuid`
+* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
+* `json-pointer`
+* `relative-json-pointer`
+
+`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific
+unicode enabled formats for the sake of interoperability as other implementations might not support unicode in the regular formats.
+
+The validation code for `uri`, `idn-email` and their relatives use mostly standard library code.
+
+For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
+
+```go
+// Define the format checker
+type RoleFormatChecker struct {}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f RoleFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ return strings.HasPrefix(asString, "ROLE_")
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
+```
+
+Now to use in your json schema:
+````json
+{"type": "string", "format": "role"}
+````
+
+Another example would be to check if the provided integer matches an id in the database:
+
+JSON schema:
+```json
+{"type": "integer", "format": "ValidUserId"}
+```
+
+```go
+// Define the format checker
+type ValidUserIdFormatChecker struct {}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
+
+ asFloat64, ok := input.(float64) // Numbers are always float64 here
+ if ok == false {
+ return false
+ }
+
+ // XXX
+ // do the magic on the database looking for the int(asFloat64)
+
+ return true
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
+```
+
+Formats can also be removed, for example if you want to override one of the formats that is defined by default.
+
+```go
+gojsonschema.FormatCheckers.Remove("hostname")
+```
+
+
+## Additional custom validation
+After the validation has run and you have the results, you may add additional
+errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead
+of having to add special exceptions for your own errors. Below is an example.
+
+```go
+type AnswerInvalidError struct {
+ gojsonschema.ResultErrorFields
+}
+
+func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
+ err := AnswerInvalidError{}
+ err.SetContext(context)
+ err.SetType("custom_invalid_error")
+ // It is important to use SetDescriptionFormat(): it is parsed to produce the final description,
+ // so any description set directly on err will be overridden by it.
+ err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
+ err.SetValue(value)
+ err.SetDetails(details)
+
+ return &err
+}
+
+func main() {
+ // ...
+ schema, err := gojsonschema.NewSchema(schemaLoader)
+ result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+
+ if true { // some validation
+ jsonContext := gojsonschema.NewJsonContext("question", nil)
+ errDetail := gojsonschema.ErrorDetails{
+ "answer": 42,
+ }
+ result.AddError(
+ newAnswerInvalidError(
+ gojsonschema.NewJsonContext("answer", jsonContext),
+ 52,
+ errDetail,
+ ),
+ errDetail,
+ )
+ }
+
+ return result, err
+
+}
+```
+
+This is especially useful if you want to add validation beyond what the
+json schema drafts can provide, such as business-specific logic.
+
+## Uses
+
+gojsonschema uses the following test suite :
+
+https://github.com/json-schema/JSON-Schema-Test-Suite
diff --git a/vendor/github.com/xeipuuv/gojsonschema/draft.go b/vendor/github.com/xeipuuv/gojsonschema/draft.go
new file mode 100644
index 0000000..61298e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/draft.go
@@ -0,0 +1,125 @@
+// Copyright 2018 johandorland ( https://github.com/johandorland )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gojsonschema
+
+import (
+ "errors"
+ "math"
+ "reflect"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+// Draft is a JSON-schema draft version
+type Draft int
+
+// Supported Draft versions
+const (
+ Draft4 Draft = 4
+ Draft6 Draft = 6
+ Draft7 Draft = 7
+ Hybrid Draft = math.MaxInt32
+)
+
+type draftConfig struct {
+ Version Draft
+ MetaSchemaURL string
+ MetaSchema string
+}
+type draftConfigs []draftConfig
+
+var drafts draftConfigs
+
+func init() {
+ drafts = []draftConfig{
+ {
+ Version: Draft4,
+ MetaSchemaURL: "http://json-schema.org/draft-04/schema",
+ MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`,
+ },
+ {
+ Version: Draft6,
+ MetaSchemaURL: "http://json-schema.org/draft-06/schema",
+ MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`,
+ },
+ {
+ Version: Draft7,
+ MetaSchemaURL: "http://json-schema.org/draft-07/schema",
+ MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`,
+ },
+ }
+}
+
+func (dc draftConfigs) GetMetaSchema(url string) string {
+ for _, config := range dc {
+ if config.MetaSchemaURL == url {
+ return config.MetaSchema
+ }
+ }
+ return ""
+}
+func (dc draftConfigs) GetDraftVersion(url string) *Draft {
+ for _, config := range dc {
+ if config.MetaSchemaURL == url {
+ return &config.Version
+ }
+ }
+ return nil
+}
+func (dc draftConfigs) GetSchemaURL(draft Draft) string {
+ for _, config := range dc {
+ if config.Version == draft {
+ return config.MetaSchemaURL
+ }
+ }
+ return ""
+}
+
+func parseSchemaURL(documentNode interface{}) (string, *Draft, error) {
+
+ if isKind(documentNode, reflect.Bool) {
+ return "", nil, nil
+ }
+
+ if !isKind(documentNode, reflect.Map) {
+ return "", nil, errors.New("schema is invalid")
+ }
+
+ m := documentNode.(map[string]interface{})
+
+ if existsMapKey(m, KEY_SCHEMA) {
+ if !isKind(m[KEY_SCHEMA], reflect.String) {
+ return "", nil, errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{
+ "key": KEY_SCHEMA,
+ "type": TYPE_STRING,
+ },
+ ))
+ }
+
+ schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string))
+
+ if err != nil {
+ return "", nil, err
+ }
+
+ schema := schemaReference.String()
+
+ return schema, drafts.GetDraftVersion(schema), nil
+ }
+
+ return "", nil, nil
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go
new file mode 100644
index 0000000..e4e9814
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go
@@ -0,0 +1,364 @@
+package gojsonschema
+
+import (
+ "bytes"
+ "sync"
+ "text/template"
+)
+
+var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
+
+// template.Template is not thread-safe for writing, so some locking is done
+// sync.RWMutex is used for efficiently locking when new templates are created
+type errorTemplate struct {
+ *template.Template
+ sync.RWMutex
+}
+
+type (
+
+ // FalseError. ErrorDetails: -
+ FalseError struct {
+ ResultErrorFields
+ }
+
+ // RequiredError indicates that a required field is missing
+ // ErrorDetails: property string
+ RequiredError struct {
+ ResultErrorFields
+ }
+
+ // InvalidTypeError indicates that a field has the incorrect type
+ // ErrorDetails: expected, given
+ InvalidTypeError struct {
+ ResultErrorFields
+ }
+
+ // NumberAnyOfError is produced in case of a failing "anyOf" validation
+ // ErrorDetails: -
+ NumberAnyOfError struct {
+ ResultErrorFields
+ }
+
+ // NumberOneOfError is produced in case of a failing "oneOf" validation
+ // ErrorDetails: -
+ NumberOneOfError struct {
+ ResultErrorFields
+ }
+
+ // NumberAllOfError is produced in case of a failing "allOf" validation
+ // ErrorDetails: -
+ NumberAllOfError struct {
+ ResultErrorFields
+ }
+
+ // NumberNotError is produced if a "not" validation failed
+ // ErrorDetails: -
+ NumberNotError struct {
+ ResultErrorFields
+ }
+
+ // MissingDependencyError is produced in case of a "missing dependency" problem
+ // ErrorDetails: dependency
+ MissingDependencyError struct {
+ ResultErrorFields
+ }
+
+ // InternalError indicates an internal error
+ // ErrorDetails: error
+ InternalError struct {
+ ResultErrorFields
+ }
+
+ // ConstError indicates a const error
+ // ErrorDetails: allowed
+ ConstError struct {
+ ResultErrorFields
+ }
+
+ // EnumError indicates an enum error
+ // ErrorDetails: allowed
+ EnumError struct {
+ ResultErrorFields
+ }
+
+ // ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed
+ // ErrorDetails: -
+ ArrayNoAdditionalItemsError struct {
+ ResultErrorFields
+ }
+
+ // ArrayMinItemsError is produced if an array contains less items than the allowed minimum
+ // ErrorDetails: min
+ ArrayMinItemsError struct {
+ ResultErrorFields
+ }
+
+ // ArrayMaxItemsError is produced if an array contains more items than the allowed maximum
+ // ErrorDetails: max
+ ArrayMaxItemsError struct {
+ ResultErrorFields
+ }
+
+ // ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items
+ // ErrorDetails: type, i, j
+ ItemsMustBeUniqueError struct {
+ ResultErrorFields
+ }
+
+ // ArrayContainsError is produced if an array contains invalid items
+ // ErrorDetails:
+ ArrayContainsError struct {
+ ResultErrorFields
+ }
+
+ // ArrayMinPropertiesError is produced if an object contains less properties than the allowed minimum
+ // ErrorDetails: min
+ ArrayMinPropertiesError struct {
+ ResultErrorFields
+ }
+
+ // ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum
+ // ErrorDetails: max
+ ArrayMaxPropertiesError struct {
+ ResultErrorFields
+ }
+
+ // AdditionalPropertyNotAllowedError is produced if an object has additional properties, but not allowed
+ // ErrorDetails: property
+ AdditionalPropertyNotAllowedError struct {
+ ResultErrorFields
+ }
+
+ // InvalidPropertyPatternError is produced if a property does not match the defined pattern
+ // ErrorDetails: property, pattern
+ InvalidPropertyPatternError struct {
+ ResultErrorFields
+ }
+
+ // InvalidPropertyNameError is produced if an invalid-named property was found
+ // ErrorDetails: property
+ InvalidPropertyNameError struct {
+ ResultErrorFields
+ }
+
+ // StringLengthGTEError is produced if a string is shorter than the minimum required length
+ // ErrorDetails: min
+ StringLengthGTEError struct {
+ ResultErrorFields
+ }
+
+ // StringLengthLTEError is produced if a string is longer than the maximum allowed length
+ // ErrorDetails: max
+ StringLengthLTEError struct {
+ ResultErrorFields
+ }
+
+ // DoesNotMatchPatternError is produced if a string does not match the defined pattern
+ // ErrorDetails: pattern
+ DoesNotMatchPatternError struct {
+ ResultErrorFields
+ }
+
+ // DoesNotMatchFormatError is produced if a string does not match the defined format
+ // ErrorDetails: format
+ DoesNotMatchFormatError struct {
+ ResultErrorFields
+ }
+
+ // MultipleOfError is produced if a number is not a multiple of the defined multipleOf
+ // ErrorDetails: multiple
+ MultipleOfError struct {
+ ResultErrorFields
+ }
+
+ // NumberGTEError is produced if a number is lower than the allowed minimum
+ // ErrorDetails: min
+ NumberGTEError struct {
+ ResultErrorFields
+ }
+
+ // NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set
+ // ErrorDetails: min
+ NumberGTError struct {
+ ResultErrorFields
+ }
+
+ // NumberLTEError is produced if a number is higher than the allowed maximum
+ // ErrorDetails: max
+ NumberLTEError struct {
+ ResultErrorFields
+ }
+
+ // NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set
+ // ErrorDetails: max
+ NumberLTError struct {
+ ResultErrorFields
+ }
+
+ // ConditionThenError is produced if a condition's "then" validation is invalid
+ // ErrorDetails: -
+ ConditionThenError struct {
+ ResultErrorFields
+ }
+
+ // ConditionElseError is produced if a condition's "else" condition is invalid
+ // ErrorDetails: -
+ ConditionElseError struct {
+ ResultErrorFields
+ }
+)
+
+// newError takes a ResultError type and sets the type, context, description, details, value, and field
+func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) {
+ var t string
+ var d string
+ switch err.(type) {
+ case *FalseError:
+ t = "false"
+ d = locale.False()
+ case *RequiredError:
+ t = "required"
+ d = locale.Required()
+ case *InvalidTypeError:
+ t = "invalid_type"
+ d = locale.InvalidType()
+ case *NumberAnyOfError:
+ t = "number_any_of"
+ d = locale.NumberAnyOf()
+ case *NumberOneOfError:
+ t = "number_one_of"
+ d = locale.NumberOneOf()
+ case *NumberAllOfError:
+ t = "number_all_of"
+ d = locale.NumberAllOf()
+ case *NumberNotError:
+ t = "number_not"
+ d = locale.NumberNot()
+ case *MissingDependencyError:
+ t = "missing_dependency"
+ d = locale.MissingDependency()
+ case *InternalError:
+ t = "internal"
+ d = locale.Internal()
+ case *ConstError:
+ t = "const"
+ d = locale.Const()
+ case *EnumError:
+ t = "enum"
+ d = locale.Enum()
+ case *ArrayNoAdditionalItemsError:
+ t = "array_no_additional_items"
+ d = locale.ArrayNoAdditionalItems()
+ case *ArrayMinItemsError:
+ t = "array_min_items"
+ d = locale.ArrayMinItems()
+ case *ArrayMaxItemsError:
+ t = "array_max_items"
+ d = locale.ArrayMaxItems()
+ case *ItemsMustBeUniqueError:
+ t = "unique"
+ d = locale.Unique()
+ case *ArrayContainsError:
+ t = "contains"
+ d = locale.ArrayContains()
+ case *ArrayMinPropertiesError:
+ t = "array_min_properties"
+ d = locale.ArrayMinProperties()
+ case *ArrayMaxPropertiesError:
+ t = "array_max_properties"
+ d = locale.ArrayMaxProperties()
+ case *AdditionalPropertyNotAllowedError:
+ t = "additional_property_not_allowed"
+ d = locale.AdditionalPropertyNotAllowed()
+ case *InvalidPropertyPatternError:
+ t = "invalid_property_pattern"
+ d = locale.InvalidPropertyPattern()
+ case *InvalidPropertyNameError:
+ t = "invalid_property_name"
+ d = locale.InvalidPropertyName()
+ case *StringLengthGTEError:
+ t = "string_gte"
+ d = locale.StringGTE()
+ case *StringLengthLTEError:
+ t = "string_lte"
+ d = locale.StringLTE()
+ case *DoesNotMatchPatternError:
+ t = "pattern"
+ d = locale.DoesNotMatchPattern()
+ case *DoesNotMatchFormatError:
+ t = "format"
+ d = locale.DoesNotMatchFormat()
+ case *MultipleOfError:
+ t = "multiple_of"
+ d = locale.MultipleOf()
+ case *NumberGTEError:
+ t = "number_gte"
+ d = locale.NumberGTE()
+ case *NumberGTError:
+ t = "number_gt"
+ d = locale.NumberGT()
+ case *NumberLTEError:
+ t = "number_lte"
+ d = locale.NumberLTE()
+ case *NumberLTError:
+ t = "number_lt"
+ d = locale.NumberLT()
+ case *ConditionThenError:
+ t = "condition_then"
+ d = locale.ConditionThen()
+ case *ConditionElseError:
+ t = "condition_else"
+ d = locale.ConditionElse()
+ }
+
+ err.SetType(t)
+ err.SetContext(context)
+ err.SetValue(value)
+ err.SetDetails(details)
+ err.SetDescriptionFormat(d)
+ details["field"] = err.Field()
+
+ if _, exists := details["context"]; !exists && context != nil {
+ details["context"] = context.String()
+ }
+
+ err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
+}
+
+// formatErrorDescription takes a string in the default text/template
+// format and converts it to a string with replacements. The fields come
+// from the ErrorDetails struct and vary for each type of error.
+func formatErrorDescription(s string, details ErrorDetails) string {
+
+ var tpl *template.Template
+ var descrAsBuffer bytes.Buffer
+ var err error
+
+ errorTemplates.RLock()
+ tpl = errorTemplates.Lookup(s)
+ errorTemplates.RUnlock()
+
+ if tpl == nil {
+ errorTemplates.Lock()
+ tpl = errorTemplates.New(s)
+
+ if ErrorTemplateFuncs != nil {
+ tpl.Funcs(ErrorTemplateFuncs)
+ }
+
+ tpl, err = tpl.Parse(s)
+ errorTemplates.Unlock()
+
+ if err != nil {
+ return err.Error()
+ }
+ }
+
+ err = tpl.Execute(&descrAsBuffer, details)
+ if err != nil {
+ return err.Error()
+ }
+
+ return descrAsBuffer.String()
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
new file mode 100644
index 0000000..873ffc7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
@@ -0,0 +1,368 @@
+package gojsonschema
+
+import (
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+type (
+ // FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+ FormatChecker interface {
+ // IsFormat checks if input has the correct format and type
+ IsFormat(input interface{}) bool
+ }
+
+ // FormatCheckerChain holds the formatters
+ FormatCheckerChain struct {
+ formatters map[string]FormatChecker
+ }
+
+ // EmailFormatChecker verifies email address formats
+ EmailFormatChecker struct{}
+
+ // IPV4FormatChecker verifies IP addresses in the IPv4 format
+ IPV4FormatChecker struct{}
+
+ // IPV6FormatChecker verifies IP addresses in the IPv6 format
+ IPV6FormatChecker struct{}
+
+ // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+ //
+ // Valid formats:
+ // Partial Time: HH:MM:SS
+ // Full Date: YYYY-MM-DD
+ // Full Time: HH:MM:SSZ-07:00
+ // Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
+ //
+ // Where
+ // YYYY = 4DIGIT year
+ // MM = 2DIGIT month ; 01-12
+ // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+ // HH = 2DIGIT hour ; 00-23
+ // MM = 2DIGIT ; 00-59
+ // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+ // T = Literal
+ // Z = Literal
+ //
+ // Note: Nanoseconds are also supported in all formats
+ //
+ // http://tools.ietf.org/html/rfc3339#section-5.6
+ DateTimeFormatChecker struct{}
+
+ // DateFormatChecker verifies date formats
+ //
+ // Valid format:
+ // Full Date: YYYY-MM-DD
+ //
+ // Where
+ // YYYY = 4DIGIT year
+ // MM = 2DIGIT month ; 01-12
+ // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+ DateFormatChecker struct{}
+
+ // TimeFormatChecker verifies time formats
+ //
+ // Valid formats:
+ // Partial Time: HH:MM:SS
+ // Full Time: HH:MM:SSZ-07:00
+ //
+ // Where
+ // HH = 2DIGIT hour ; 00-23
+ // MM = 2DIGIT ; 00-59
+ // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+ // T = Literal
+ // Z = Literal
+ TimeFormatChecker struct{}
+
+ // URIFormatChecker validates a URI with a valid Scheme per RFC3986
+ URIFormatChecker struct{}
+
+ // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
+ URIReferenceFormatChecker struct{}
+
+ // URITemplateFormatChecker validates a URI template per RFC6570
+ URITemplateFormatChecker struct{}
+
+ // HostnameFormatChecker validates a hostname is in the correct format
+ HostnameFormatChecker struct{}
+
+ // UUIDFormatChecker validates a UUID is in the correct format
+ UUIDFormatChecker struct{}
+
+ // RegexFormatChecker validates a regex is in the correct format
+ RegexFormatChecker struct{}
+
+ // JSONPointerFormatChecker validates a JSON Pointer per RFC6901
+ JSONPointerFormatChecker struct{}
+
+ // RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format
+ RelativeJSONPointerFormatChecker struct{}
+)
+
+var (
+ // FormatCheckers holds the valid formatters, and is a public variable
+ // so library users can add custom formatters
+ FormatCheckers = FormatCheckerChain{
+ formatters: map[string]FormatChecker{
+ "date": DateFormatChecker{},
+ "time": TimeFormatChecker{},
+ "date-time": DateTimeFormatChecker{},
+ "hostname": HostnameFormatChecker{},
+ "email": EmailFormatChecker{},
+ "idn-email": EmailFormatChecker{},
+ "ipv4": IPV4FormatChecker{},
+ "ipv6": IPV6FormatChecker{},
+ "uri": URIFormatChecker{},
+ "uri-reference": URIReferenceFormatChecker{},
+ "iri": URIFormatChecker{},
+ "iri-reference": URIReferenceFormatChecker{},
+ "uri-template": URITemplateFormatChecker{},
+ "uuid": UUIDFormatChecker{},
+ "regex": RegexFormatChecker{},
+ "json-pointer": JSONPointerFormatChecker{},
+ "relative-json-pointer": RelativeJSONPointerFormatChecker{},
+ },
+ }
+
+ // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
+ rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
+
+ // Use a regex to make sure curly brackets are balanced properly after validating it as a URI
+ rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$")
+
+ rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
+
+ rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$")
+
+ rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$")
+
+ lock = new(sync.RWMutex)
+)
+
+// Add adds a FormatChecker to the FormatCheckerChain
+// The name used will be the value used for the format key in your json schema
+func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
+ lock.Lock()
+ c.formatters[name] = f
+ lock.Unlock()
+
+ return c
+}
+
+// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
+func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
+ lock.Lock()
+ delete(c.formatters, name)
+ lock.Unlock()
+
+ return c
+}
+
+// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
+func (c *FormatCheckerChain) Has(name string) bool {
+ lock.RLock()
+ _, ok := c.formatters[name]
+ lock.RUnlock()
+
+ return ok
+}
+
+// IsFormat will check an input against a FormatChecker with the given name
+// to see if it is the correct format
+func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
+ lock.RLock()
+ f, ok := c.formatters[name]
+ lock.RUnlock()
+
+ // If a format is unrecognized it should always pass validation
+ if !ok {
+ return true
+ }
+
+ return f.IsFormat(input)
+}
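+
+// Illustrative usage sketch (not part of the upstream source): FormatCheckers is
+// exported, so callers can register their own checker by implementing the
+// FormatChecker interface and calling Add. The type and format name below
+// ("even-length") are hypothetical examples.
+//
+// type evenLengthChecker struct{}
+//
+// func (evenLengthChecker) IsFormat(input interface{}) bool {
+// s, ok := input.(string)
+// return ok && len(s)%2 == 0
+// }
+//
+// func init() {
+// FormatCheckers.Add("even-length", evenLengthChecker{})
+// }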
+
+// IsFormat checks if input is a correctly formatted e-mail address
+func (f EmailFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ _, err := mail.ParseAddress(asString)
+ return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted IPv4-address
+func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ // Credit: https://github.com/asaskevich/govalidator
+ ip := net.ParseIP(asString)
+ return ip != nil && strings.Contains(asString, ".")
+}
+
+// IsFormat checks if input is a correctly formatted IPv6-address
+func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ // Credit: https://github.com/asaskevich/govalidator
+ ip := net.ParseIP(asString)
+ return ip != nil && strings.Contains(asString, ":")
+}
+
+// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
+func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ formats := []string{
+ "15:04:05",
+ "15:04:05Z07:00",
+ "2006-01-02",
+ time.RFC3339,
+ time.RFC3339Nano,
+ }
+
+ for _, format := range formats {
+ if _, err := time.Parse(format, asString); err == nil {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
+func (f DateFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+ _, err := time.Parse("2006-01-02", asString)
+ return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted time (HH:MM:SS, optionally followed by "Z" or a ±hh:mm offset)
+func (f TimeFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ if _, err := time.Parse("15:04:05Z07:00", asString); err == nil {
+ return true
+ }
+
+ _, err := time.Parse("15:04:05", asString)
+ return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted URI with a valid Scheme per RFC3986
+func (f URIFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ u, err := url.Parse(asString)
+
+ if err != nil || u.Scheme == "" {
+ return false
+ }
+
+ return !strings.Contains(asString, `\`)
+}
+
+// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
+func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ _, err := url.Parse(asString)
+ return err == nil && !strings.Contains(asString, `\`)
+}
+
+// IsFormat checks if input is a correctly formatted URI template per RFC6570
+func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ u, err := url.Parse(asString)
+ if err != nil || strings.Contains(asString, `\`) {
+ return false
+ }
+
+ return rxURITemplate.MatchString(u.Path)
+}
+
+// IsFormat checks if input is a correctly formatted hostname
+func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ return rxHostname.MatchString(asString) && len(asString) < 256
+}
+
+// IsFormat checks if input is a correctly formatted UUID
+func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ return rxUUID.MatchString(asString)
+}
+
+// IsFormat checks if input is a correctly formatted regular expression
+func (f RegexFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ if asString == "" {
+ return true
+ }
+ _, err := regexp.Compile(asString)
+ return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
+func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ return rxJSONPointer.MatchString(asString)
+}
+
+// IsFormat checks if input is a correctly formatted relative JSON Pointer
+func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool {
+ asString, ok := input.(string)
+ if !ok {
+ return false
+ }
+
+ return rxRelJSONPointer.MatchString(asString)
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml
new file mode 100644
index 0000000..ab6fb86
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml
@@ -0,0 +1,13 @@
+package: github.com/xeipuuv/gojsonschema
+license: Apache 2.0
+import:
+- package: github.com/xeipuuv/gojsonschema
+
+- package: github.com/xeipuuv/gojsonpointer
+
+- package: github.com/xeipuuv/gojsonreference
+
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
diff --git a/vendor/github.com/xeipuuv/gojsonschema/go.mod b/vendor/github.com/xeipuuv/gojsonschema/go.mod
new file mode 100644
index 0000000..b709d7f
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/go.mod
@@ -0,0 +1,7 @@
+module github.com/xeipuuv/gojsonschema
+
+require (
+ github.com/stretchr/testify v1.3.0
+ github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
+ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
+)
diff --git a/vendor/github.com/xeipuuv/gojsonschema/go.sum b/vendor/github.com/xeipuuv/gojsonschema/go.sum
new file mode 100644
index 0000000..0e865ac
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/go.sum
@@ -0,0 +1,11 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go
new file mode 100644
index 0000000..4ef7a8d
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go
@@ -0,0 +1,37 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Very simple log wrapper.
+// Used for debugging/testing purposes.
+//
+// created 01-01-2015
+
+package gojsonschema
+
+import (
+ "log"
+)
+
+const internalLogEnabled = false
+
+func internalLog(format string, v ...interface{}) {
+ log.Printf(format, v...)
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
new file mode 100644
index 0000000..0e97970
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
@@ -0,0 +1,73 @@
+// Copyright 2013 MongoDB, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author tolsen
+// author-github https://github.com/tolsen
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context
+//
+// created 04-09-2013
+
+package gojsonschema
+
+import "bytes"
+
+// JsonContext implements a persistent linked-list of strings
+type JsonContext struct {
+ head string
+ tail *JsonContext
+}
+
+// NewJsonContext creates a new JsonContext
+func NewJsonContext(head string, tail *JsonContext) *JsonContext {
+ return &JsonContext{head, tail}
+}
+
+// String displays the context in reverse.
+// This plays well with the data structure's persistent nature with
+// Cons and a json document's tree structure.
+func (c *JsonContext) String(del ...string) string {
+ byteArr := make([]byte, 0, c.stringLen())
+ buf := bytes.NewBuffer(byteArr)
+ c.writeStringToBuffer(buf, del)
+
+ return buf.String()
+}
+
+func (c *JsonContext) stringLen() int {
+ length := 0
+ if c.tail != nil {
+ length = c.tail.stringLen() + 1 // add 1 for "."
+ }
+
+ length += len(c.head)
+ return length
+}
+
+func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
+ if c.tail != nil {
+ c.tail.writeStringToBuffer(buf, del)
+
+ if len(del) > 0 {
+ buf.WriteString(del[0])
+ } else {
+ buf.WriteString(".")
+ }
+ }
+
+ buf.WriteString(c.head)
+}
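+
+// Illustrative sketch (not part of the upstream source): contexts grow head-first,
+// and String walks the tail before the head, so:
+//
+// ctx := NewJsonContext("b", NewJsonContext("a", NewJsonContext("(root)", nil)))
+// ctx.String() // "(root).a.b"
+// ctx.String("/") // "(root)/a/b"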
diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go
new file mode 100644
index 0000000..5d88af2
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go
@@ -0,0 +1,386 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Different strategies to load JSON files.
+// Includes References (file and HTTP), JSON strings and Go types.
+//
+// created 01-02-2015
+
+package gojsonschema
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+var osFS = osFileSystem(os.Open)
+
+// JSONLoader defines the JSON loader interface
+type JSONLoader interface {
+ JsonSource() interface{}
+ LoadJSON() (interface{}, error)
+ JsonReference() (gojsonreference.JsonReference, error)
+ LoaderFactory() JSONLoaderFactory
+}
+
+// JSONLoaderFactory defines the JSON loader factory interface
+type JSONLoaderFactory interface {
+ // New creates a new JSON loader for the given source
+ New(source string) JSONLoader
+}
+
+// DefaultJSONLoaderFactory is the default JSON loader factory
+type DefaultJSONLoaderFactory struct {
+}
+
+// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem
+type FileSystemJSONLoaderFactory struct {
+ fs http.FileSystem
+}
+
+// New creates a new JSON loader for the given source
+func (d DefaultJSONLoaderFactory) New(source string) JSONLoader {
+ return &jsonReferenceLoader{
+ fs: osFS,
+ source: source,
+ }
+}
+
+// New creates a new JSON loader for the given source
+func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader {
+ return &jsonReferenceLoader{
+ fs: f.fs,
+ source: source,
+ }
+}
+
+// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem.
+type osFileSystem func(string) (*os.File, error)
+
+// Opens a file with the given name
+func (o osFileSystem) Open(name string) (http.File, error) {
+ return o(name)
+}
+
+// JSON Reference loader
+// references are used to load JSONs from files and HTTP
+
+type jsonReferenceLoader struct {
+ fs http.FileSystem
+ source string
+}
+
+func (l *jsonReferenceLoader) JsonSource() interface{} {
+ return l.source
+}
+
+func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference(l.JsonSource().(string))
+}
+
+func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory {
+ return &FileSystemJSONLoaderFactory{
+ fs: l.fs,
+ }
+}
+
+// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system.
+func NewReferenceLoader(source string) JSONLoader {
+ return &jsonReferenceLoader{
+ fs: osFS,
+ source: source,
+ }
+}
+
+// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system.
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader {
+ return &jsonReferenceLoader{
+ fs: fs,
+ source: source,
+ }
+}
+
+func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
+
+ var err error
+
+ reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string))
+ if err != nil {
+ return nil, err
+ }
+
+ refToURL := reference
+ refToURL.GetUrl().Fragment = ""
+
+ var document interface{}
+
+ if reference.HasFileScheme {
+
+ filename := strings.TrimPrefix(refToURL.String(), "file://")
+ filename, err = url.QueryUnescape(filename)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if runtime.GOOS == "windows" {
+ // on Windows, a file URL may have an extra leading slash, use slashes
+ // instead of backslashes, and have spaces escaped
+ filename = strings.TrimPrefix(filename, "/")
+ filename = filepath.FromSlash(filename)
+ }
+
+ document, err = l.loadFromFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ } else {
+
+ document, err = l.loadFromHTTP(refToURL.String())
+ if err != nil {
+ return nil, err
+ }
+
+ }
+
+ return document, nil
+
+}
+
+func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
+
+ // return cached versions of the metaschemas for drafts 4, 6 and 7
+ // for performance and to allow easier offline use
+ if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" {
+ return decodeJSONUsingNumber(strings.NewReader(metaSchema))
+ }
+
+ resp, err := http.Get(address)
+ if err != nil {
+ return nil, err
+ }
+
+ // must return HTTP Status 200 OK
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status}))
+ }
+
+ bodyBuff, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ return decodeJSONUsingNumber(bytes.NewReader(bodyBuff))
+}
+
+func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
+ f, err := l.fs.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ bodyBuff, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return decodeJSONUsingNumber(bytes.NewReader(bodyBuff))
+
+}
+
+// JSON string loader
+
+type jsonStringLoader struct {
+ source string
+}
+
+func (l *jsonStringLoader) JsonSource() interface{} {
+ return l.source
+}
+
+func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
+// NewStringLoader creates a new JSONLoader, taking a string as source
+func NewStringLoader(source string) JSONLoader {
+ return &jsonStringLoader{source: source}
+}
+
+func (l *jsonStringLoader) LoadJSON() (interface{}, error) {
+
+ return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string)))
+
+}
+
+// JSON bytes loader
+
+type jsonBytesLoader struct {
+ source []byte
+}
+
+func (l *jsonBytesLoader) JsonSource() interface{} {
+ return l.source
+}
+
+func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
+// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source
+func NewBytesLoader(source []byte) JSONLoader {
+ return &jsonBytesLoader{source: source}
+}
+
+func (l *jsonBytesLoader) LoadJSON() (interface{}, error) {
+ return decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte)))
+}
+
+// JSON Go (types) loader
+// used to load JSONs from the code as maps, interface{}, structs ...
+
+type jsonGoLoader struct {
+ source interface{}
+}
+
+func (l *jsonGoLoader) JsonSource() interface{} {
+ return l.source
+}
+
+func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
+// NewGoLoader creates a new JSONLoader from a given Go struct
+func NewGoLoader(source interface{}) JSONLoader {
+ return &jsonGoLoader{source: source}
+}
+
+func (l *jsonGoLoader) LoadJSON() (interface{}, error) {
+
+ // convert it to compliant JSON first to avoid type mismatches
+
+ jsonBytes, err := json.Marshal(l.JsonSource())
+ if err != nil {
+ return nil, err
+ }
+
+ return decodeJSONUsingNumber(bytes.NewReader(jsonBytes))
+
+}
+
+type jsonIOLoader struct {
+ buf *bytes.Buffer
+}
+
+// NewReaderLoader creates a new JSON loader using the provided io.Reader
+func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) {
+ buf := &bytes.Buffer{}
+ return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf)
+}
+
+// NewWriterLoader creates a new JSON loader using the provided io.Writer
+func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) {
+ buf := &bytes.Buffer{}
+ return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
+}
+
+func (l *jsonIOLoader) JsonSource() interface{} {
+ return l.buf.String()
+}
+
+func (l *jsonIOLoader) LoadJSON() (interface{}, error) {
+ return decodeJSONUsingNumber(l.buf)
+}
+
+func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
+// JSON raw loader
+// Use this loader when the JSON has already been unmarshalled into an interface{}.
+// It is mainly used for testing, since otherwise there is no guarantee the JSON was
+// decoded "properly" with https://golang.org/pkg/encoding/json/#Decoder.UseNumber
+type jsonRawLoader struct {
+ source interface{}
+}
+
+// NewRawLoader creates a new JSON raw loader for the given source
+func NewRawLoader(source interface{}) JSONLoader {
+ return &jsonRawLoader{source: source}
+}
+func (l *jsonRawLoader) JsonSource() interface{} {
+ return l.source
+}
+func (l *jsonRawLoader) LoadJSON() (interface{}, error) {
+ return l.source, nil
+}
+func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
+func decodeJSONUsingNumber(r io.Reader) (interface{}, error) {
+
+ var document interface{}
+
+ decoder := json.NewDecoder(r)
+ decoder.UseNumber()
+
+ err := decoder.Decode(&document)
+ if err != nil {
+ return nil, err
+ }
+
+ return document, nil
+
+}
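+
+// Illustrative usage sketch (not part of the upstream source): any of the loader
+// constructors above can feed schema compilation. NewSchema is defined in schema.go
+// later in this diff; the Validate method lives in validation.go, which is assumed
+// here and not shown. The schema and document literals are hypothetical examples.
+//
+// schemaLoader := NewStringLoader(`{"type": "object", "required": ["name"]}`)
+// documentLoader := NewGoLoader(map[string]interface{}{"name": "gopher"})
+//
+// schema, err := NewSchema(schemaLoader)
+// if err != nil {
+// // handle the schema compilation error
+// }
+// result, err := schema.Validate(documentLoader)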
diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go
new file mode 100644
index 0000000..a416225
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go
@@ -0,0 +1,472 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Contains const string and messages.
+//
+// created 01-01-2015
+
+package gojsonschema
+
+type (
+ // locale is an interface for defining custom error strings
+ locale interface {
+
+ // False returns a format-string for "false" schema validation errors
+ False() string
+
+ // Required returns a format-string for "required" schema validation errors
+ Required() string
+
+ // InvalidType returns a format-string for "invalid type" schema validation errors
+ InvalidType() string
+
+ // NumberAnyOf returns a format-string for "anyOf" schema validation errors
+ NumberAnyOf() string
+
+ // NumberOneOf returns a format-string for "oneOf" schema validation errors
+ NumberOneOf() string
+
+ // NumberAllOf returns a format-string for "allOf" schema validation errors
+ NumberAllOf() string
+
+ // NumberNot returns a format-string to format a NumberNotError
+ NumberNot() string
+
+ // MissingDependency returns a format-string for "missing dependency" schema validation errors
+ MissingDependency() string
+
+ // Internal returns a format-string for internal errors
+ Internal() string
+
+ // Const returns a format-string to format a ConstError
+ Const() string
+
+ // Enum returns a format-string to format an EnumError
+ Enum() string
+
+ // ArrayNotEnoughItems returns a format-string to format an error for arrays that do not have enough items to match the positional list of schemas
+ ArrayNotEnoughItems() string
+
+ // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError
+ ArrayNoAdditionalItems() string
+
+ // ArrayMinItems returns a format-string to format an ArrayMinItemsError
+ ArrayMinItems() string
+
+ // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError
+ ArrayMaxItems() string
+
+ // Unique returns a format-string to format an ItemsMustBeUniqueError
+ Unique() string
+
+ // ArrayContains returns a format-string to format an ArrayContainsError
+ ArrayContains() string
+
+ // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError
+ ArrayMinProperties() string
+
+ // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError
+ ArrayMaxProperties() string
+
+ // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError
+ AdditionalPropertyNotAllowed() string
+
+ // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError
+ InvalidPropertyPattern() string
+
+ // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError
+ InvalidPropertyName() string
+
+ // StringGTE returns a format-string to format an StringLengthGTEError
+ StringGTE() string
+
+ // StringLTE returns a format-string to format an StringLengthLTEError
+ StringLTE() string
+
+ // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError
+ DoesNotMatchPattern() string
+
+ // DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError
+ DoesNotMatchFormat() string
+
+ // MultipleOf returns a format-string to format an MultipleOfError
+ MultipleOf() string
+
+ // NumberGTE returns a format-string to format an NumberGTEError
+ NumberGTE() string
+
+ // NumberGT returns a format-string to format an NumberGTError
+ NumberGT() string
+
+ // NumberLTE returns a format-string to format an NumberLTEError
+ NumberLTE() string
+
+ // NumberLT returns a format-string to format an NumberLTError
+ NumberLT() string
+
+ // Schema validations
+
+ // RegexPattern returns a format-string to format a regex-pattern error
+ RegexPattern() string
+
+ // GreaterThanZero returns a format-string to format an error where a number must be greater than zero
+ GreaterThanZero() string
+
+ // MustBeOfA returns a format-string to format an error where a value is of the wrong type
+ MustBeOfA() string
+
+ // MustBeOfAn returns a format-string to format an error where a value is of the wrong type
+ MustBeOfAn() string
+
+ // CannotBeUsedWithout returns a format-string to format a "cannot be used without" error
+ CannotBeUsedWithout() string
+
+ // CannotBeGT returns a format-string to format an error where a value is greater than allowed
+ CannotBeGT() string
+
+ // MustBeOfType returns a format-string to format an error where a value does not match the required type
+ MustBeOfType() string
+
+ // MustBeValidRegex returns a format-string to format an error where a regex is invalid
+ MustBeValidRegex() string
+
+ // MustBeValidFormat returns a format-string to format an error where a value does not match the expected format
+ MustBeValidFormat() string
+
+ // MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0
+ MustBeGTEZero() string
+
+ // KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed
+ KeyCannotBeGreaterThan() string
+
+ // KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type
+ KeyItemsMustBeOfType() string
+
+ // KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique
+ KeyItemsMustBeUnique() string
+
+ // ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error
+ ReferenceMustBeCanonical() string
+
+ // NotAValidType returns a format-string to format an invalid type error
+ NotAValidType() string
+
+ // Duplicated returns a format-string to format an error where types are duplicated
+ Duplicated() string
+
+ // HttpBadStatus returns a format-string for errors when loading a schema using HTTP
+ HttpBadStatus() string
+
+ // ParseError returns a format-string for JSON parsing errors
+ ParseError() string
+
+ // ConditionThen returns a format-string for ConditionThenError errors
+ ConditionThen() string
+
+ // ConditionElse returns a format-string for ConditionElseError errors
+ ConditionElse() string
+
+ // ErrorFormat returns a format string for errors
+ ErrorFormat() string
+ }
+
+ // DefaultLocale is the default locale for this package
+ DefaultLocale struct{}
+)
+
+// False returns a format-string for "false" schema validation errors
+func (l DefaultLocale) False() string {
+ return "False always fails validation"
+}
+
+// Required returns a format-string for "required" schema validation errors
+func (l DefaultLocale) Required() string {
+ return `{{.property}} is required`
+}
+
+// InvalidType returns a format-string for "invalid type" schema validation errors
+func (l DefaultLocale) InvalidType() string {
+ return `Invalid type. Expected: {{.expected}}, given: {{.given}}`
+}
+
+// NumberAnyOf returns a format-string for "anyOf" schema validation errors
+func (l DefaultLocale) NumberAnyOf() string {
+ return `Must validate at least one schema (anyOf)`
+}
+
+// NumberOneOf returns a format-string for "oneOf" schema validation errors
+func (l DefaultLocale) NumberOneOf() string {
+ return `Must validate one and only one schema (oneOf)`
+}
+
+// NumberAllOf returns a format-string for "allOf" schema validation errors
+func (l DefaultLocale) NumberAllOf() string {
+ return `Must validate all the schemas (allOf)`
+}
+
+// NumberNot returns a format-string to format a NumberNotError
+func (l DefaultLocale) NumberNot() string {
+ return `Must not validate the schema (not)`
+}
+
+// MissingDependency returns a format-string for "missing dependency" schema validation errors
+func (l DefaultLocale) MissingDependency() string {
+ return `Has a dependency on {{.dependency}}`
+}
+
+// Internal returns a format-string for internal errors
+func (l DefaultLocale) Internal() string {
+ return `Internal Error {{.error}}`
+}
+
+// Const returns a format-string to format a ConstError
+func (l DefaultLocale) Const() string {
+ return `{{.field}} does not match: {{.allowed}}`
+}
+
+// Enum returns a format-string to format an EnumError
+func (l DefaultLocale) Enum() string {
+ return `{{.field}} must be one of the following: {{.allowed}}`
+}
+
+// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError
+func (l DefaultLocale) ArrayNoAdditionalItems() string {
+ return `No additional items allowed on array`
+}
+
+// ArrayNotEnoughItems returns a format-string to format an error for arrays that do not have enough items to match the positional list of schemas
+func (l DefaultLocale) ArrayNotEnoughItems() string {
+ return `Not enough items on array to match positional list of schema`
+}
+
+// ArrayMinItems returns a format-string to format an ArrayMinItemsError
+func (l DefaultLocale) ArrayMinItems() string {
+ return `Array must have at least {{.min}} items`
+}
+
+// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError
+func (l DefaultLocale) ArrayMaxItems() string {
+ return `Array must have at most {{.max}} items`
+}
+
+// Unique returns a format-string to format an ItemsMustBeUniqueError
+func (l DefaultLocale) Unique() string {
+ return `{{.type}} items[{{.i}},{{.j}}] must be unique`
+}
+
+// ArrayContains returns a format-string to format an ArrayContainsError
+func (l DefaultLocale) ArrayContains() string {
+ return `At least one of the items must match`
+}
+
+// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError
+func (l DefaultLocale) ArrayMinProperties() string {
+ return `Must have at least {{.min}} properties`
+}
+
+// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError
+func (l DefaultLocale) ArrayMaxProperties() string {
+ return `Must have at most {{.max}} properties`
+}
+
+// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError
+func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
+ return `Additional property {{.property}} is not allowed`
+}
+
+// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError
+func (l DefaultLocale) InvalidPropertyPattern() string {
+ return `Property "{{.property}}" does not match pattern {{.pattern}}`
+}
+
+// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError
+func (l DefaultLocale) InvalidPropertyName() string {
+ return `Property name of "{{.property}}" does not match`
+}
+
+// StringGTE returns a format-string to format an StringLengthGTEError
+func (l DefaultLocale) StringGTE() string {
+ return `String length must be greater than or equal to {{.min}}`
+}
+
+// StringLTE returns a format-string to format an StringLengthLTEError
+func (l DefaultLocale) StringLTE() string {
+ return `String length must be less than or equal to {{.max}}`
+}
+
+// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError
+func (l DefaultLocale) DoesNotMatchPattern() string {
+ return `Does not match pattern '{{.pattern}}'`
+}
+
+// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError
+func (l DefaultLocale) DoesNotMatchFormat() string {
+ return `Does not match format '{{.format}}'`
+}
+
+// MultipleOf returns a format-string to format an MultipleOfError
+func (l DefaultLocale) MultipleOf() string {
+ return `Must be a multiple of {{.multiple}}`
+}
+
+// NumberGTE returns the format string to format a NumberGTEError
+func (l DefaultLocale) NumberGTE() string {
+ return `Must be greater than or equal to {{.min}}`
+}
+
+// NumberGT returns the format string to format a NumberGTError
+func (l DefaultLocale) NumberGT() string {
+ return `Must be greater than {{.min}}`
+}
+
+// NumberLTE returns the format string to format a NumberLTEError
+func (l DefaultLocale) NumberLTE() string {
+ return `Must be less than or equal to {{.max}}`
+}
+
+// NumberLT returns the format string to format a NumberLTError
+func (l DefaultLocale) NumberLT() string {
+ return `Must be less than {{.max}}`
+}
+
+// Schema validators
+
+// RegexPattern returns a format-string to format a regex-pattern error
+func (l DefaultLocale) RegexPattern() string {
+ return `Invalid regex pattern '{{.pattern}}'`
+}
+
+// GreaterThanZero returns a format-string to format an error where a number must be greater than zero
+func (l DefaultLocale) GreaterThanZero() string {
+ return `{{.number}} must be strictly greater than 0`
+}
+
+// MustBeOfA returns a format-string to format an error where a value is of the wrong type
+func (l DefaultLocale) MustBeOfA() string {
+ return `{{.x}} must be of a {{.y}}`
+}
+
+// MustBeOfAn returns a format-string to format an error where a value is of the wrong type
+func (l DefaultLocale) MustBeOfAn() string {
+ return `{{.x}} must be of an {{.y}}`
+}
+
+// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error
+func (l DefaultLocale) CannotBeUsedWithout() string {
+ return `{{.x}} cannot be used without {{.y}}`
+}
+
+// CannotBeGT returns a format-string to format an error where a value is greater than allowed
+func (l DefaultLocale) CannotBeGT() string {
+ return `{{.x}} cannot be greater than {{.y}}`
+}
+
+// MustBeOfType returns a format-string to format an error where a value does not match the required type
+func (l DefaultLocale) MustBeOfType() string {
+ return `{{.key}} must be of type {{.type}}`
+}
+
+// MustBeValidRegex returns a format-string to format an error where a regex is invalid
+func (l DefaultLocale) MustBeValidRegex() string {
+ return `{{.key}} must be a valid regex`
+}
+
+// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format
+func (l DefaultLocale) MustBeValidFormat() string {
+ return `{{.key}} must be a valid format {{.given}}`
+}
+
+// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0
+func (l DefaultLocale) MustBeGTEZero() string {
+ return `{{.key}} must be greater than or equal to 0`
+}
+
+// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed
+func (l DefaultLocale) KeyCannotBeGreaterThan() string {
+ return `{{.key}} cannot be greater than {{.y}}`
+}
+
+// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type
+func (l DefaultLocale) KeyItemsMustBeOfType() string {
+ return `{{.key}} items must be {{.type}}`
+}
+
+// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique
+func (l DefaultLocale) KeyItemsMustBeUnique() string {
+ return `{{.key}} items must be unique`
+}
+
+// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error
+func (l DefaultLocale) ReferenceMustBeCanonical() string {
+ return `Reference {{.reference}} must be canonical`
+}
+
+// NotAValidType returns a format-string to format an invalid type error
+func (l DefaultLocale) NotAValidType() string {
+ return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}`
+}
+
+// Duplicated returns a format-string to format an error where types are duplicated
+func (l DefaultLocale) Duplicated() string {
+ return `{{.type}} type is duplicated`
+}
+
+// HttpBadStatus returns a format-string for errors when loading a schema using HTTP
+func (l DefaultLocale) HttpBadStatus() string {
+ return `Could not read schema from HTTP, response status is {{.status}}`
+}
+
+// ErrorFormat returns a format string for errors
+// Replacement options: field, description, context, value
+func (l DefaultLocale) ErrorFormat() string {
+ return `{{.field}}: {{.description}}`
+}
+
+// ParseError returns a format-string for JSON parsing errors
+func (l DefaultLocale) ParseError() string {
+ return `Expected: {{.expected}}, given: Invalid JSON`
+}
+
+// ConditionThen returns a format-string for ConditionThenError errors
+// If/Else
+func (l DefaultLocale) ConditionThen() string {
+ return `Must validate "then" as "if" was valid`
+}
+
+// ConditionElse returns a format-string for ConditionElseError errors
+func (l DefaultLocale) ConditionElse() string {
+ return `Must validate "else" as "if" was not valid`
+}
+
+// constants
+const (
+ STRING_NUMBER = "number"
+ STRING_ARRAY_OF_STRINGS = "array of strings"
+ STRING_ARRAY_OF_SCHEMAS = "array of schemas"
+ STRING_SCHEMA = "valid schema"
+ STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
+ STRING_PROPERTIES = "properties"
+ STRING_DEPENDENCY = "dependency"
+ STRING_PROPERTY = "property"
+ STRING_UNDEFINED = "undefined"
+ STRING_CONTEXT_ROOT = "(root)"
+ STRING_ROOT_SCHEMA_PROPERTY = "(root)"
+)
diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go
new file mode 100644
index 0000000..0a01791
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/result.go
@@ -0,0 +1,220 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Result and ResultError implementations.
+//
+// created 01-01-2015
+
+package gojsonschema
+
+import (
+ "fmt"
+ "strings"
+)
+
+type (
+ // ErrorDetails is a map of details specific to each error.
+ // While the values will vary, every error will contain a "field" value
+ ErrorDetails map[string]interface{}
+
+ // ResultError is the interface that library errors must implement
+ ResultError interface {
+ // Field returns the field name without the root context
+ // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
+ Field() string
+ // SetType sets the error-type
+ SetType(string)
+ // Type returns the error-type
+ Type() string
+ // SetContext sets the JSON-context for the error
+ SetContext(*JsonContext)
+ // Context returns the JSON-context of the error
+ Context() *JsonContext
+ // SetDescription sets a description for the error
+ SetDescription(string)
+ // Description returns the description of the error
+ Description() string
+ // SetDescriptionFormat sets the format for the description in the default text/template format
+ SetDescriptionFormat(string)
+ // DescriptionFormat returns the format for the description in the default text/template format
+ DescriptionFormat() string
+ // SetValue sets the value related to the error
+ SetValue(interface{})
+ // Value returns the value related to the error
+ Value() interface{}
+ // SetDetails sets the details specific to the error
+ SetDetails(ErrorDetails)
+ // Details returns details about the error
+ Details() ErrorDetails
+ // String returns a string representation of the error
+ String() string
+ }
+
+ // ResultErrorFields holds the fields for each ResultError implementation.
+ // ResultErrorFields implements the ResultError interface, so custom errors
+ // can be defined by just embedding this type
+ ResultErrorFields struct {
+ errorType string // A string with the type of error (i.e. invalid_type)
+ context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
+ description string // A human readable error message
+ descriptionFormat string // A format for human readable error message
+ value interface{} // Value given by the JSON file that is the source of the error
+ details ErrorDetails
+ }
+
+ // Result holds the result of a validation
+ Result struct {
+ errors []ResultError
+ // Scores how well the validation matched. Useful in generating
+ // better error messages for anyOf and oneOf.
+ score int
+ }
+)
+
+// Field returns the field name without the root context
+// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
+func (v *ResultErrorFields) Field() string {
+ return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
+}
+
+// SetType sets the error-type
+func (v *ResultErrorFields) SetType(errorType string) {
+ v.errorType = errorType
+}
+
+// Type returns the error-type
+func (v *ResultErrorFields) Type() string {
+ return v.errorType
+}
+
+// SetContext sets the JSON-context for the error
+func (v *ResultErrorFields) SetContext(context *JsonContext) {
+ v.context = context
+}
+
+// Context returns the JSON-context of the error
+func (v *ResultErrorFields) Context() *JsonContext {
+ return v.context
+}
+
+// SetDescription sets a description for the error
+func (v *ResultErrorFields) SetDescription(description string) {
+ v.description = description
+}
+
+// Description returns the description of the error
+func (v *ResultErrorFields) Description() string {
+ return v.description
+}
+
+// SetDescriptionFormat sets the format for the description in the default text/template format
+func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) {
+ v.descriptionFormat = descriptionFormat
+}
+
+// DescriptionFormat returns the format for the description in the default text/template format
+func (v *ResultErrorFields) DescriptionFormat() string {
+ return v.descriptionFormat
+}
+
+// SetValue sets the value related to the error
+func (v *ResultErrorFields) SetValue(value interface{}) {
+ v.value = value
+}
+
+// Value returns the value related to the error
+func (v *ResultErrorFields) Value() interface{} {
+ return v.value
+}
+
+// SetDetails sets the details specific to the error
+func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
+ v.details = details
+}
+
+// Details returns details about the error
+func (v *ResultErrorFields) Details() ErrorDetails {
+ return v.details
+}
+
+// String returns a string representation of the error
+func (v ResultErrorFields) String() string {
+ // as a fallback, the value is displayed go style
+ valueString := fmt.Sprintf("%v", v.value)
+
+ // marshal the go value to json
+ if v.value == nil {
+ valueString = TYPE_NULL
+ } else {
+ if vs, err := marshalToJSONString(v.value); err == nil {
+ if vs == nil {
+ valueString = TYPE_NULL
+ } else {
+ valueString = *vs
+ }
+ }
+ }
+
+ return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
+ "context": v.context.String(),
+ "description": v.description,
+ "value": valueString,
+ "field": v.Field(),
+ })
+}
+
+// Valid indicates if no errors were found
+func (v *Result) Valid() bool {
+ return len(v.errors) == 0
+}
+
+// Errors returns the errors that were found
+func (v *Result) Errors() []ResultError {
+ return v.errors
+}
+
+// AddError appends a fully filled error to the error set
+// SetDescription() will be called with the result of the parsed err.DescriptionFormat()
+func (v *Result) AddError(err ResultError, details ErrorDetails) {
+ if _, exists := details["context"]; !exists && err.Context() != nil {
+ details["context"] = err.Context().String()
+ }
+
+ err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
+
+ v.errors = append(v.errors, err)
+}
+
+func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) {
+ newError(err, context, value, Locale, details)
+ v.errors = append(v.errors, err)
+ v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
+}
+
+// Used to copy errors from a sub-schema to the main one
+func (v *Result) mergeErrors(otherResult *Result) {
+ v.errors = append(v.errors, otherResult.Errors()...)
+ v.score += otherResult.score
+}
+
+func (v *Result) incrementScore() {
+ v.score++
+}
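+
+// Illustrative sketch (not part of the upstream source): callers typically branch
+// on Valid() and walk Errors(), using the Field() and Description() accessors above.
+//
+// if !result.Valid() {
+// for _, e := range result.Errors() {
+// fmt.Printf("%s: %s\n", e.Field(), e.Description())
+// }
+// }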
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go
new file mode 100644
index 0000000..9e93cd7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go
@@ -0,0 +1,1087 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Defines Schema, the main entry to every subSchema.
+// Contains the parsing logic and error checking.
+//
+// created 26-02-2013
+
+package gojsonschema
+
+import (
+ "errors"
+ "math/big"
+ "reflect"
+ "regexp"
+ "text/template"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+var (
+ // Locale is the default locale to use
+ // Library users can overwrite with their own implementation
+ Locale locale = DefaultLocale{}
+
+ // ErrorTemplateFuncs allows you to define custom template funcs for use in localization.
+ ErrorTemplateFuncs template.FuncMap
+)
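+
+// Illustrative sketch (not part of the upstream source): because DefaultLocale can
+// be embedded, overriding a single message only takes one method. The type name
+// below is a hypothetical example.
+//
+// type terseLocale struct{ DefaultLocale }
+//
+// func (terseLocale) Required() string { return `{{.property}} is missing` }
+//
+// func init() { Locale = terseLocale{} }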
+
+// NewSchema instances a schema using the given JSONLoader
+func NewSchema(l JSONLoader) (*Schema, error) {
+ return NewSchemaLoader().Compile(l)
+}
+
+// Schema holds a schema
+type Schema struct {
+ documentReference gojsonreference.JsonReference
+ rootSchema *subSchema
+ pool *schemaPool
+ referencePool *schemaReferencePool
+}
+
+func (d *Schema) parse(document interface{}, draft Draft) error {
+ d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft}
+ return d.parseSchema(document, d.rootSchema)
+}
+
+// SetRootSchemaName sets the root-schema name
+func (d *Schema) SetRootSchemaName(name string) {
+ d.rootSchema.property = name
+}
+
+// Parses a subSchema
+//
+// Pretty long function (sorry :) )... but pretty straightforward, repetitive and boring
+// Not much magic involved here, most of the job is to validate the key names and their values,
+// then the values are copied into subSchema struct
+//
+func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error {
+
+ if currentSchema.draft == nil {
+ if currentSchema.parent == nil {
+ return errors.New("Draft not set")
+ }
+ currentSchema.draft = currentSchema.parent.draft
+ }
+
+ // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}"
+ if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) {
+ b := documentNode.(bool)
+ currentSchema.pass = &b
+ return nil
+ }
+
+ if !isKind(documentNode, reflect.Map) {
+ return errors.New(formatErrorDescription(
+ Locale.ParseError(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA,
+ },
+ ))
+ }
+
+ m := documentNode.(map[string]interface{})
+
+ if currentSchema.parent == nil {
+ currentSchema.ref = &d.documentReference
+ currentSchema.id = &d.documentReference
+ }
+
+ if currentSchema.id == nil && currentSchema.parent != nil {
+ currentSchema.id = currentSchema.parent.id
+ }
+
+ // In draft 6 the id keyword was renamed to $id
+ // Hybrid mode uses the old id by default
+ var keyID string
+
+ switch *currentSchema.draft {
+ case Draft4:
+ keyID = KEY_ID
+ case Hybrid:
+ keyID = KEY_ID_NEW
+ if existsMapKey(m, KEY_ID) {
+ keyID = KEY_ID
+ }
+ default:
+ keyID = KEY_ID_NEW
+ }
+ if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": keyID,
+ },
+ ))
+ }
+ if k, ok := m[keyID].(string); ok {
+ jsonReference, err := gojsonreference.NewJsonReference(k)
+ if err != nil {
+ return err
+ }
+ if currentSchema == d.rootSchema {
+ currentSchema.id = &jsonReference
+ } else {
+ ref, err := currentSchema.parent.id.Inherits(jsonReference)
+ if err != nil {
+ return err
+ }
+ currentSchema.id = ref
+ }
+ }
+
+ // definitions
+ if existsMapKey(m, KEY_DEFINITIONS) {
+ if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) {
+ for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
+ if isKind(dv, reflect.Map, reflect.Bool) {
+
+ newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema}
+
+ err := d.parseSchema(dv, newSchema)
+
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_ARRAY_OF_SCHEMAS,
+ "given": KEY_DEFINITIONS,
+ },
+ ))
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_ARRAY_OF_SCHEMAS,
+ "given": KEY_DEFINITIONS,
+ },
+ ))
+ }
+
+ }
+
+ // title
+ if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": KEY_TITLE,
+ },
+ ))
+ }
+ if k, ok := m[KEY_TITLE].(string); ok {
+ currentSchema.title = &k
+ }
+
+ // description
+ if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": KEY_DESCRIPTION,
+ },
+ ))
+ }
+ if k, ok := m[KEY_DESCRIPTION].(string); ok {
+ currentSchema.description = &k
+ }
+
+ // $ref
+ if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": KEY_REF,
+ },
+ ))
+ }
+
+ if k, ok := m[KEY_REF].(string); ok {
+
+ jsonReference, err := gojsonreference.NewJsonReference(k)
+ if err != nil {
+ return err
+ }
+
+ currentSchema.ref = &jsonReference
+
+ if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok {
+ currentSchema.refSchema = sch
+ } else {
+ err := d.parseReference(documentNode, currentSchema)
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+ }
+ }
+
+ // type
+ if existsMapKey(m, KEY_TYPE) {
+ if isKind(m[KEY_TYPE], reflect.String) {
+ if k, ok := m[KEY_TYPE].(string); ok {
+ err := currentSchema.types.Add(k)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ if isKind(m[KEY_TYPE], reflect.Slice) {
+ arrayOfTypes := m[KEY_TYPE].([]interface{})
+ for _, typeInArray := range arrayOfTypes {
+ if reflect.ValueOf(typeInArray).Kind() != reflect.String {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+ "given": KEY_TYPE,
+ },
+ ))
+ }
+ if err := currentSchema.types.Add(typeInArray.(string)); err != nil {
+ return err
+ }
+ }
+
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+ "given": KEY_TYPE,
+ },
+ ))
+ }
+ }
+ }
+
+ // properties
+ if existsMapKey(m, KEY_PROPERTIES) {
+ err := d.parseProperties(m[KEY_PROPERTIES], currentSchema)
+ if err != nil {
+ return err
+ }
+ }
+
+ // additionalProperties
+ if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) {
+ if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) {
+ currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool)
+ } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) {
+ newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.additionalProperties = newSchema
+ err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+ "given": KEY_ADDITIONAL_PROPERTIES,
+ },
+ ))
+ }
+ }
+
+ // patternProperties
+ if existsMapKey(m, KEY_PATTERN_PROPERTIES) {
+ if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) {
+ patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{})
+ if len(patternPropertiesMap) > 0 {
+ currentSchema.patternProperties = make(map[string]*subSchema)
+ for k, v := range patternPropertiesMap {
+ _, err := regexp.MatchString(k, "")
+ if err != nil {
+ return errors.New(formatErrorDescription(
+ Locale.RegexPattern(),
+ ErrorDetails{"pattern": k},
+ ))
+ }
+ newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+ err = d.parseSchema(v, newSchema)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+ currentSchema.patternProperties[k] = newSchema
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA,
+ "given": KEY_PATTERN_PROPERTIES,
+ },
+ ))
+ }
+ }
+
+ // propertyNames
+ if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 {
+ if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) {
+ newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.propertyNames = newSchema
+ err := d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA,
+ "given": KEY_PATTERN_PROPERTIES,
+ },
+ ))
+ }
+ }
+
+ // dependencies
+ if existsMapKey(m, KEY_DEPENDENCIES) {
+ err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema)
+ if err != nil {
+ return err
+ }
+ }
+
+ // items
+ if existsMapKey(m, KEY_ITEMS) {
+ if isKind(m[KEY_ITEMS], reflect.Slice) {
+ for _, itemElement := range m[KEY_ITEMS].([]interface{}) {
+ if isKind(itemElement, reflect.Map, reflect.Bool) {
+ newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+ newSchema.ref = currentSchema.ref
+ currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema)
+ err := d.parseSchema(itemElement, newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+ "given": KEY_ITEMS,
+ },
+ ))
+ }
+ currentSchema.itemsChildrenIsSingleSchema = false
+ }
+ } else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) {
+ newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+ newSchema.ref = currentSchema.ref
+ currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema)
+ err := d.parseSchema(m[KEY_ITEMS], newSchema)
+ if err != nil {
+ return err
+ }
+ currentSchema.itemsChildrenIsSingleSchema = true
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+ "given": KEY_ITEMS,
+ },
+ ))
+ }
+ }
+
+ // additionalItems
+ if existsMapKey(m, KEY_ADDITIONAL_ITEMS) {
+ if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) {
+ currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool)
+ } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) {
+ newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.additionalItems = newSchema
+ err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+ "given": KEY_ADDITIONAL_ITEMS,
+ },
+ ))
+ }
+ }
+
+ // validation : number / integer
+
+ if existsMapKey(m, KEY_MULTIPLE_OF) {
+ multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF])
+ if multipleOfValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_NUMBER,
+ "given": KEY_MULTIPLE_OF,
+ },
+ ))
+ }
+ if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 {
+ return errors.New(formatErrorDescription(
+ Locale.GreaterThanZero(),
+ ErrorDetails{"number": KEY_MULTIPLE_OF},
+ ))
+ }
+ currentSchema.multipleOf = multipleOfValue
+ }
+
+ if existsMapKey(m, KEY_MINIMUM) {
+ minimumValue := mustBeNumber(m[KEY_MINIMUM])
+ if minimumValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER},
+ ))
+ }
+ currentSchema.minimum = minimumValue
+ }
+
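+ // exclusiveMinimum changed between drafts: draft 4 defines it as a boolean that
+ // turns "minimum" into an exclusive bound, while draft 6 and later define it as
+ // a number in its own right; the Hybrid mode accepts either form. The same
+ // applies to exclusiveMaximum further below.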
+ if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) {
+ switch *currentSchema.draft {
+ case Draft4:
+ if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_BOOLEAN,
+ "given": KEY_EXCLUSIVE_MINIMUM,
+ },
+ ))
+ }
+ if currentSchema.minimum == nil {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeUsedWithout(),
+ ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
+ ))
+ }
+ if m[KEY_EXCLUSIVE_MINIMUM].(bool) {
+ currentSchema.exclusiveMinimum = currentSchema.minimum
+ currentSchema.minimum = nil
+ }
+ case Hybrid:
+ if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
+ if currentSchema.minimum == nil {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeUsedWithout(),
+ ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
+ ))
+ }
+ if m[KEY_EXCLUSIVE_MINIMUM].(bool) {
+ currentSchema.exclusiveMinimum = currentSchema.minimum
+ currentSchema.minimum = nil
+ }
+ } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) {
+ currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM])
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER,
+ "given": KEY_EXCLUSIVE_MINIMUM,
+ },
+ ))
+ }
+ default:
+ if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) {
+ currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM])
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_NUMBER,
+ "given": KEY_EXCLUSIVE_MINIMUM,
+ },
+ ))
+ }
+ }
+ }
+
+ if existsMapKey(m, KEY_MAXIMUM) {
+ maximumValue := mustBeNumber(m[KEY_MAXIMUM])
+ if maximumValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER},
+ ))
+ }
+ currentSchema.maximum = maximumValue
+ }
+
+ if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) {
+ switch *currentSchema.draft {
+ case Draft4:
+ if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_BOOLEAN,
+ "given": KEY_EXCLUSIVE_MAXIMUM,
+ },
+ ))
+ }
+ if currentSchema.maximum == nil {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeUsedWithout(),
+ ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
+ ))
+ }
+ if m[KEY_EXCLUSIVE_MAXIMUM].(bool) {
+ currentSchema.exclusiveMaximum = currentSchema.maximum
+ currentSchema.maximum = nil
+ }
+ case Hybrid:
+ if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
+ if currentSchema.maximum == nil {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeUsedWithout(),
+ ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
+ ))
+ }
+ if m[KEY_EXCLUSIVE_MAXIMUM].(bool) {
+ currentSchema.exclusiveMaximum = currentSchema.maximum
+ currentSchema.maximum = nil
+ }
+ } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) {
+ currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM])
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER,
+ "given": KEY_EXCLUSIVE_MAXIMUM,
+ },
+ ))
+ }
+ default:
+ if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) {
+ currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM])
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_NUMBER,
+ "given": KEY_EXCLUSIVE_MAXIMUM,
+ },
+ ))
+ }
+ }
+ }
+
+ // validation : string
+
+ if existsMapKey(m, KEY_MIN_LENGTH) {
+ minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH])
+ if minLengthIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER},
+ ))
+ }
+ if *minLengthIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MIN_LENGTH},
+ ))
+ }
+ currentSchema.minLength = minLengthIntegerValue
+ }
+
+ if existsMapKey(m, KEY_MAX_LENGTH) {
+ maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH])
+ if maxLengthIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER},
+ ))
+ }
+ if *maxLengthIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MAX_LENGTH},
+ ))
+ }
+ currentSchema.maxLength = maxLengthIntegerValue
+ }
+
+ if currentSchema.minLength != nil && currentSchema.maxLength != nil {
+ if *currentSchema.minLength > *currentSchema.maxLength {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeGT(),
+ ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_PATTERN) {
+ if isKind(m[KEY_PATTERN], reflect.String) {
+ regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string))
+ if err != nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeValidRegex(),
+ ErrorDetails{"key": KEY_PATTERN},
+ ))
+ }
+ currentSchema.pattern = regexpObject
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_FORMAT) {
+ formatString, ok := m[KEY_FORMAT].(string)
+ if !ok {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING},
+ ))
+ }
+ currentSchema.format = formatString
+ }
+
+ // validation : object
+
+ if existsMapKey(m, KEY_MIN_PROPERTIES) {
+ minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES])
+ if minPropertiesIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER},
+ ))
+ }
+ if *minPropertiesIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MIN_PROPERTIES},
+ ))
+ }
+ currentSchema.minProperties = minPropertiesIntegerValue
+ }
+
+ if existsMapKey(m, KEY_MAX_PROPERTIES) {
+ maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES])
+ if maxPropertiesIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER},
+ ))
+ }
+ if *maxPropertiesIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MAX_PROPERTIES},
+ ))
+ }
+ currentSchema.maxProperties = maxPropertiesIntegerValue
+ }
+
+ if currentSchema.minProperties != nil && currentSchema.maxProperties != nil {
+ if *currentSchema.minProperties > *currentSchema.maxProperties {
+ return errors.New(formatErrorDescription(
+ Locale.KeyCannotBeGreaterThan(),
+ ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_REQUIRED) {
+ if isKind(m[KEY_REQUIRED], reflect.Slice) {
+ requiredValues := m[KEY_REQUIRED].([]interface{})
+ for _, requiredValue := range requiredValues {
+ if isKind(requiredValue, reflect.String) {
+ if isStringInSlice(currentSchema.required, requiredValue.(string)) {
+ return errors.New(formatErrorDescription(
+ Locale.KeyItemsMustBeUnique(),
+ ErrorDetails{"key": KEY_REQUIRED},
+ ))
+ }
+ currentSchema.required = append(currentSchema.required, requiredValue.(string))
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.KeyItemsMustBeOfType(),
+ ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING},
+ ))
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ // validation : array
+
+ if existsMapKey(m, KEY_MIN_ITEMS) {
+ minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS])
+ if minItemsIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER},
+ ))
+ }
+ if *minItemsIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MIN_ITEMS},
+ ))
+ }
+ currentSchema.minItems = minItemsIntegerValue
+ }
+
+ if existsMapKey(m, KEY_MAX_ITEMS) {
+ maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS])
+ if maxItemsIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER},
+ ))
+ }
+ if *maxItemsIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MAX_ITEMS},
+ ))
+ }
+ currentSchema.maxItems = maxItemsIntegerValue
+ }
+
+ if existsMapKey(m, KEY_UNIQUE_ITEMS) {
+ if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) {
+ currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool)
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 {
+ newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.contains = newSchema
+ err := d.parseSchema(m[KEY_CONTAINS], newSchema)
+ if err != nil {
+ return err
+ }
+ }
+
+ // validation : all
+
+ if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 {
+ is, err := marshalWithoutNumber(m[KEY_CONST])
+ if err != nil {
+ return err
+ }
+ currentSchema._const = is
+ }
+
+ if existsMapKey(m, KEY_ENUM) {
+ if isKind(m[KEY_ENUM], reflect.Slice) {
+ for _, v := range m[KEY_ENUM].([]interface{}) {
+ is, err := marshalWithoutNumber(v)
+ if err != nil {
+ return err
+ }
+ if isStringInSlice(currentSchema.enum, *is) {
+ return errors.New(formatErrorDescription(
+ Locale.KeyItemsMustBeUnique(),
+ ErrorDetails{"key": KEY_ENUM},
+ ))
+ }
+ currentSchema.enum = append(currentSchema.enum, *is)
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ // validation : subSchema
+
+ if existsMapKey(m, KEY_ONE_OF) {
+ if isKind(m[KEY_ONE_OF], reflect.Slice) {
+ for _, v := range m[KEY_ONE_OF].([]interface{}) {
+ newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.oneOf = append(currentSchema.oneOf, newSchema)
+ err := d.parseSchema(v, newSchema)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_ANY_OF) {
+ if isKind(m[KEY_ANY_OF], reflect.Slice) {
+ for _, v := range m[KEY_ANY_OF].([]interface{}) {
+ newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.anyOf = append(currentSchema.anyOf, newSchema)
+ err := d.parseSchema(v, newSchema)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_ALL_OF) {
+ if isKind(m[KEY_ALL_OF], reflect.Slice) {
+ for _, v := range m[KEY_ALL_OF].([]interface{}) {
+ newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.allOf = append(currentSchema.allOf, newSchema)
+ err := d.parseSchema(v, newSchema)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_NOT) {
+ if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) {
+ newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.not = newSchema
+ err := d.parseSchema(m[KEY_NOT], newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT},
+ ))
+ }
+ }
+
+ if *currentSchema.draft >= Draft7 {
+ if existsMapKey(m, KEY_IF) {
+ if isKind(m[KEY_IF], reflect.Map, reflect.Bool) {
+ newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema._if = newSchema
+ err := d.parseSchema(m[KEY_IF], newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_THEN) {
+ if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) {
+ newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema._then = newSchema
+ err := d.parseSchema(m[KEY_THEN], newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_ELSE) {
+ if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) {
+ newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema._else = newSchema
+ err := d.parseSchema(m[KEY_ELSE], newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT},
+ ))
+ }
+ }
+ }
+
+ return nil
+}
+
+func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error {
+ var (
+ refdDocumentNode interface{}
+ dsp *schemaPoolDocument
+ err error
+ )
+
+ newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
+
+ d.referencePool.Add(currentSchema.ref.String(), newSchema)
+
+ dsp, err = d.pool.GetDocument(*currentSchema.ref)
+ if err != nil {
+ return err
+ }
+ newSchema.id = currentSchema.ref
+
+ refdDocumentNode = dsp.Document
+ newSchema.draft = dsp.Draft
+
+ if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT},
+ ))
+ }
+
+ err = d.parseSchema(refdDocumentNode, newSchema)
+ if err != nil {
+ return err
+ }
+
+ currentSchema.refSchema = newSchema
+
+ return nil
+
+}
+
+func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error {
+
+ if !isKind(documentNode, reflect.Map) {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT},
+ ))
+ }
+
+ m := documentNode.(map[string]interface{})
+ for k := range m {
+ schemaProperty := k
+ newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema)
+ err := d.parseSchema(m[k], newSchema)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error {
+
+ if !isKind(documentNode, reflect.Map) {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT},
+ ))
+ }
+
+ m := documentNode.(map[string]interface{})
+ currentSchema.dependencies = make(map[string]interface{})
+
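+ // Each dependency is either an array of property names (a property dependency)
+ // or a schema (a schema dependency); both forms are stored in the same map and
+ // told apart by their dynamic type at validation time.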
+ for k := range m {
+ switch reflect.ValueOf(m[k]).Kind() {
+
+ case reflect.Slice:
+ values := m[k].([]interface{})
+ var valuesToRegister []string
+
+ for _, value := range values {
+ if !isKind(value, reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{
+ "key": STRING_DEPENDENCY,
+ "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+ },
+ ))
+ }
+ valuesToRegister = append(valuesToRegister, value.(string))
+ currentSchema.dependencies[k] = valuesToRegister
+ }
+
+ case reflect.Map, reflect.Bool:
+ depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+ err := d.parseSchema(m[k], depSchema)
+ if err != nil {
+ return err
+ }
+ currentSchema.dependencies[k] = depSchema
+
+ default:
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{
+ "key": STRING_DEPENDENCY,
+ "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+ },
+ ))
+ }
+
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go
new file mode 100644
index 0000000..20db0c1
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go
@@ -0,0 +1,206 @@
+// Copyright 2018 johandorland ( https://github.com/johandorland )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gojsonschema
+
+import (
+ "bytes"
+ "errors"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+// SchemaLoader is used to load schemas
+type SchemaLoader struct {
+ pool *schemaPool
+ AutoDetect bool
+ Validate bool
+ Draft Draft
+}
+
+// NewSchemaLoader creates a new SchemaLoader
+func NewSchemaLoader() *SchemaLoader {
+
+ ps := &SchemaLoader{
+ pool: &schemaPool{
+ schemaPoolDocuments: make(map[string]*schemaPoolDocument),
+ },
+ AutoDetect: true,
+ Validate: false,
+ Draft: Hybrid,
+ }
+ ps.pool.autoDetect = &ps.AutoDetect
+
+ return ps
+}
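+
+// A minimal usage sketch (NewStringLoader is assumed to be one of the JSON
+// loaders provided elsewhere in the package, alongside NewReferenceLoader):
+//
+//    sl := NewSchemaLoader()
+//    sl.Draft = Draft7   // draft to use when a schema carries no $schema keyword
+//    sl.Validate = true  // check each added schema against its metaschema
+//    if err := sl.AddSchemas(NewStringLoader(`{"$id": "http://example.com/base.json"}`)); err != nil {
+//        // handle the error
+//    }
+//    schema, err := sl.Compile(NewReferenceLoader("http://example.com/root.json"))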
+
+func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error {
+
+ var (
+ schema string
+ err error
+ )
+ if sl.AutoDetect {
+ schema, _, err = parseSchemaURL(documentNode)
+ if err != nil {
+ return err
+ }
+ }
+
+ // If no explicit "$schema" is used, use the default metaschema associated with the draft used
+ if schema == "" {
+ if sl.Draft == Hybrid {
+ return nil
+ }
+ schema = drafts.GetSchemaURL(sl.Draft)
+ }
+
+ // Disable validation while loading the metaschema to prevent infinite recursion
+ sl.Validate = false
+
+ metaSchema, err := sl.Compile(NewReferenceLoader(schema))
+
+ if err != nil {
+ return err
+ }
+
+ sl.Validate = true
+
+ result := metaSchema.validateDocument(documentNode)
+
+ if !result.Valid() {
+ var res bytes.Buffer
+ for _, err := range result.Errors() {
+ res.WriteString(err.String())
+ res.WriteString("\n")
+ }
+ return errors.New(res.String())
+ }
+
+ return nil
+}
+
+// AddSchemas adds an arbitrary number of schemas to the schema cache. As this function does not require
+// an explicit URL, every schema should contain an $id so that it can be referenced by the main schema.
+func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error {
+ emptyRef, _ := gojsonreference.NewJsonReference("")
+
+ for _, loader := range loaders {
+ doc, err := loader.LoadJSON()
+
+ if err != nil {
+ return err
+ }
+
+ if sl.Validate {
+ if err := sl.validateMetaschema(doc); err != nil {
+ return err
+ }
+ }
+
+ // Parse with pooled=false, so that the schema only gets added to the schema pool by its $id
+ // and not by the document's ref, which is empty here
+ if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// AddSchema adds a schema under the provided URL to the schema cache
+func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error {
+
+ ref, err := gojsonreference.NewJsonReference(url)
+
+ if err != nil {
+ return err
+ }
+
+ doc, err := loader.LoadJSON()
+
+ if err != nil {
+ return err
+ }
+
+ if sl.Validate {
+ if err := sl.validateMetaschema(doc); err != nil {
+ return err
+ }
+ }
+
+ return sl.pool.parseReferences(doc, ref, true)
+}
+
+// Compile loads and compiles a schema
+func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) {
+
+ ref, err := rootSchema.JsonReference()
+
+ if err != nil {
+ return nil, err
+ }
+
+ d := Schema{}
+ d.pool = sl.pool
+ d.pool.jsonLoaderFactory = rootSchema.LoaderFactory()
+ d.documentReference = ref
+ d.referencePool = newSchemaReferencePool()
+
+ var doc interface{}
+ if ref.String() != "" {
+ // Get document from schema pool
+ spd, err := d.pool.GetDocument(d.documentReference)
+ if err != nil {
+ return nil, err
+ }
+ doc = spd.Document
+ } else {
+ // Load JSON directly
+ doc, err = rootSchema.LoadJSON()
+ if err != nil {
+ return nil, err
+ }
+ // References need only be parsed if loading JSON directly
+ // as pool.GetDocument already does this for us if loading by reference
+ err = sl.pool.parseReferences(doc, ref, true)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if sl.Validate {
+ if err := sl.validateMetaschema(doc); err != nil {
+ return nil, err
+ }
+ }
+
+ draft := sl.Draft
+ if sl.AutoDetect {
+ _, detectedDraft, err := parseSchemaURL(doc)
+ if err != nil {
+ return nil, err
+ }
+ if detectedDraft != nil {
+ draft = *detectedDraft
+ }
+ }
+
+ err = d.parse(doc, draft)
+ if err != nil {
+ return nil, err
+ }
+
+ return &d, nil
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go
new file mode 100644
index 0000000..35b1cc6
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go
@@ -0,0 +1,215 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Defines resources pooling.
+// Eases referencing and avoids downloading the same resource twice.
+//
+// created 26-02-2013
+
+package gojsonschema
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+type schemaPoolDocument struct {
+ Document interface{}
+ Draft *Draft
+}
+
+type schemaPool struct {
+ schemaPoolDocuments map[string]*schemaPoolDocument
+ jsonLoaderFactory JSONLoaderFactory
+ autoDetect *bool
+}
+
+func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error {
+
+ var (
+ draft *Draft
+ err error
+ reference = ref.String()
+ )
+ // Only the root document should be added to the schema pool if pooled is true
+ if _, ok := p.schemaPoolDocuments[reference]; pooled && ok {
+ return fmt.Errorf("Reference already exists: \"%s\"", reference)
+ }
+
+ if *p.autoDetect {
+ _, draft, err = parseSchemaURL(document)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = p.parseReferencesRecursive(document, ref, draft)
+
+ if pooled {
+ p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft}
+ }
+
+ return err
+}
+
+func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error {
+ // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references.
+ // For $ref references it takes the enclosing $id scope into account and replaces
+ // the reference with the absolute, resolved reference.
+
+ // When it encounters errors it fails silently; error handling is done when the schema
+ // is syntactically parsed, and any error encountered here should also surface there.
+ switch m := document.(type) {
+ case []interface{}:
+ for _, v := range m {
+ p.parseReferencesRecursive(v, ref, draft)
+ }
+ case map[string]interface{}:
+ localRef := &ref
+
+ keyID := KEY_ID_NEW
+ if existsMapKey(m, KEY_ID) {
+ keyID = KEY_ID
+ }
+ if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) {
+ jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string))
+ if err == nil {
+ localRef, err = ref.Inherits(jsonReference)
+ if err == nil {
+ if _, ok := p.schemaPoolDocuments[localRef.String()]; ok {
+ return fmt.Errorf("Reference already exists: \"%s\"", localRef.String())
+ }
+ p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft}
+ }
+ }
+ }
+
+ if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) {
+ jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string))
+ if err == nil {
+ absoluteRef, err := localRef.Inherits(jsonReference)
+ if err == nil {
+ m[KEY_REF] = absoluteRef.String()
+ }
+ }
+ }
+
+ for k, v := range m {
+ // const and enums should be interpreted literally, so ignore them
+ if k == KEY_CONST || k == KEY_ENUM {
+ continue
+ }
+ // The value of a key like "properties" or "dependencies" is not itself a schema, as it may contain
+ // members named "$ref", "$id", "const", etc. Therefore don't treat it like a schema.
+ if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES {
+ if child, ok := v.(map[string]interface{}); ok {
+ for _, v := range child {
+ p.parseReferencesRecursive(v, *localRef, draft)
+ }
+ }
+ } else {
+ p.parseReferencesRecursive(v, *localRef, draft)
+ }
+ }
+ }
+ return nil
+}
+
+func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) {
+
+ var (
+ spd *schemaPoolDocument
+ draft *Draft
+ ok bool
+ err error
+ )
+
+ if internalLogEnabled {
+ internalLog("Get Document ( %s )", reference.String())
+ }
+
+ // Create a deep copy, so we can remove the fragment part later on without altering the original
+ refToURL, _ := gojsonreference.NewJsonReference(reference.String())
+
+ // First check if the given fragment is a location independent identifier
+ // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3
+
+ if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok {
+ if internalLogEnabled {
+ internalLog(" From pool")
+ }
+ return spd, nil
+ }
+
+ // If the given reference is not a location independent identifier,
+ // strip the fragment and look for a document with its base URI
+
+ refToURL.GetUrl().Fragment = ""
+
+ if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok {
+ document, _, err := reference.GetPointer().Get(cachedSpd.Document)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if internalLogEnabled {
+ internalLog(" From pool")
+ }
+
+ spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft}
+ p.schemaPoolDocuments[reference.String()] = spd
+
+ return spd, nil
+ }
+
+ // It is not possible to load anything remotely that is not canonical...
+ if !reference.IsCanonical() {
+ return nil, errors.New(formatErrorDescription(
+ Locale.ReferenceMustBeCanonical(),
+ ErrorDetails{"reference": reference.String()},
+ ))
+ }
+
+ jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String())
+ document, err := jsonReferenceLoader.LoadJSON()
+
+ if err != nil {
+ return nil, err
+ }
+
+ // add the whole document to the pool for potential re-use
+ p.parseReferences(document, refToURL, true)
+
+ _, draft, _ = parseSchemaURL(document)
+
+ // resolve the potential fragment against the loaded document
+ document, _, err = reference.GetPointer().Get(document)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &schemaPoolDocument{Document: document, Draft: draft}, nil
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go
new file mode 100644
index 0000000..6e5e1b5
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go
@@ -0,0 +1,68 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Pool of referenced schemas.
+//
+// created 25-06-2013
+
+package gojsonschema
+
+import (
+ "fmt"
+)
+
+type schemaReferencePool struct {
+ documents map[string]*subSchema
+}
+
+func newSchemaReferencePool() *schemaReferencePool {
+
+ p := &schemaReferencePool{}
+ p.documents = make(map[string]*subSchema)
+
+ return p
+}
+
+func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) {
+
+ if internalLogEnabled {
+ internalLog(fmt.Sprintf("Schema Reference ( %s )", ref))
+ }
+
+ if sch, ok := p.documents[ref]; ok {
+ if internalLogEnabled {
+ internalLog(fmt.Sprintf(" From pool"))
+ }
+ return sch, true
+ }
+
+ return nil, false
+}
+
+func (p *schemaReferencePool) Add(ref string, sch *subSchema) {
+
+ if internalLogEnabled {
+ internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref))
+ }
+ if _, ok := p.documents[ref]; !ok {
+ p.documents[ref] = sch
+ }
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go
new file mode 100644
index 0000000..36b447a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go
@@ -0,0 +1,83 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Helper structure to handle schema types, and the combination of them.
+//
+// created 28-02-2013
+
+package gojsonschema
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+type jsonSchemaType struct {
+ types []string
+}
+
+// IsTyped reports whether the schema is typed, i.e. contains at least one type.
+// When not typed, the schema does not need any type validation.
+func (t *jsonSchemaType) IsTyped() bool {
+ return len(t.types) > 0
+}
+
+func (t *jsonSchemaType) Add(etype string) error {
+
+ if !isStringInSlice(JSON_TYPES, etype) {
+ return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES}))
+ }
+
+ if t.Contains(etype) {
+ return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype}))
+ }
+
+ t.types = append(t.types, etype)
+
+ return nil
+}
+
+func (t *jsonSchemaType) Contains(etype string) bool {
+
+ for _, v := range t.types {
+ if v == etype {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (t *jsonSchemaType) String() string {
+
+ if len(t.types) == 0 {
+ return STRING_UNDEFINED // should never happen
+ }
+
+ // Displayed as a list [type1,type2,...]
+ if len(t.types) > 1 {
+ return fmt.Sprintf("[%s]", strings.Join(t.types, ","))
+ }
+
+ // Only one type: name only
+ return t.types[0]
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go
new file mode 100644
index 0000000..ec77981
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go
@@ -0,0 +1,149 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Defines the structure of a subSchema.
+// A subSchema can contain other subSchemas.
+//
+// created 27-02-2013
+
+package gojsonschema
+
+import (
+ "github.com/xeipuuv/gojsonreference"
+ "math/big"
+ "regexp"
+)
+
+// Constants
+const (
+ KEY_SCHEMA = "$schema"
+ KEY_ID = "id"
+ KEY_ID_NEW = "$id"
+ KEY_REF = "$ref"
+ KEY_TITLE = "title"
+ KEY_DESCRIPTION = "description"
+ KEY_TYPE = "type"
+ KEY_ITEMS = "items"
+ KEY_ADDITIONAL_ITEMS = "additionalItems"
+ KEY_PROPERTIES = "properties"
+ KEY_PATTERN_PROPERTIES = "patternProperties"
+ KEY_ADDITIONAL_PROPERTIES = "additionalProperties"
+ KEY_PROPERTY_NAMES = "propertyNames"
+ KEY_DEFINITIONS = "definitions"
+ KEY_MULTIPLE_OF = "multipleOf"
+ KEY_MINIMUM = "minimum"
+ KEY_MAXIMUM = "maximum"
+ KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum"
+ KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum"
+ KEY_MIN_LENGTH = "minLength"
+ KEY_MAX_LENGTH = "maxLength"
+ KEY_PATTERN = "pattern"
+ KEY_FORMAT = "format"
+ KEY_MIN_PROPERTIES = "minProperties"
+ KEY_MAX_PROPERTIES = "maxProperties"
+ KEY_DEPENDENCIES = "dependencies"
+ KEY_REQUIRED = "required"
+ KEY_MIN_ITEMS = "minItems"
+ KEY_MAX_ITEMS = "maxItems"
+ KEY_UNIQUE_ITEMS = "uniqueItems"
+ KEY_CONTAINS = "contains"
+ KEY_CONST = "const"
+ KEY_ENUM = "enum"
+ KEY_ONE_OF = "oneOf"
+ KEY_ANY_OF = "anyOf"
+ KEY_ALL_OF = "allOf"
+ KEY_NOT = "not"
+ KEY_IF = "if"
+ KEY_THEN = "then"
+ KEY_ELSE = "else"
+)
+
+type subSchema struct {
+ draft *Draft
+
+ // basic subSchema meta properties
+ id *gojsonreference.JsonReference
+ title *string
+ description *string
+
+ property string
+
+ // Quick pass/fail for boolean schemas
+ pass *bool
+
+ // Types associated with the subSchema
+ types jsonSchemaType
+
+ // Reference url
+ ref *gojsonreference.JsonReference
+ // Schema referenced
+ refSchema *subSchema
+
+ // hierarchy
+ parent *subSchema
+ itemsChildren []*subSchema
+ itemsChildrenIsSingleSchema bool
+ propertiesChildren []*subSchema
+
+ // validation : number / integer
+ multipleOf *big.Rat
+ maximum *big.Rat
+ exclusiveMaximum *big.Rat
+ minimum *big.Rat
+ exclusiveMinimum *big.Rat
+
+ // validation : string
+ minLength *int
+ maxLength *int
+ pattern *regexp.Regexp
+ format string
+
+ // validation : object
+ minProperties *int
+ maxProperties *int
+ required []string
+
+ dependencies map[string]interface{}
+ additionalProperties interface{}
+ patternProperties map[string]*subSchema
+ propertyNames *subSchema
+
+ // validation : array
+ minItems *int
+ maxItems *int
+ uniqueItems bool
+ contains *subSchema
+
+ additionalItems interface{}
+
+ // validation : all
+ _const *string //const is a golang keyword
+ enum []string
+
+ // validation : subSchema
+ oneOf []*subSchema
+ anyOf []*subSchema
+ allOf []*subSchema
+ not *subSchema
+ _if *subSchema // if/else are golang keywords
+ _then *subSchema
+ _else *subSchema
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go
new file mode 100644
index 0000000..0e6fd51
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/types.go
@@ -0,0 +1,62 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Contains const types for schema and JSON.
+//
+// created 28-02-2013
+
+package gojsonschema
+
+// Type constants
+const (
+ TYPE_ARRAY = `array`
+ TYPE_BOOLEAN = `boolean`
+ TYPE_INTEGER = `integer`
+ TYPE_NUMBER = `number`
+ TYPE_NULL = `null`
+ TYPE_OBJECT = `object`
+ TYPE_STRING = `string`
+)
+
+// JSON_TYPES hosts the list of types that are supported in JSON
+var JSON_TYPES []string
+
+// SCHEMA_TYPES hosts the list of types that are supported in schemas
+var SCHEMA_TYPES []string
+
+func init() {
+ JSON_TYPES = []string{
+ TYPE_ARRAY,
+ TYPE_BOOLEAN,
+ TYPE_INTEGER,
+ TYPE_NUMBER,
+ TYPE_NULL,
+ TYPE_OBJECT,
+ TYPE_STRING}
+
+ SCHEMA_TYPES = []string{
+ TYPE_ARRAY,
+ TYPE_BOOLEAN,
+ TYPE_INTEGER,
+ TYPE_NUMBER,
+ TYPE_OBJECT,
+ TYPE_STRING}
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go
new file mode 100644
index 0000000..a17d22e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go
@@ -0,0 +1,197 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Various utility functions.
+//
+// created 26-02-2013
+
+package gojsonschema
+
+import (
+ "encoding/json"
+ "math/big"
+ "reflect"
+)
+
+func isKind(what interface{}, kinds ...reflect.Kind) bool {
+ target := what
+ if isJSONNumber(what) {
+ // json.Number is backed by a string, so resolve it to its numeric value first
+ target = *mustBeNumber(what)
+ }
+ targetKind := reflect.ValueOf(target).Kind()
+ for _, kind := range kinds {
+ if targetKind == kind {
+ return true
+ }
+ }
+ return false
+}
+
+func existsMapKey(m map[string]interface{}, k string) bool {
+ _, ok := m[k]
+ return ok
+}
+
+func isStringInSlice(s []string, what string) bool {
+ for i := range s {
+ if s[i] == what {
+ return true
+ }
+ }
+ return false
+}
+
+// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s.
+func indexStringInSlice(s []string, what string) int {
+ for i := range s {
+ if s[i] == what {
+ return i
+ }
+ }
+ return -1
+}
+
+func marshalToJSONString(value interface{}) (*string, error) {
+
+ mBytes, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+
+ sBytes := string(mBytes)
+ return &sBytes, nil
+}
+
+func marshalWithoutNumber(value interface{}) (*string, error) {
+
+ // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
+ // This means numbers are internally still represented as strings, and therefore 1.00 is unequal to 1.
+ // Decoding and re-encoding the JSON one more time without Decoder.UseNumber removes
+ // these differences in representation.
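+ // For example, the json.Number values "1.00" and "1" both come back as the
+ // string "1" after this round-trip, so numerically equal values compare equal
+ // in const, enum and uniqueItems checks.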
+
+ jsonString, err := marshalToJSONString(value)
+ if err != nil {
+ return nil, err
+ }
+
+ var document interface{}
+
+ err = json.Unmarshal([]byte(*jsonString), &document)
+ if err != nil {
+ return nil, err
+ }
+
+ return marshalToJSONString(document)
+}
+
+func isJSONNumber(what interface{}) bool {
+
+ switch what.(type) {
+
+ case json.Number:
+ return true
+ }
+
+ return false
+}
+
+func checkJSONInteger(what interface{}) (isInt bool) {
+
+ jsonNumber := what.(json.Number)
+
+ bigRat, isValidNumber := new(big.Rat).SetString(string(jsonNumber))
+
+ return isValidNumber && bigRat.IsInt()
+
+}
+
+// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
+const (
+ maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1
+ minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1
+)
+
+func mustBeInteger(what interface{}) *int {
+
+ if isJSONNumber(what) {
+
+ number := what.(json.Number)
+
+ isInt := checkJSONInteger(number)
+
+ if isInt {
+
+ int64Value, err := number.Int64()
+ if err != nil {
+ return nil
+ }
+
+ intValue := int(int64Value)
+ return &intValue
+ }
+
+ }
+
+ return nil
+}
+
+func mustBeNumber(what interface{}) *big.Rat {
+
+ if isJSONNumber(what) {
+ number := what.(json.Number)
+ ratValue, success := new(big.Rat).SetString(string(number))
+ if success {
+ return ratValue
+ }
+ }
+
+ return nil
+
+}
+
+func convertDocumentNode(val interface{}) interface{} {
+
+ if lval, ok := val.([]interface{}); ok {
+
+ res := []interface{}{}
+ for _, v := range lval {
+ res = append(res, convertDocumentNode(v))
+ }
+
+ return res
+
+ }
+
+ if mval, ok := val.(map[interface{}]interface{}); ok {
+
+ res := map[string]interface{}{}
+
+ for k, v := range mval {
+ res[k.(string)] = convertDocumentNode(v)
+ }
+
+ return res
+
+ }
+
+ return val
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go
new file mode 100644
index 0000000..74091bc
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go
@@ -0,0 +1,858 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Extends Schema and subSchema, implements the validation phase.
+//
+// created 28-02-2013
+
+package gojsonschema
+
+import (
+ "encoding/json"
+ "math/big"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Validate loads a JSON schema and validates a JSON document against it
+func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) {
+ // load schema
+ schema, err := NewSchema(ls)
+ if err != nil {
+ return nil, err
+ }
+ return schema.Validate(ld)
+}
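+
+// A typical call looks like this sketch (NewStringLoader is assumed to be one
+// of the package's JSON loaders):
+//
+//    schemaLoader := NewStringLoader(`{"type": "object", "required": ["name"]}`)
+//    documentLoader := NewStringLoader(`{"name": "gopher"}`)
+//    result, err := Validate(schemaLoader, documentLoader)
+//    if err != nil {
+//        // the schema or the document could not be loaded or compiled
+//    } else if !result.Valid() {
+//        for _, desc := range result.Errors() {
+//            fmt.Println(desc) // each desc describes one validation failure
+//        }
+//    }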
+
+// Validate loads and validates a JSON document
+func (v *Schema) Validate(l JSONLoader) (*Result, error) {
+ root, err := l.LoadJSON()
+ if err != nil {
+ return nil, err
+ }
+ return v.validateDocument(root), nil
+}
+
+func (v *Schema) validateDocument(root interface{}) *Result {
+ result := &Result{}
+ context := NewJsonContext(STRING_CONTEXT_ROOT, nil)
+ v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
+ return result
+}
+
+func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result {
+ result := &Result{}
+ v.validateRecursive(v, document, result, context)
+ return result
+}
+
+// validateRecursive walks the JSON document recursively and validates it against the subSchema
+func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateRecursive %s", context.String())
+ internalLog(" %v", currentNode)
+ }
+
+ // Handle true/false schemas as early as possible, since all other fields will be nil
+ if currentSubSchema.pass != nil {
+ if !*currentSubSchema.pass {
+ result.addInternalError(
+ new(FalseError),
+ context,
+ currentNode,
+ ErrorDetails{},
+ )
+ }
+ return
+ }
+
+ // Handle referenced schemas; return directly when a $ref is found
+ if currentSubSchema.refSchema != nil {
+ v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context)
+ return
+ }
+
+ // Check for null value
+ if currentNode == nil {
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
+ result.addInternalError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_NULL,
+ },
+ )
+ return
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context)
+ v.validateCommon(currentSubSchema, currentNode, result, context)
+
+ } else { // Not a null value
+
+ if isJSONNumber(currentNode) {
+
+ value := currentNode.(json.Number)
+
+ isInt := checkJSONInteger(value)
+
+ validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER))
+
+ if currentSubSchema.types.IsTyped() && !validType {
+
+ givenType := TYPE_INTEGER
+ if !isInt {
+ givenType = TYPE_NUMBER
+ }
+
+ result.addInternalError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": givenType,
+ },
+ )
+ return
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ } else {
+
+ rValue := reflect.ValueOf(currentNode)
+ rKind := rValue.Kind()
+
+ switch rKind {
+
+ // Slice => JSON array
+
+ case reflect.Slice:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
+ result.addInternalError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_ARRAY,
+ },
+ )
+ return
+ }
+
+ castCurrentNode := currentNode.([]interface{})
+
+ currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
+
+ v.validateArray(currentSubSchema, castCurrentNode, result, context)
+ v.validateCommon(currentSubSchema, castCurrentNode, result, context)
+
+ // Map => JSON object
+
+ case reflect.Map:
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
+ result.addInternalError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_OBJECT,
+ },
+ )
+ return
+ }
+
+ castCurrentNode, ok := currentNode.(map[string]interface{})
+ if !ok {
+ castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
+
+ v.validateObject(currentSubSchema, castCurrentNode, result, context)
+ v.validateCommon(currentSubSchema, castCurrentNode, result, context)
+
+ for _, pSchema := range currentSubSchema.propertiesChildren {
+ nextNode, ok := castCurrentNode[pSchema.property]
+ if ok {
+ subContext := NewJsonContext(pSchema.property, context)
+ v.validateRecursive(pSchema, nextNode, result, subContext)
+ }
+ }
+
+ // Simple JSON values : string, number, boolean
+
+ case reflect.Bool:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
+ result.addInternalError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_BOOLEAN,
+ },
+ )
+ return
+ }
+
+ value := currentNode.(bool)
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ case reflect.String:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
+ result.addInternalError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_STRING,
+ },
+ )
+ return
+ }
+
+ value := currentNode.(string)
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ }
+
+ }
+
+ }
+
+ result.incrementScore()
+}
+
+// Different kinds of validation follow here: subSchema / common / array / object / string...
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateSchema %s", context.String())
+ internalLog(" %v", currentNode)
+ }
+
+ if len(currentSubSchema.anyOf) > 0 {
+
+ validatedAnyOf := false
+ var bestValidationResult *Result
+
+ for _, anyOfSchema := range currentSubSchema.anyOf {
+ if !validatedAnyOf {
+ validationResult := anyOfSchema.subValidateWithContext(currentNode, context)
+ validatedAnyOf = validationResult.Valid()
+
+ if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
+ bestValidationResult = validationResult
+ }
+ }
+ }
+ if !validatedAnyOf {
+
+ result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})
+
+ if bestValidationResult != nil {
+ // add error messages of closest matching subSchema as
+ // that's probably the one the user was trying to match
+ result.mergeErrors(bestValidationResult)
+ }
+ }
+ }
+
+ if len(currentSubSchema.oneOf) > 0 {
+
+ nbValidated := 0
+ var bestValidationResult *Result
+
+ for _, oneOfSchema := range currentSubSchema.oneOf {
+ validationResult := oneOfSchema.subValidateWithContext(currentNode, context)
+ if validationResult.Valid() {
+ nbValidated++
+ } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
+ bestValidationResult = validationResult
+ }
+ }
+
+ if nbValidated != 1 {
+
+ result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{})
+
+ if nbValidated == 0 {
+ // add error messages of closest matching subSchema as
+ // that's probably the one the user was trying to match
+ result.mergeErrors(bestValidationResult)
+ }
+ }
+
+ }
+
+ if len(currentSubSchema.allOf) > 0 {
+ nbValidated := 0
+
+ for _, allOfSchema := range currentSubSchema.allOf {
+ validationResult := allOfSchema.subValidateWithContext(currentNode, context)
+ if validationResult.Valid() {
+ nbValidated++
+ }
+ result.mergeErrors(validationResult)
+ }
+
+ if nbValidated != len(currentSubSchema.allOf) {
+ result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
+ }
+ }
+
+ if currentSubSchema.not != nil {
+ validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context)
+ if validationResult.Valid() {
+ result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{})
+ }
+ }
+
+ if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 {
+ if isKind(currentNode, reflect.Map) {
+ for elementKey := range currentNode.(map[string]interface{}) {
+ if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
+ switch dependency := dependency.(type) {
+
+ case []string:
+ for _, dependOnKey := range dependency {
+ if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
+ result.addInternalError(
+ new(MissingDependencyError),
+ context,
+ currentNode,
+ ErrorDetails{"dependency": dependOnKey},
+ )
+ }
+ }
+
+ case *subSchema:
+ dependency.validateRecursive(dependency, currentNode, result, context)
+ }
+ }
+ }
+ }
+ }
+
+ if currentSubSchema._if != nil {
+ validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context)
+ if currentSubSchema._then != nil && validationResultIf.Valid() {
+ validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context)
+ if !validationResultThen.Valid() {
+ result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{})
+ result.mergeErrors(validationResultThen)
+ }
+ }
+ if currentSubSchema._else != nil && !validationResultIf.Valid() {
+ validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context)
+ if !validationResultElse.Valid() {
+ result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{})
+ result.mergeErrors(validationResultElse)
+ }
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateCommon %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ // const:
+ if currentSubSchema._const != nil {
+ vString, err := marshalWithoutNumber(value)
+ if err != nil {
+ result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err})
+ }
+ if *vString != *currentSubSchema._const {
+ result.addInternalError(new(ConstError),
+ context,
+ value,
+ ErrorDetails{
+ "allowed": *currentSubSchema._const,
+ },
+ )
+ }
+ }
+
+ // enum:
+ if len(currentSubSchema.enum) > 0 {
+ vString, err := marshalWithoutNumber(value)
+ if err != nil {
+ result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err})
+ }
+ if !isStringInSlice(currentSubSchema.enum, *vString) {
+ result.addInternalError(
+ new(EnumError),
+ context,
+ value,
+ ErrorDetails{
+ "allowed": strings.Join(currentSubSchema.enum, ", "),
+ },
+ )
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateArray %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ nbValues := len(value)
+
+	// If "items" holds a single schema, every element of the array must validate
+	// against it. Otherwise "items" is a list of schemas matched to the elements by position below.
+ if currentSubSchema.itemsChildrenIsSingleSchema {
+ for i := range value {
+ subContext := NewJsonContext(strconv.Itoa(i), context)
+ validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext)
+ result.mergeErrors(validationResult)
+ }
+ } else {
+		if len(currentSubSchema.itemsChildren) > 0 {
+
+ nbItems := len(currentSubSchema.itemsChildren)
+
+ // while we have both schemas and values, check them against each other
+ for i := 0; i != nbItems && i != nbValues; i++ {
+ subContext := NewJsonContext(strconv.Itoa(i), context)
+ validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext)
+ result.mergeErrors(validationResult)
+ }
+
+ if nbItems < nbValues {
+				// we have fewer schemas than elements in the instance array,
+ // but that might be ok if "additionalItems" is specified.
+
+ switch currentSubSchema.additionalItems.(type) {
+ case bool:
+ if !currentSubSchema.additionalItems.(bool) {
+ result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
+ }
+ case *subSchema:
+ additionalItemSchema := currentSubSchema.additionalItems.(*subSchema)
+ for i := nbItems; i != nbValues; i++ {
+ subContext := NewJsonContext(strconv.Itoa(i), context)
+ validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext)
+ result.mergeErrors(validationResult)
+ }
+ }
+ }
+ }
+ }
+
+ // minItems & maxItems
+ if currentSubSchema.minItems != nil {
+ if nbValues < int(*currentSubSchema.minItems) {
+ result.addInternalError(
+ new(ArrayMinItemsError),
+ context,
+ value,
+ ErrorDetails{"min": *currentSubSchema.minItems},
+ )
+ }
+ }
+ if currentSubSchema.maxItems != nil {
+ if nbValues > int(*currentSubSchema.maxItems) {
+ result.addInternalError(
+ new(ArrayMaxItemsError),
+ context,
+ value,
+ ErrorDetails{"max": *currentSubSchema.maxItems},
+ )
+ }
+ }
+
+ // uniqueItems:
+ if currentSubSchema.uniqueItems {
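+		// Items are compared by a canonical JSON serialization of their values; the
+		// index of the first occurrence is remembered so duplicates report both positions.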
+ var stringifiedItems = make(map[string]int)
+ for j, v := range value {
+ vString, err := marshalWithoutNumber(v)
+ if err != nil {
+ result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err})
+ }
+ if i, ok := stringifiedItems[*vString]; ok {
+ result.addInternalError(
+ new(ItemsMustBeUniqueError),
+ context,
+ value,
+ ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j},
+ )
+ }
+ stringifiedItems[*vString] = j
+ }
+ }
+
+ // contains:
+
+ if currentSubSchema.contains != nil {
+ validatedOne := false
+ var bestValidationResult *Result
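+		// Remember the highest-scoring failing result so that, if no element
+		// satisfies the "contains" schema, its errors can be reported as the closest match.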
+
+ for i, v := range value {
+ subContext := NewJsonContext(strconv.Itoa(i), context)
+
+ validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext)
+ if validationResult.Valid() {
+ validatedOne = true
+ break
+ } else {
+ if bestValidationResult == nil || validationResult.score > bestValidationResult.score {
+ bestValidationResult = validationResult
+ }
+ }
+ }
+ if !validatedOne {
+ result.addInternalError(
+ new(ArrayContainsError),
+ context,
+ value,
+ ErrorDetails{},
+ )
+ if bestValidationResult != nil {
+ result.mergeErrors(bestValidationResult)
+ }
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateObject %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ // minProperties & maxProperties:
+ if currentSubSchema.minProperties != nil {
+ if len(value) < int(*currentSubSchema.minProperties) {
+ result.addInternalError(
+ new(ArrayMinPropertiesError),
+ context,
+ value,
+ ErrorDetails{"min": *currentSubSchema.minProperties},
+ )
+ }
+ }
+ if currentSubSchema.maxProperties != nil {
+ if len(value) > int(*currentSubSchema.maxProperties) {
+ result.addInternalError(
+ new(ArrayMaxPropertiesError),
+ context,
+ value,
+ ErrorDetails{"max": *currentSubSchema.maxProperties},
+ )
+ }
+ }
+
+ // required:
+ for _, requiredProperty := range currentSubSchema.required {
+ _, ok := value[requiredProperty]
+ if ok {
+ result.incrementScore()
+ } else {
+ result.addInternalError(
+ new(RequiredError),
+ context,
+ value,
+ ErrorDetails{"property": requiredProperty},
+ )
+ }
+ }
+
+ // additionalProperty & patternProperty:
+ for pk := range value {
+
+ // Check whether this property is described by "properties"
+ found := false
+ for _, spValue := range currentSubSchema.propertiesChildren {
+ if pk == spValue.property {
+ found = true
+ }
+ }
+
+ // Check whether this property is described by "patternProperties"
+ ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
+
+		// If it is described by neither "properties" nor "patternProperties", it must pass "additionalProperties"
+ if !found && !ppMatch {
+ switch ap := currentSubSchema.additionalProperties.(type) {
+ case bool:
+				// Handle the boolean case separately, as it's cleaner to return a specific error than to fail validation against the false schema
+ if !ap {
+ result.addInternalError(
+ new(AdditionalPropertyNotAllowedError),
+ context,
+ value[pk],
+ ErrorDetails{"property": pk},
+ )
+
+ }
+ case *subSchema:
+ validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context))
+ result.mergeErrors(validationResult)
+ }
+ }
+ }
+
+ // propertyNames:
+ if currentSubSchema.propertyNames != nil {
+ for pk := range value {
+ validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context)
+ if !validationResult.Valid() {
+ result.addInternalError(new(InvalidPropertyNameError),
+ context,
+ value, ErrorDetails{
+ "property": pk,
+ })
+ result.mergeErrors(validationResult)
+ }
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool {
+
+ if internalLogEnabled {
+ internalLog("validatePatternProperty %s", context.String())
+ internalLog(" %s %v", key, value)
+ }
+
+ validated := false
+
+ for pk, pv := range currentSubSchema.patternProperties {
+ if matches, _ := regexp.MatchString(pk, key); matches {
+ validated = true
+ subContext := NewJsonContext(key, context)
+ validationResult := pv.subValidateWithContext(value, subContext)
+ result.mergeErrors(validationResult)
+ }
+ }
+
+ if !validated {
+ return false
+ }
+
+ result.incrementScore()
+ return true
+}
+
+func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
+
+ // Ignore JSON numbers
+ if isJSONNumber(value) {
+ return
+ }
+
+	// Ignore non-strings
+ if !isKind(value, reflect.String) {
+ return
+ }
+
+ if internalLogEnabled {
+ internalLog("validateString %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ stringValue := value.(string)
+
+ // minLength & maxLength:
+ if currentSubSchema.minLength != nil {
+ if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
+ result.addInternalError(
+ new(StringLengthGTEError),
+ context,
+ value,
+ ErrorDetails{"min": *currentSubSchema.minLength},
+ )
+ }
+ }
+ if currentSubSchema.maxLength != nil {
+ if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
+ result.addInternalError(
+ new(StringLengthLTEError),
+ context,
+ value,
+ ErrorDetails{"max": *currentSubSchema.maxLength},
+ )
+ }
+ }
+
+ // pattern:
+ if currentSubSchema.pattern != nil {
+ if !currentSubSchema.pattern.MatchString(stringValue) {
+ result.addInternalError(
+ new(DoesNotMatchPatternError),
+ context,
+ value,
+ ErrorDetails{"pattern": currentSubSchema.pattern},
+ )
+
+ }
+ }
+
+ // format
+ if currentSubSchema.format != "" {
+ if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
+ result.addInternalError(
+ new(DoesNotMatchFormatError),
+ context,
+ value,
+ ErrorDetails{"format": currentSubSchema.format},
+ )
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
+
+	// Ignore non-numbers
+ if !isJSONNumber(value) {
+ return
+ }
+
+ if internalLogEnabled {
+ internalLog("validateNumber %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ number := value.(json.Number)
+ float64Value, _ := new(big.Rat).SetString(string(number))
+
+ // multipleOf:
+ if currentSubSchema.multipleOf != nil {
+ if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() {
+ result.addInternalError(
+ new(MultipleOfError),
+ context,
+ number,
+ ErrorDetails{
+ "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf),
+ },
+ )
+ }
+ }
+
+	// maximum & exclusiveMaximum:
+ if currentSubSchema.maximum != nil {
+ if float64Value.Cmp(currentSubSchema.maximum) == 1 {
+ result.addInternalError(
+ new(NumberLTEError),
+ context,
+ number,
+ ErrorDetails{
+ "max": new(big.Float).SetRat(currentSubSchema.maximum),
+ },
+ )
+ }
+ }
+ if currentSubSchema.exclusiveMaximum != nil {
+ if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 {
+ result.addInternalError(
+ new(NumberLTError),
+ context,
+ number,
+ ErrorDetails{
+ "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum),
+ },
+ )
+ }
+ }
+
+	// minimum & exclusiveMinimum:
+ if currentSubSchema.minimum != nil {
+ if float64Value.Cmp(currentSubSchema.minimum) == -1 {
+ result.addInternalError(
+ new(NumberGTEError),
+ context,
+ number,
+ ErrorDetails{
+ "min": new(big.Float).SetRat(currentSubSchema.minimum),
+ },
+ )
+ }
+ }
+ if currentSubSchema.exclusiveMinimum != nil {
+ if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 {
+ result.addInternalError(
+ new(NumberGTError),
+ context,
+ number,
+ ErrorDetails{
+ "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum),
+ },
+ )
+ }
+ }
+
+ // format
+ if currentSubSchema.format != "" {
+ if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) {
+ result.addInternalError(
+ new(DoesNotMatchFormatError),
+ context,
+ value,
+ ErrorDetails{"format": currentSubSchema.format},
+ )
+ }
+ }
+
+ result.incrementScore()
+}
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
new file mode 100644
index 0000000..7348c50
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 0000000..8da58fb
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 0000000..b50c6e8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,133 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
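+
+For instance, a document that relies on anchors and the "<<" merge key can be
+decoded like any other mapping. A minimal sketch (the document and names below
+are only illustrative):
+
+```Go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"gopkg.in/yaml.v2"
+)
+
+// "defaults" is anchored and then merged into "development" via the "<<" key.
+var merged = `
+defaults: &defaults
+  adapter: postgres
+  host: localhost
+development:
+  <<: *defaults
+  database: dev_db
+`
+
+func main() {
+	m := make(map[string]map[string]string)
+	if err := yaml.Unmarshal([]byte(merged), &m); err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	// "adapter" is inherited from the "defaults" anchor through the merge key.
+	fmt.Println(m["development"]["adapter"])
+}
+```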
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 0000000..acf7140
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,744 @@
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+	// Check if we can move the queue to the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+var disableLineWrapping = false
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ if disableLineWrapping {
+ emitter.best_width = -1
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 0000000..129bc2a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,815 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ // For an alias node, alias holds the resolved alias.
+ alias *node
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+		// Scanner errors don't advance the line number before returning the error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ n.children = append(n.children, p.parse())
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ n.alias = p.doc.anchors[n.value]
+ if n.alias == nil {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[*node]bool
+ mapType reflect.Type
+ terrors []string
+ strict bool
+
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+ d := &decoder{mapType: defaultMapType, strict: strict}
+ d.aliases = make(map[*node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == yaml_BINARY_TAG {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should rule out any dubious values.
+ text = []byte(n.value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ return true
+ }
+ if resolved != nil {
+ out.SetString(n.value)
+ return true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else if tag == yaml_TIMESTAMP_TAG {
+ // It looks like a timestamp but for backward compatibility
+ // reasons we set it as a string, so that code that unmarshals
+ // timestamp-like values into interface{} will continue to
+ // see a string and not a time.Time.
+ // TODO(v3) Drop this.
+ out.Set(reflect.ValueOf(n.value))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ return true
+ }
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ d.setMapIndex(n.children[i+1], out, k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+ if d.strict && out.MapIndex(k) != zeroValue {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+ return
+ }
+ out.SetMapIndex(k, v)
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ var doneFields []bool
+ if d.strict {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.strict {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ d.setMapIndex(n.children[i+1], inlineMap, name, value)
+ } else if d.strict {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ if n.alias != nil && n.alias.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ if ni.alias != nil && ni.alias.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
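+
+// Illustration: for a merge such as "<<: [*a, *b]" the loop above visits *b
+// first and *a last, so when both aliases define the same key the value from
+// *a (the earlier entry in the list) is the one that remains in the map.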
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 0000000..a1c2cc5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
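+
+// As a rough example: with only MAPPING-START and a SCALAR key queued, the
+// function reports that more events are needed, since the queue holds no more
+// than the 3 extra events allowed for MAPPING-START and the mapping is still
+// open. A queue holding MAPPING-START immediately followed by MAPPING-END
+// balances the nesting, so it returns false and emission proceeds.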
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
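+
+// In practice this means, for example, that a multiline scalar key, or a key
+// whose anchor, tag and value together exceed 128 bytes, is not emitted in
+// the compact "key: value" form; the block mapping emitter falls back to the
+// explicit "? key" / ": value" notation instead.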
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
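+
+// Two examples of the fallbacks above (ignoring canonical mode): a plain
+// scalar such as "a: b" is not block-plain-allowed because of the ": "
+// sequence, so it is emitted single-quoted as 'a: b'; a scalar containing a
+// non-printable byte disables plain and single-quoted styles entirely and is
+// emitted double-quoted with escape sequences.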
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
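+
+// For example: a block scalar value with no trailing line break gets the
+// strip hint ("|-"), one ending in two or more breaks gets the keep hint
+// ("|+"), and exactly one trailing break needs no chomping hint. A value
+// starting with a space or break additionally gets an explicit indentation
+// indicator derived from best_indent.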
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 0000000..0ee738e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,390 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// jsonNumber is the interface of the encoding/json.Number datatype.
+// Repeating the interface here avoids a dependency on encoding/json, and also
+// supports other libraries like jsoniter, which use a similar datatype with
+// the same interface. Detecting this interface is useful when dealing with
+// structures containing json.Number, which is a string under the hood. The
+// encoder should prefer the use of Int64(), Float64() and String(), in that
+// order, when encoding this type.
+type jsonNumber interface {
+ Float64() (float64, error)
+ Int64() (int64, error)
+ String() string
+}
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ // doneInit holds whether the initial stream_start_event has been
+ // emitted.
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch m := iface.(type) {
+ case jsonNumber:
+ integer, err := m.Int64()
+ if err == nil {
+ // In this case the json.Number is a valid int64
+ in = reflect.ValueOf(integer)
+ break
+ }
+ float, err := m.Float64()
+ if err == nil {
+ // In this case the json.Number is a valid float64
+ in = reflect.ValueOf(float)
+ break
+ }
+ // fallback case - no number could be obtained
+ in = reflect.ValueOf(m.String())
+ case time.Time, *time.Time:
+ // Although time.Time implements TextMarshaler,
+ // we don't want to treat it as a string for YAML
+ // purposes because YAML has special support for
+ // timestamps.
+ case Marshaler:
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ case encoding.TextMarshaler:
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.Type() == ptrTimeType {
+ e.timev(tag, in.Elem())
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ if in.Type() == timeType {
+ e.timev(tag, in)
+ } else {
+ e.structv(tag, in)
+ }
+ case reflect.Slice, reflect.Array:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
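
A hedged usage sketch of the struct handling above, exercised through the package's public Marshal API and the yaml struct tags it recognises (omitempty, flow, inline); the Config and Addr types are invented for illustration.

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    type Addr struct {
        Host string `yaml:"host"`
        Port int    `yaml:"port"`
    }

    type Config struct {
        Name  string            `yaml:"name"`
        Note  string            `yaml:"note,omitempty"` // skipped when empty, as in the OmitEmpty check above
        Addr  Addr              `yaml:"addr,flow"`      // emitted in flow style
        Extra map[string]string `yaml:",inline"`        // merged into the parent mapping
    }

    func main() {
        out, err := yaml.Marshal(Config{
            Name:  "demo",
            Addr:  Addr{Host: "localhost", Port: 8080},
            Extra: map[string]string{"env": "dev"},
        })
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }
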
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
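
A standalone sketch of what that pattern accepts; the regular expression is copied here so the snippet compiles outside the package.

    package main

    import (
        "fmt"
        "regexp"
    )

    // Same pattern as base60float above, repeated for illustration.
    var sexagesimal = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)

    func main() {
        for _, s := range []string{"1:20:30.5", "12:34", "190:20:30", "1.5", "42"} {
            fmt.Printf("%-10s base60=%v\n", s, sexagesimal.MatchString(s))
        }
    }
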
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = yaml_BINARY_TAG
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
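
As a rough usage sketch of the style selection in stringv above (via the public Marshal API; exact quoting may vary between versions), strings that would read back as another type are double-quoted while ordinary strings stay plain:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        out, err := yaml.Marshal(map[string]string{
            "plain":       "hello", // resolves to !!str, so it can stay plain
            "bool":        "yes",   // would resolve to a bool, so it gets quoted
            "null":        "~",     // would resolve to null, so it gets quoted
            "sexagesimal": "12:30", // base-60 lookalike, so it gets quoted
        })
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }
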
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod
new file mode 100644
index 0000000..2cbb85a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/go.mod
@@ -0,0 +1,5 @@
+module gopkg.in/yaml.v2
+
+go 1.15
+
+require gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000..81d05df
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected ", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
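
The stream production at the top of this file allows several documents in one stream. A hedged sketch of how that surfaces through the package's public Decoder API (assuming the usual gopkg.in/yaml.v2 import path):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "gopkg.in/yaml.v2"
    )

    func main() {
        // Each Decode call consumes one document from the stream.
        dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\nb: 2\n"))
        for {
            var doc map[string]int
            if err := dec.Decode(&doc); err != nil {
                if err == io.EOF {
                    break
                }
                panic(err)
            }
            fmt.Println(doc)
        }
    }
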
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000..7c1f5fa
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
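
A self-contained sketch of the same BOM dispatch, for reference; the sniff helper is made up and not part of this package.

    package main

    import (
        "bytes"
        "fmt"
    )

    func sniff(b []byte) string {
        switch {
        case bytes.HasPrefix(b, []byte("\xff\xfe")):
            return "UTF-16LE"
        case bytes.HasPrefix(b, []byte("\xfe\xff")):
            return "UTF-16BE"
        case bytes.HasPrefix(b, []byte("\xef\xbb\xbf")):
            return "UTF-8 (with BOM)"
        default:
            return "UTF-8 (assumed)"
        }
    }

    func main() {
        fmt.Println(sniff([]byte("\xef\xbb\xbfa: 1"))) // UTF-8 (with BOM)
        fmt.Println(sniff([]byte("a: 1")))             // UTF-8 (assumed)
    }
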
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above
+ // implies that to be the case, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is (in Go) panicking, or (in C) accessing invalid memory.
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length is (in Go) panicking, or (in C) accessing invalid memory.
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
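
A standalone illustration of the surrogate-pair formula quoted in the UTF-16 comments above; decodeSurrogatePair is a hypothetical helper, not part of this package.

    package main

    import "fmt"

    // U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF)
    func decodeSurrogatePair(w1, w2 uint16) rune {
        return 0x10000 + (rune(w1&0x3FF) << 10) + rune(w2&0x3FF)
    }

    func main() {
        // U+1F600 is encoded in UTF-16 as the pair D83D DE00.
        fmt.Printf("U+%X\n", decodeSurrogatePair(0xD83D, 0xDE00)) // U+1F600
    }
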
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000..4120e0c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ case yaml_FLOAT_TAG:
+ if rtag == yaml_INT_TAG {
+ switch v := out.(type) {
+ case int64:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ case int:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == yaml_TIMESTAMP_TAG {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return yaml_TIMESTAMP_TAG, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ }
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return yaml_STR_TAG, in
+}
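+
+// resolveExamples is an illustrative sketch, not part of the upstream
+// library, showing how the resolution rules above map untagged plain
+// scalars to Go values.
+func resolveExamples() {
+ _, i := resolve("", "42")    // yaml_INT_TAG, int(42)
+ _, b := resolve("", "yes")   // yaml_BOOL_TAG, true (via resolveMap)
+ _, f := resolve("", "3.5")   // yaml_FLOAT_TAG, float64(3.5)
+ _, n := resolve("", "null")  // yaml_NULL_TAG, nil (via resolveMap)
+ _, v := resolve("", "0b101") // yaml_INT_TAG, int(5)
+ _ = []interface{}{i, b, f, n, v}
+}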
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
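+
+// encodeBase64Example is an illustrative sketch, not part of the upstream
+// library: a 100-byte input encodes to 136 base64 characters, which
+// encodeBase64 above splits into a 70-character line and a 66-character
+// line, each terminated by '\n'.
+func encodeBase64Example() string {
+ return encodeBase64(strings.Repeat("x", 100))
+}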
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
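+
+// parseTimestampExamples is an illustrative sketch, not part of the upstream
+// library, of inputs accepted and rejected by parseTimestamp above.
+func parseTimestampExamples() {
+ _, ok1 := parseTimestamp("2001-12-15T02:59:43.1Z") // true: RFC3339-style format
+ _, ok2 := parseTimestamp("2002-12-14")             // true: date-only format
+ _, ok3 := parseTimestamp("not a date")             // false: fails the "YYYY-" prefix check
+ _ = []bool{ok1, ok2, ok3}
+}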
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000..0b9bb60
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually, only two aspects of Scanning might be called "clever"; the
+// rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
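+// As an illustrative sketch (not part of the upstream sources), the scanner
+// below can be driven from inside this package roughly as follows, assuming
+// the usual libyaml-style setup helpers yaml_parser_initialize and
+// yaml_parser_set_input_string are defined elsewhere in this package:
+//
+// var parser yaml_parser_t
+// if !yaml_parser_initialize(&parser) {
+// panic("failed to initialize parser")
+// }
+// yaml_parser_set_input_string(&parser, []byte("key: [item 1, item 2]\n"))
+// for {
+// var token yaml_token_t
+// if !yaml_parser_scan(&parser, &token) {
+// break // scanner error; details are recorded in parser.problem
+// }
+// // token.typ walks STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR,
+// // VALUE, FLOW-SEQUENCE-START, SCALAR, FLOW-ENTRY, SCALAR,
+// // FLOW-SEQUENCE-END, BLOCK-END, STREAM-END.
+// if token.typ == yaml_STREAM_END_TOKEN {
+// break
+// }
+// }
+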
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ if parser.tokens_head != len(parser.tokens) {
+ // If queue is non-empty, check if any potential simple key may
+ // occupy the head position.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we don't determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
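+
+// For example (illustrative): a document whose first non-blank character is
+// '@' (e.g. "@foo") falls through all of the checks above, because '@' cannot
+// start a plain scalar, so the dispatcher reports "found character that
+// cannot start any token".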
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
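+
+// For example (illustrative): while scanning "a: 1", the plain scalar "a" is
+// saved as a potential simple key; when ':' is seen one character later the
+// key is still on the same line and well within 1024 characters, so it is
+// valid and a KEY token gets inserted before the already-queued SCALAR token
+// (see yaml_parser_fetch_value below). Had the ':' only appeared on a later
+// line, or more than 1024 characters away, the potential key would be dropped
+// instead, or reported as an error if it was required.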
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
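+
+// For example (illustrative): scanning "- a\n- b\n" calls yaml_parser_roll_indent
+// with column 0 when the first '-' is seen; since the initial indent is -1, it
+// pushes -1, sets the indent to 0, and inserts BLOCK-SEQUENCE-START. At the end
+// of the stream, yaml_parser_unroll_indent(parser, -1) pops that level again and
+// appends the matching BLOCK-END token.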
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
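+
+// For example (illustrative): given "  # a comment\nkey: value\n", the loop
+// above eats the two leading spaces, the comment up to the line break, and the
+// line break itself (re-allowing simple keys in the block context), and then
+// stops at 'k', where the next token starts.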
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be a part of URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode an URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 0000000..4c45e66
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
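
Aside (not part of the vendored file): keyList implements the "natural" ordering the encoder uses when sorting map keys -- letters compare alphabetically, while embedded digit runs compare by numeric value rather than lexically. A minimal sketch of the expected effect, assuming the package is imported as gopkg.in/yaml.v2:

    package main

    import (
        "fmt"
        "log"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        m := map[string]int{"item10": 3, "item2": 2, "alpha": 1}
        out, err := yaml.Marshal(m)
        if err != nil {
            log.Fatal(err)
        }
        // With the natural ordering above, the keys are expected to be
        // emitted as alpha, item2, item10 (2 < 10 numerically).
        fmt.Print(string(out))
    }
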
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 0000000..a2dde60
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000..3081388
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
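
Aside (not part of the vendored file): MapSlice exists because plain Go maps do not guarantee iteration order. A minimal sketch, assuming the package is imported as gopkg.in/yaml.v2:

    package main

    import (
        "fmt"
        "log"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        // MapSlice preserves insertion order when marshaling.
        doc := yaml.MapSlice{
            {Key: "zebra", Value: 1},
            {Key: "apple", Value: 2},
        }
        out, err := yaml.Marshal(doc)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Print(string(out)) // "zebra: 1" is expected before "apple: 2"
    }
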
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
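
Aside (not part of the vendored file): a short sketch of the difference between Unmarshal and UnmarshalStrict, assuming the package is imported as gopkg.in/yaml.v2:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    type Config struct {
        Name string `yaml:"name"`
    }

    func main() {
        data := []byte("name: demo\nunknown: 1\n")

        var c Config
        // Unmarshal silently ignores the key with no matching struct field.
        fmt.Println(yaml.Unmarshal(data, &c)) // <nil>

        // UnmarshalStrict is expected to return a *yaml.TypeError
        // mentioning the unmatched "unknown" key.
        fmt.Println(yaml.UnmarshalStrict(data, &c))
    }
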
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ strict bool
+ parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+ dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder(dec.strict)
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
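
Aside (not part of the vendored file): Decode returns io.EOF when the stream has no more documents, so a multi-document stream is read in a loop. A minimal sketch, assuming the package is imported as gopkg.in/yaml.v2:

    package main

    import (
        "fmt"
        "io"
        "log"
        "strings"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        src := "name: first\n---\nname: second\n"
        dec := yaml.NewDecoder(strings.NewReader(src))

        for {
            var doc struct {
                Name string `yaml:"name"`
            }
            err := dec.Decode(&doc)
            if err == io.EOF {
                break // no more documents in the stream
            }
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(doc.Name)
        }
    }
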
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
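
Aside (not part of the vendored file): a small sketch of the tag options described above (custom key, omitempty, flow, inline), assuming the package is imported as gopkg.in/yaml.v2; the type names are illustrative only:

    package main

    import (
        "fmt"
        "log"

        yaml "gopkg.in/yaml.v2"
    )

    type Endpoint struct {
        Host string `yaml:"host"`
        Port int    `yaml:"port"`
    }

    type Service struct {
        Name     string   `yaml:"name"`
        Endpoint Endpoint `yaml:",inline"`           // host/port promoted to the top level
        Labels   []string `yaml:"labels,flow"`       // emitted as [a, b] rather than a block list
        Comment  string   `yaml:"comment,omitempty"` // dropped when empty
    }

    func main() {
        out, err := yaml.Marshal(Service{
            Name:     "api",
            Endpoint: Endpoint{Host: "localhost", Port: 8080},
            Labels:   []string{"a", "b"},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Print(string(out))
    }
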
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
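
Aside (not part of the vendored file): a minimal sketch of writing several documents to one stream with Encoder, assuming the package is imported as gopkg.in/yaml.v2:

    package main

    import (
        "log"
        "os"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        enc := yaml.NewEncoder(os.Stdout)
        defer enc.Close() // flush any buffered data to the writer

        docs := []map[string]string{
            {"name": "first"},
            {"name": "second"}, // expected to be preceded by a "---" separator
        }
        for _, doc := range docs {
            if err := enc.Encode(doc); err != nil {
                log.Fatal(err)
            }
        }
    }
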
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
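
Aside (not part of the vendored file): a sketch of how a type can implement IsZeroer so that omitempty drops it even when its exported fields are non-zero; the Status/Report types are illustrative only, and the package is assumed to be imported as gopkg.in/yaml.v2:

    package main

    import (
        "fmt"
        "log"

        yaml "gopkg.in/yaml.v2"
    )

    // Status counts as "zero" while the state is empty or still unknown.
    type Status struct {
        State string `yaml:"state"`
    }

    func (s Status) IsZero() bool { return s.State == "" || s.State == "unknown" }

    type Report struct {
        Name   string `yaml:"name"`
        Status Status `yaml:"status,omitempty"` // omitted when IsZero reports true
    }

    func main() {
        out, err := yaml.Marshal(Report{Name: "job-1", Status: Status{State: "unknown"}})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Print(string(out)) // expected to contain only "name: job-1"
    }
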
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+ disableLineWrapping = true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000..f6a9c8e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than len(buffer) bytes to the
+// buffer and report how many bytes were written.
+//
+// On success, the handler should return the number of bytes read and a nil
+// error. On failure, it should return a non-nil error. When the end of the
+// input is reached, it should return 0 and io.EOF.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // Reader input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the
+// accumulated characters to the output. It should write the whole
+// buffer to the output.
+//
+// [in] emitter The emitter whose output is being written.
+// [in] buffer The bytes to be written.
+//
+// On success the handler returns nil; on failure it returns a non-nil
+// error.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
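+//
+// A minimal sketch of a conforming handler (yaml_writer_write_handler in
+// apic.go is the real in-tree example):
+//
+//	func sketch_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+//		_, err := emitter.output_writer.Write(buffer)
+//		return err
+//	}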
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+	output_writer io.Writer // Writer output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml
new file mode 100644
index 0000000..04d4dae
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v3
diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE
new file mode 100644
index 0000000..2683e4b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+#### Apache License ####
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
new file mode 100644
index 0000000..08eb1ba
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -0,0 +1,150 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+ decoded into a typed bool value. Otherwise they behave as a string. Booleans
+ in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+ as specified in YAML 1.2, because most parsers still use the old format.
+ Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+ actually never supported by this package as it's clearly a poor choice.
+
+The package also supports anchors, tags, map merging, and most other
+YAML 1.2 features. Multi-document unmarshalling is not yet implemented.
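+
+For example, under the 1.1 bool rule above, `on` decodes to `true` only when
+the target field is a typed bool, and stays a string otherwise (a minimal
+sketch, error handling elided):
+
+```Go
+var flags struct{ Debug bool }
+_ = yaml.Unmarshal([]byte("debug: on"), &flags) // flags.Debug == true
+
+var m map[string]interface{}
+_ = yaml.Unmarshal([]byte("debug: on"), &m) // m["debug"] == "on" (a string)
+```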
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go
new file mode 100644
index 0000000..65846e6
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/apic.go
@@ -0,0 +1,746 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a reader input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a writer output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
new file mode 100644
index 0000000..be63169
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -0,0 +1,931 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ return &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ Line: p.event.start_mark.line + 1,
+ Column: p.event.start_mark.column + 1,
+ HeadComment: string(p.event.head_comment),
+ LineComment: string(p.event.line_comment),
+ FootComment: string(p.event.foot_comment),
+ }
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
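+
+// For example, halfway through the range (decodeCount = 2,200,000) the
+// allowed ratio works out to 0.99 - 0.89*0.5 = 0.545, i.e. a little over
+// half of the decode operations may come from alias expansion.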
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind)))
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should bowl out any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // This used to work in v2, but it's very unfriendly.
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
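+// settableValueOf returns an addressable (settable) copy of i, suitable as an
+// unmarshal target. (Descriptive comment added editorially; not part of the
+// upstream source.)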
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
+
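+// isStringMap reports whether every key of the mapping node is a plain string,
+// in which case the mapping can be decoded into the string-keyed map type
+// instead of the general interface-keyed one. (Descriptive comment added
+// editorially; not part of the upstream source.)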
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ if n.Content[i].ShortTag() != strTag {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *Node, out reflect.Value) {
+ switch n.Kind {
+ case MappingNode:
+ d.unmarshal(n, out)
+ case AliasNode:
+ if n.Alias != nil && n.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case SequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.Content) - 1; i >= 0; i-- {
+ ni := n.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go
new file mode 100644
index 0000000..ab2a066
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/emitterc.go
@@ -0,0 +1,1992 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ break
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ break
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ break
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ // [Go] If inside a block sequence item, discount the space taken by the indicator.
+ if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ emitter.indent -= 2
+ }
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical || true {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ if emitter.canonical && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.column == 0 || emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ // [Go] The original logic here would not indent the sequence when inside a mapping.
+ // In Go we always indent it, but take the sequence indicator out of the indentation.
+ indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention)
+ original := emitter.indent
+ if !yaml_emitter_increase_indent(emitter, false, indentless) {
+ return false
+ }
+ if emitter.indent > original+2 {
+ emitter.indent -= 2
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.line_comment) == 0 {
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+ tab_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if value[i] == '\t' {
+ tab_characters = true
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || tab_characters || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ if len(event.head_comment) > 0 {
+ emitter.head_comment = event.head_comment
+ }
+ if len(event.line_comment) > 0 {
+ emitter.line_comment = event.line_comment
+ }
+ if len(event.foot_comment) > 0 {
+ emitter.foot_comment = event.foot_comment
+ }
+ if len(event.tail_comment) > 0 {
+ emitter.tail_comment = event.tail_comment
+ }
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if emitter.foot_indent == indent {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ //emitter.indention = true
+ emitter.space_above = false
+ emitter.foot_indent = -1
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if len(value) > 0 && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ if len(value) > 0 {
+ emitter.whitespace = false
+ }
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
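+// yaml_emitter_write_block_scalar_hints writes the block scalar header hints: an
+// explicit indentation indicator when the content starts with whitespace, and a
+// chomping indicator ('-' when the content has no trailing line break, '+' when
+// extra trailing breaks must be kept).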
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
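+// yaml_emitter_write_literal_scalar writes the scalar in literal block style ('|'),
+// emitting the header hints and then reproducing every line of the content at the
+// current indentation.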
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
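+// yaml_emitter_write_folded_scalar writes the scalar in folded block style ('>'),
+// breaking long lines at spaces once the column exceeds the preferred width.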
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
+
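+// yaml_emitter_write_comment writes a comment block, prefixing each line with "# "
+// unless it already starts with '#', and terminating it with a line break.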
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go
new file mode 100644
index 0000000..1f37271
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/encode.go
@@ -0,0 +1,561 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
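+// fieldByIndex resolves the (possibly inlined) struct field at the given index path,
+// dereferencing pointers along the way and returning the zero Value when a nil
+// pointer is found.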
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
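+// structv encodes a struct as a mapping, honoring the omitempty, flow, and inline
+// options gathered by getStructInfo.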
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
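+// mappingv emits a mapping start event, invokes f to emit the keys and values, and
+// closes the mapping, using flow style when e.flow is set.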
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is in bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
+// parsing.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
+
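+// stringv encodes a string scalar, base64-encoding invalid UTF-8 under the !!binary
+// tag and quoting values that would otherwise resolve to a different type.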
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+ // TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var rtag string
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ = resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+ // The tail logic below moves the foot comment of prior keys to the following key,
+ // since the value for each key may be a nested structure and the foot needs to be
+ // processed only after the entirety of the value is streamed. The last tail is
+ // processed with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
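+// yaml_emitter_write_tag_content writes the tag text, copying URI-safe bytes
+// verbatim and percent-encoding every other byte as an uppercase %XX escape.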
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ }
+}
diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod
new file mode 100644
index 0000000..f407ea3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v3"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
new file mode 100644
index 0000000..aea9050
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -0,0 +1,1229 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
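+// yaml_emitter_write_plain_scalar writes an unquoted scalar, folding long lines at
+// spaces when allow_breaks is set and re-indenting after embedded line breaks.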
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
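+// yaml_emitter_write_single_quoted_scalar writes the scalar between single quotes,
+// doubling embedded quote characters and folding long lines when allowed.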
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments behind the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ // No heads on ends, so keep comment.head for a follow up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
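+// yaml_emitter_write_double_quoted_scalar writes the scalar between double quotes,
+// escaping non-printable, break, and special characters with the \x, \u, \U, or
+// named escape forms.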
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+ // [Go] Scan the header comment backwards, and if an empty line is found, break
+ // the header so the part before the last empty line goes into the
+ // document header, while the bottom of it goes into a follow up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected ", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+ event.foot_comment = event.head_comment
+ event.head_comment = nil
+ }
+ return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+ event.head_comment = parser.head_comment
+ event.line_comment = parser.line_comment
+ event.foot_comment = parser.foot_comment
+ parser.head_comment = nil
+ parser.line_comment = nil
+ parser.foot_comment = nil
+ parser.tail_comment = nil
+ parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head := len(parser.head_comment)
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ // [Go] It's a sequence under a sequence entry, so the former head comment
+ // is for the list itself, not the first list item under it.
+ parser.stem_comment = parser.head_comment[:prior_head]
+ if len(parser.head_comment) == prior_head {
+ parser.head_comment = nil
+ } else {
+ // Copy suffix to prevent very strange bugs if someone ever appends
+ // further bytes to the prefix in the stem_comment slice above.
+ parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...)
+ }
+
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // [Go] A tail comment was left from the prior mapping value processed. Emit an event
+ // as it needs to be processed with that value and not the following key.
+ if len(parser.tail_comment) > 0 {
+ *event = yaml_event_t{
+ typ: yaml_TAIL_COMMENT_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ foot_comment: parser.tail_comment,
+ }
+ parser.tail_comment = nil
+ return true
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
new file mode 100644
index 0000000..b7de0a8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
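+
+// Illustrative note (not part of the upstream source): a stream that begins
+// with the bytes 0xFF 0xFE is treated as UTF-16LE, 0xFE 0xFF as UTF-16BE, and
+// 0xEF 0xBB 0xBF as UTF-8, with the BOM bytes consumed. If no BOM is present,
+// UTF-8 is assumed and no bytes are consumed.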
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above implies
+ // that to be the case, and there are tests for it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is Go) panicking; or C) accessing invalid memory.
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length is Go) panicking; or C) accessing invalid memory.
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
new file mode 100644
index 0000000..64ae888
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, boolTag, []string{"true", "True", "TRUE"}},
+ {false, boolTag, []string{"false", "False", "FALSE"}},
+ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", mergeTag, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const (
+ nullTag = "!!null"
+ boolTag = "!!bool"
+ strTag = "!!str"
+ intTag = "!!int"
+ floatTag = "!!float"
+ timestampTag = "!!timestamp"
+ seqTag = "!!seq"
+ mapTag = "!!map"
+ binaryTag = "!!binary"
+ mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+ ltag := longTag(stag)
+ longTags[stag] = ltag
+ shortTags[ltag] = stag
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ if stag, ok := shortTags[tag]; ok {
+ return stag
+ }
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
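+
+// Illustrative sketch (not part of the upstream source): shortTag and longTag
+// convert between the short "!!" form and the full tag URI form using
+// longTagPrefix and the tables built in init(). For example, one would expect:
+//
+//	shortTag("tag:yaml.org,2002:str") // "!!str"
+//	longTag("!!int")                  // "tag:yaml.org,2002:int"
+//	shortTag("!custom")               // "!custom" (unknown tags pass through)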
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
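+
+// Illustrative sketch (not part of the upstream source): resolve chooses the
+// implicit tag for a plain scalar. Under the rules above, one would expect:
+//
+//	resolve("", "true")  // "!!bool", true
+//	resolve("", "42")    // "!!int", 42
+//	resolve("", "3.5")   // "!!float", 3.5
+//	resolve("", "hello") // "!!str", "hello"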
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
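+
+// Illustrative sketch (not part of the upstream source): encodeBase64 is used
+// when emitting !!binary scalars and wraps its output at 70 characters per
+// line. For a short input one would expect a single unwrapped line:
+//
+//	encodeBase64("hi") // "aGk="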
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
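+
+// Illustrative sketch (not part of the upstream source): parseTimestamp only
+// accepts the formats listed in allowedTimestampFormats. One would expect:
+//
+//	parseTimestamp("2001-12-14")          // ok == true (date only)
+//	parseTimestamp("2001-12-14 21:59:43") // ok == true (space separated)
+//	parseTimestamp("not-a-date")          // ok == false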
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 0000000..57e954c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,3025 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. They are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':', respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
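+
+// Illustrative note (not part of the upstream source): callers usually write
+// the inlined form of cache directly, as yaml_parser_fetch_next_token does:
+//
+//	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+//		return false
+//	}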
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ parser.newlines++
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ parser.newlines++
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.newlines++
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // [Go] The comment parsing logic requires a lookahead of two tokens
+ // so that foot comments may be parsed in time to associate them
+ // with the tokens that are parsed before them, and also for line
+ // comments to be transformed into head comments in some edge cases.
+ if parser.tokens_head < len(parser.tokens)-2 {
+ // If a potential simple key is at the head position, we need to fetch
+ // the next token to disambiguate it.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ scan_mark := parser.mark
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // [Go] While unrolling indents, transform the head comments of prior
+ // indentation levels observed after scan_mark into foot comments at
+ // the respective indexes.
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ comment_mark := parser.mark
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+ // Associate any following comments with the prior token.
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+ }
+ defer func() {
+ if !ok {
+ return
+ }
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
+ ok = false
+ return
+ }
+ }()
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we don't determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ block_mark := scan_mark
+ block_mark.index--
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+
+ // [Go] Reposition the end token before potential following
+ // foot comments of parent blocks. For that, search
+ // backwards for recent comments that were at the same
+ // indent as the block that is ending now.
+ stop_index := block_mark.index
+ for i := len(parser.comments) - 1; i >= 0; i-- {
+ comment := &parser.comments[i]
+
+ if comment.end_mark.index < stop_index {
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+ // If requested indent column is < 0, then the document is over and everything else
+ // is a foot anyway.
+ break
+ }
+ if comment.start_mark.column == parser.indent+1 {
+ // This is a good match. But maybe there's a former comment
+ // at that same indent level, so keep searching.
+ block_mark = comment.start_mark
+ }
+
+ // While the end of the former comment matches with
+ // the start of the following one, we know there's
+ // nothing in between and scanning is still safe.
+ stop_index = comment.scan_mark.index
+ }
+
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: block_mark,
+ end_mark: block_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report about it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
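+ // (token_number is absolute, so tokens_parsed is subtracted to get a
+ // position relative to the head of the token queue.)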
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A quoted scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header to the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was in the prior line, reposition so it becomes a
+ // header of the follow up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // [Go] Discard this inline comment for the time being.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of a URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
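+ // Note: 1024 is a sentinel meaning the width of the UTF-8 sequence is not
+ // yet known; it is replaced by the real width once the leading octet is read.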
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // TODO Test this and then re-enable it.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
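+ // chomping == -1 (strip) drops the final line break, 0 (clip) keeps just
+ // that break, and +1 (keep) also preserves any trailing empty lines.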
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
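+ // Encode the code point as UTF-8 (one to four bytes).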
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
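+ // In the block context, continuation lines of a plain scalar must be
+ // indented past the enclosing block, i.e. at least to column 'indent'.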
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else {
+ if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = append(text, parser.buffer[parser.buffer_pos])
+ }
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start to
+ // still be considered as a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line-parser.newlines+1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') {
+ // Got line break or terminator.
+ if !recent_empty {
+ if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ if len(text) > 0 {
+ if start_mark.column-1 < parser.indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && column < parser.indent+1 && column != start_mark.column {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else {
+ if parser.mark.index >= seen {
+ text = append(text, parser.buffer[parser.buffer_pos])
+ }
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ }
+
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
new file mode 100644
index 0000000..9210ece
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ digits := false
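+ // Compare the strings rune by rune; runs of digits are compared by their
+ // numeric value, so e.g. "item2" sorts before "item10" (natural ordering).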
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
new file mode 100644
index 0000000..b8a116b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
new file mode 100644
index 0000000..b5d35a5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yaml.go
@@ -0,0 +1,662 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
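+
+// For illustration, a minimal custom unmarshaller could look like the sketch
+// below (the Port type and its package are assumptions, not part of this file):
+//
+//     type Port int
+//
+//     func (p *Port) UnmarshalYAML(value *yaml.Node) error {
+//         var n int
+//         if err := value.Decode(&n); err != nil {
+//             return err
+//         }
+//         *p = Port(n)
+//         return nil
+//     }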
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
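+
+// A minimal usage sketch (assuming a multi-document input): Decode is called
+// in a loop until it reports io.EOF.
+//
+//     dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
+//     dec.KnownFields(true)
+//     for {
+//         var doc struct{ A int }
+//         if err := dec.Decode(&doc); err == io.EOF {
+//             break
+//         } else if err != nil {
+//             log.Fatal(err)
+//         }
+//         fmt.Println(doc.A) // 1, then 2
+//     }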
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
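+
+// A usage sketch: unmarshalling into a Node first keeps the intermediate
+// representation (kinds, positions, comments) available before decoding it
+// into a concrete type with Decode.
+//
+//     var root yaml.Node
+//     if err := yaml.Unmarshal([]byte("name: demo\nport: 8080\n"), &root); err != nil {
+//         log.Fatal(err)
+//     }
+//     var cfg struct {
+//         Name string
+//         Port int
+//     }
+//     if err := root.Decode(&cfg); err != nil { // cfg == {Name:"demo", Port:8080}
+//         log.Fatal(err)
+//     }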
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be omitted if that method returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
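+
+// For illustration, a struct combining the tag flags documented above
+// (the Server type and its fields are assumptions, not part of this package):
+//
+//     type Server struct {
+//         Name  string            `yaml:"name"`
+//         Tags  []string          `yaml:"tags,flow,omitempty"`
+//         Extra map[string]string `yaml:",inline"`
+//     }
+//
+//     out, err := yaml.Marshal(Server{Name: "demo", Extra: map[string]string{"region": "eu"}})
+//     // out should be "name: demo\nregion: eu\n": Tags is omitted, Extra is inlined.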
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
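+
+// A usage sketch covering the Encoder lifecycle: multiple documents are
+// separated by "---", and Close flushes whatever is still buffered.
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     enc.SetIndent(2)
+//     _ = enc.Encode(map[string]int{"a": 1})
+//     _ = enc.Encode(map[string]int{"b": 2})
+//     _ = enc.Close()
+//     // buf holds "a: 1\n---\nb: 2\n"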
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line where the node is in.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
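+
+// A walking sketch (assuming the usual layout of Content): for a MappingNode,
+// Content alternates key and value nodes, so pairs can be visited two at a time.
+//
+//     for i := 0; i+1 < len(m.Content); i += 2 {
+//         k, v := m.Content[i], m.Content[i+1]
+//         fmt.Println(k.Value, "=", v.Value)
+//     }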
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
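+
+// For illustration: a multi-line value gets the literal block style, so
+// marshalling the node below should emit a "|" style scalar (the exact output
+// depends on the encoder's indent settings).
+//
+//     var n yaml.Node
+//     n.SetString("line one\nline two")
+//     out, _ := yaml.Marshal(&n)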
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
new file mode 100644
index 0000000..2719cfb
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlh.go
@@ -0,0 +1,805 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+ yaml_TAIL_COMMENT_EVENT
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
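+
+// A minimal sketch of a read handler backed by an io.Reader r (in this Go
+// port, EOF is signalled by returning io.EOF rather than the size_read
+// convention described above):
+//
+//     parser.read_handler = func(p *yaml_parser_t, buffer []byte) (int, error) {
+//         return r.Read(buffer)
+//     }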
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ newlines int // The number of line breaks since last non-break/non-blank character
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+ stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+ comments []yaml_comment_t // The folded comments for all parsed tokens
+ comments_head int
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data A pointer to an application data specified by
+// yaml_emitter_set_output().
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
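+
+// A minimal sketch of a write handler backed by an io.Writer w, matching how
+// yaml_emitter_flush invokes it:
+//
+//     emitter.write_handler = func(e *yaml_emitter_t, buffer []byte) error {
+//         _, err := w.Write(buffer)
+//         return err
+//     }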
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 0000000..e88f9c5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
new file mode 100644
index 0000000..02d18fe
--- /dev/null
+++ b/vendor/modules.txt
@@ -0,0 +1,57 @@
+# github.com/davecgh/go-spew v1.1.1
+github.com/davecgh/go-spew/spew
+# github.com/felixge/httpsnoop v1.0.1
+github.com/felixge/httpsnoop
+# github.com/getkin/kin-openapi v0.79.0
+## explicit
+github.com/getkin/kin-openapi/jsoninfo
+github.com/getkin/kin-openapi/openapi2
+github.com/getkin/kin-openapi/openapi3
+# github.com/ghodss/yaml v1.0.0
+## explicit
+github.com/ghodss/yaml
+# github.com/go-openapi/jsonpointer v0.19.5
+github.com/go-openapi/jsonpointer
+# github.com/go-openapi/swag v0.19.5
+github.com/go-openapi/swag
+# github.com/gorilla/handlers v1.5.1
+## explicit
+github.com/gorilla/handlers
+# github.com/gorilla/mux v1.8.0
+## explicit
+github.com/gorilla/mux
+# github.com/inconshreveable/mousetrap v1.0.0
+github.com/inconshreveable/mousetrap
+# github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e
+github.com/mailru/easyjson/buffer
+github.com/mailru/easyjson/jlexer
+github.com/mailru/easyjson/jwriter
+# github.com/pmezard/go-difflib v1.0.0
+github.com/pmezard/go-difflib/difflib
+# github.com/radovskyb/watcher v1.0.7
+## explicit
+github.com/radovskyb/watcher
+# github.com/spf13/cobra v1.0.0
+## explicit
+github.com/spf13/cobra
+# github.com/spf13/pflag v1.0.5
+## explicit
+github.com/spf13/pflag
+# github.com/stretchr/testify v1.7.0
+## explicit
+github.com/stretchr/testify/assert
+# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
+## explicit
+github.com/xeipuuv/gojsonpointer
+# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
+github.com/xeipuuv/gojsonreference
+# github.com/xeipuuv/gojsonschema v1.2.0
+## explicit
+github.com/xeipuuv/gojsonschema
+# gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15
+## explicit
+# gopkg.in/yaml.v2 v2.4.0
+## explicit
+gopkg.in/yaml.v2
+# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
+gopkg.in/yaml.v3