diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000..2940874c2 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,110 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:09a551e015616c8d6f8c0b21821d220a1c665eff2b77531251c7c3ea1dd0e0f6" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/processcreds", + "aws/credentials/stscreds", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/ini", + "internal/sdkio", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "private/protocol", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/xml/xmlutil", + "service/ses", + "service/sts", + ] + pruneopts = "UT" + revision = "bae168494294b48b3fc9d6722a711246a8543034" + version = "v1.19.2" + +[[projects]] + digest = "1:ca59b1175189b3f0e9f1793d2c350114be36eaabbe5b9f554b35edee1de50aea" + name = "github.com/gorilla/mux" + packages = ["."] + pruneopts = "UT" + revision = "a7962380ca08b5a188038c69871b8d3fbdf31e89" + version = "v1.7.0" + +[[projects]] + digest = "1:bb81097a5b62634f3e9fec1014657855610c82d19b9a40c17612e32651e35dca" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "UT" + revision = "c2b33e84" + +[[projects]] + branch = "master" + digest = "1:f843fd15ea011f9d4e8bafa4c0d01c00f307b57da558e0a3b8e6dc8148aa26aa" + name = "github.com/tidepool-org/go-common" + packages = [ + ".", + "clients", + "clients/disc", + "clients/hakken", + "clients/highwater", + "clients/mongo", + "clients/shoreline", + "clients/status", + "errors", + "jepson", + ] + pruneopts = "UT" + revision = "28539d5d0ecdf8749c3f4c3197969b9a5b84331b" + +[[projects]] + branch = "(default)" + digest = "1:d5534b5d370b8f83a981a454c3e21e2d0be531e9927fcc65e3ef979c75a55e06" + name = "labix.org/v2/mgo" + packages = [ + ".", + "bson", + "sasl", + ] + pruneopts = "UT" + revision = "gustavo@niemeyer.net-20140701140051-90be2tvk93pcczzi" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/awserr", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/service/ses", + "github.com/gorilla/mux", + "github.com/tidepool-org/go-common", + "github.com/tidepool-org/go-common/clients", + "github.com/tidepool-org/go-common/clients/disc", + "github.com/tidepool-org/go-common/clients/hakken", + "github.com/tidepool-org/go-common/clients/highwater", + "github.com/tidepool-org/go-common/clients/mongo", + "github.com/tidepool-org/go-common/clients/shoreline", + "github.com/tidepool-org/go-common/clients/status", + "labix.org/v2/mgo", + "labix.org/v2/mgo/bson", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000..c415cd115 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,10 @@ +[[constraint]] + # Required: the root import path of the project being constrained. + name = "github.com/tidepool-org/go-common" + # Recommended: the version constraint to enforce for the project. + # Note that only one of "branch", "version" or "revision" can be specified. 
+ branch = "master" + +[prune] + go-tests = true + unused-packages = true diff --git a/clients/sesNotifier.go b/clients/sesNotifier.go index 5c678e36e..dfe064a74 100644 --- a/clients/sesNotifier.go +++ b/clients/sesNotifier.go @@ -1,76 +1,107 @@ package clients import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "io/ioutil" "log" - "net/http" - "net/url" - "strings" - "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ses" +) + +const ( + // CharSet The character encoding for the email. + CharSet = "UTF-8" + + // DefaultTextMessage will be sent to non-HTML email clients that receive our messages + DefaultTextMessage = "You need an HTML client to read this email." ) type ( + // SesNotifier contains all information needed to send Amazon SES messages SesNotifier struct { Config *SesNotifierConfig + SES *ses.SES } + + // SesNotifierConfig contains the static configuration for the Amazon SES service + // Credentials come from the environment and are not passed in via configuration variables. SesNotifierConfig struct { - EndPoint string `json:"serverEndpoint"` - From string `json:"fromAddress"` - SecretKey string `json:"secretKey"` - AccessKey string `json:"accessKey"` + From string `json:"fromAddress"` + Region string `json:"region"` } ) -func NewSesNotifier(cfg *SesNotifierConfig) *SesNotifier { +//NewSesNotifier creates a new Amazon SES notifier +func NewSesNotifier(cfg *SesNotifierConfig) (*SesNotifier, error) { + sess, err := session.NewSession(&aws.Config{ + Region: aws.String(cfg.Region)}, + ) + + if err != nil { + return nil, err + } + return &SesNotifier{ Config: cfg, - } + SES: ses.New(sess), + }, nil } +// Send a message to a list of recipients with a given subject func (c *SesNotifier) Send(to []string, subject string, msg string) (int, string) { + var toAwsAddress = make([]*string, len(to)) + for i, x := range to { + toAwsAddress[i] = aws.String(x) + } - data := make(url.Values) - data.Add("Action", "SendEmail") - data.Add("Source", c.Config.From) - data.Add("Destination.ToAddresses.member.1", strings.Join(to, ", ")) - data.Add("Message.Subject.Data", subject) - data.Add("Message.Body.Html.Data", msg) - data.Add("AWSAccessKeyId", c.Config.AccessKey) - - return c.sesPost(data) -} - -func (c *SesNotifier) generateAuthHeader(date string) string { - h := hmac.New(sha256.New, []uint8(c.Config.SecretKey)) - h.Write([]uint8(date)) - signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) - return fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s, Algorithm=HmacSHA256, Signature=%s", c.Config.AccessKey, signature) -} - -func (c *SesNotifier) sesPost(data url.Values) (int, string) { - body := strings.NewReader(data.Encode()) - req, err := http.NewRequest("POST", c.Config.EndPoint, body) - if err != nil { - return http.StatusInternalServerError, err.Error() + input := &ses.SendEmailInput{ + Destination: &ses.Destination{ + CcAddresses: []*string{}, + ToAddresses: toAwsAddress, + }, + Message: &ses.Message{ + Body: &ses.Body{ + Html: &ses.Content{ + Charset: aws.String(CharSet), + Data: aws.String(msg), + }, + Text: &ses.Content{ + Charset: aws.String(CharSet), + Data: aws.String(DefaultTextMessage), + }, + }, + Subject: &ses.Content{ + Charset: aws.String(CharSet), + Data: aws.String(subject), + }, + }, + Source: aws.String(c.Config.From), } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - date := time.Now().UTC().Format("Mon, 02 Jan 2006 
15:04:05 -0700") - req.Header.Set("Date", date) - req.Header.Set("X-Amzn-Authorization", c.generateAuthHeader(date)) + // Attempt to send the email. + result, err := c.SES.SendEmail(input) - r, err := http.DefaultClient.Do(req) + // Display error messages if they occur. if err != nil { - log.Printf("http error: %s", err) - return http.StatusInternalServerError, err.Error() - } + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case ses.ErrCodeMessageRejected: + log.Printf("%v: %v\n", ses.ErrCodeMessageRejected, aerr.Error()) + case ses.ErrCodeMailFromDomainNotVerifiedException: + log.Printf("%v: %v\n", ses.ErrCodeMailFromDomainNotVerifiedException, aerr.Error()) + case ses.ErrCodeConfigurationSetDoesNotExistException: + log.Printf("%v: %v\n", ses.ErrCodeConfigurationSetDoesNotExistException, aerr.Error()) + default: + log.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + log.Println(err.Error()) + } - resultbody, _ := ioutil.ReadAll(r.Body) - r.Body.Close() - - return r.StatusCode, string(resultbody) + return 400, result.String() + } + return 200, result.String() } diff --git a/hydrophone.go b/hydrophone.go index 9bb9b523c..b74d80433 100644 --- a/hydrophone.go +++ b/hydrophone.go @@ -23,6 +23,7 @@ import ( ) type ( + // Config is the configuration for the service Config struct { clients.Config Service disc.ServiceListing `json:"service"` @@ -39,15 +40,13 @@ func main() { log.Panic("Problem loading config ", err) } - // ses secrets may be passed via a separate env variable to accomodate easy secrets injection via Kubernetes - accessSecret, found := os.LookupEnv("SES_ACCESS_KEY") + region, found := os.LookupEnv("REGION") if found { - config.Mail.AccessKey = accessSecret + config.Mail.Region = region } - secretKey, found := os.LookupEnv("SES_SECRET_KEY") - if found { - config.Mail.SecretKey = secretKey + if config.Mail.Region == "" { + config.Mail.Region = "us-west-2" } // server secret may be passed via a separate env variable to accomodate easy secrets injection via Kubernetes @@ -111,7 +110,11 @@ func main() { * hydrophone setup */ store := sc.NewMongoStoreClient(&config.Mongo) - mail := sc.NewSesNotifier(&config.Mail) + mail, err := sc.NewSesNotifier(&config.Mail) + + if err != nil { + log.Fatal(err) + } emailTemplates, err := templates.New() if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..899129ecc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000..56fdfc2bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. 
+// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..0202a008f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. 
+func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. 
+func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 000000000..1a3d106d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. 
+ if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000..59fa4a558 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..11c52c389 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..710eb432f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, " len %d", v.Len()) + break + } + + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 000000000..645df2450 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,88 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + for i := 0; i < v.Type().NumField(); i++ { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { + continue // ignore unexported fields + } + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { + continue // ignore unset fields + } + + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(ft.Name + ": ") + + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("") + } else { + stringValue(fv, indent+2, buf) + } + + buf.WriteString(",\n") + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 000000000..709605384 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,96 @@ +package client + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint string + SigningRegion string + SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signin name can be overridden based on metadata the + // service has. + SigningNameDerived bool +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. +type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers.Copy(), + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000..a397b0d04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,116 @@ +package client + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// client.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() int { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + // Set the upper limit of delay in retrying at ~five minutes + minTime := 30 + throttle := d.shouldThrottle(r) + if throttle { + if delay, ok := getRetryDelay(r); ok { + return delay + } + + minTime = 500 + } + + retryCount := r.RetryCount + if throttle && retryCount > 8 { + retryCount = 8 + } else if retryCount > 13 { + retryCount = 13 + } + + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry returns true if the request should be retried. 
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() + } + + return true +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 000000000..7b5e1276a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,190 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. 
+var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. 
+var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. +var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..920e9fddf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,13 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..10634d173 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,536 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. 
By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. 
+ // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disble 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. 
+ // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDiableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requets. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. 
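NewConfig and the With* setters below exist so a Config can be assembled without hand-writing pointer literals. A short sketch of the two equivalent styles (the values are illustrative):

    // Literal form, using the pointer helpers from convert_types.go.
    cfgA := &aws.Config{
        Region:     aws.String("us-west-2"),
        MaxRetries: aws.Int(3),
    }

    // Builder form, chaining the With* methods defined in this file.
    cfgB := aws.NewConfig().
        WithRegion("us-west-2").
        WithMaxRetries(3)

    _, _ = cfgA, cfgB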
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. 
+func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } +} + +// Copy will return a 
shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go new file mode 100644 index 000000000..2866f9a7f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -0,0 +1,37 @@ +// +build !go1.9 + +package aws + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 000000000..3718b26e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 000000000..66c5945db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,56 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
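MergeIn and Copy above round out config.go: they are what lets a per-client Config be layered over the session's shared one. A sketch of that pattern, with an illustrative retry override that applies only to the one client:

    sess := session.Must(session.NewSession(&aws.Config{
        Region: aws.String("us-west-2"),
    }))

    // The extra Config is merged into a copy of the session's Config, so
    // the larger retry budget applies to this client only.
    svc := ses.New(sess, aws.NewConfig().WithMaxRetries(10))
    _ = svc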
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 000000000..9c29f29af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 000000000..304fd1561 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000..ff5d58e06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,387 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. 
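BackgroundContext and SleepWithContext above are what let the SDK honor cancellation during retries; callers plug a real context in through the *WithContext variants of each API operation. A sketch of a deadline-bound send, assuming the context, time and vendored ses packages are imported (the five-second timeout is illustrative):

    func sendWithTimeout(svc *ses.SES, input *ses.SendEmailInput) error {
        // The deadline cancels the in-flight attempt and any retry sleeps.
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        _, err := svc.SendEmailWithContext(ctx, input)
        return err
    }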
+func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. +func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
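Every field on the generated API structs is a pointer, so these helpers do the wrapping and the nil-safe unwrapping. A small sketch against the SES types brought in by this change (the addresses are placeholders):

    dest := &ses.Destination{
        ToAddresses: aws.StringSlice([]string{"a@example.com", "b@example.com"}),
    }

    // The *Value helpers never dereference nil, so unset optional fields
    // simply read back as zero values.
    fmt.Println(aws.StringValue(dest.ToAddresses[0])) // "a@example.com"
    fmt.Println(aws.Int64Value(nil))                  // 0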
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 000000000..f8853d78a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,228 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. 
+var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. 
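Handlers like SendHandler above are ordinary entries in a request's handler lists, and callers can hook the same lists. As an illustrative sketch (the handler name and log line are invented, and svc is assumed to be an *ses.SES client), this logs each attempt just before it is sent:

    svc.Handlers.Send.PushFrontNamed(request.NamedHandler{
        Name: "example.LogAttempt",
        Fn: func(r *request.Request) {
            log.Printf("calling %s.%s (retry %d)",
                r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)
        },
    })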
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000..7d50b1557 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 000000000..ab69c7a6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. 
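AfterRetryHandler above defers the should-retry decision and the backoff to the client's Retryer. Following the request.WithRetryer pattern mentioned in the Config documentation, a sketch that widens one client's retry budget (the count is illustrative, and sess is assumed to exist):

    cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{
        NumMaxRetries: 5,
    })
    svc := ses.New(sess, cfg)
    _ = svc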
+var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 000000000..3ad1e798d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true. + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// via the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. 
+// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 000000000..894bbc7f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,292 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := credentials.NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := credentials.NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws/awserr" + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. 
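When every provider in the chain fails, Retrieve returns only the terse NoCredentialProviders error unless verbose chain errors are enabled. A sketch of switching that on while debugging missing credentials (the region is illustrative):

    sess := session.Must(session.NewSession(&aws.Config{
        Region:                        aws.String("us-west-2"),
        CredentialsChainVerboseErrors: aws.Bool(true),
    }))
    // The returned batch error now carries each provider's individual failure.
    _ = sess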
+// +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) +// // Access public S3 buckets. +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + +// An ErrorProvider is a stub credentials provider that always returns an error +// this is used by the SDK when construction a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Retrieved returned Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. 
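Expiry is designed to be embedded so a custom Provider only has to call SetExpiration; IsExpired and ExpiresAt then come along for free. A hedged sketch (the provider name, lifetime and one-minute window are invented for illustration):

    type rotatingProvider struct {
        credentials.Expiry
    }

    func (p *rotatingProvider) Retrieve() (credentials.Value, error) {
        // Fetch or mint credentials here (omitted), then record when they
        // lapse, expiring a minute early to avoid racing the real expiry.
        p.SetExpiration(time.Now().Add(15*time.Minute), time.Minute)
        return credentials.Value{
            AccessKeyID:     "AKID",   // placeholder
            SecretAccessKey: "SECRET", // placeholder
            ProviderName:    "rotatingProvider",
        }, nil
    }

    // credentials.NewCredentials(&rotatingProvider{}) wires it into the SDK.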
+func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + + m sync.RWMutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired need to retrieve the credentials taking the full + // lock. + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
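Get, Expire and ExpiresAt are the concurrency-safe surface that callers use; the provider behind them stays an implementation detail. A sketch of inspecting what a session resolved, assuming sess was built as in the earlier examples:

    v, err := sess.Config.Credentials.Get()
    if err != nil {
        log.Fatalf("no credentials available: %v", err)
    }
    log.Printf("signing with provider %s", v.ProviderName)

    // Providers that do not implement Expirer return an error here.
    if t, err := sess.Config.Credentials.ExpiresAt(); err == nil {
        log.Printf("credentials expire at %s", t)
    }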
+func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + nil) + } + if c.forceRefresh { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..0ed791be6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. 
+// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 000000000..ace513138 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,198 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. 
Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a Credentials wrapper for retrieving credentials +// from an arbitrary endpoint concurrently. The client will request the +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. 
+func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + resp, err := p.getCredentials() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials() (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, + ) + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000..54c5cf733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,74 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. 
Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 000000000..1980c8c14 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,425 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. 
+ svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. + + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. 
+ ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = 1024 + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. 
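+//
+// The wrapped command is expected to print a JSON document on stdout in the
+// following shape (field values here are illustrative):
+//
+//     {
+//         "Version": 1,
+//         "AccessKeyId": "accessKey",
+//         "SecretAccessKey": "secret",
+//         "SessionToken": "token",
+//         "Expiration": "2019-05-02T15:04:05Z"
+//     }
+//
+// If "Expiration" is omitted the credentials are treated as static and will
+// not be refreshed.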
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. +func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. 
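+//
+// On Windows the configured command is wrapped as
+//
+//     cmd.exe /C <command>
+//
+// and on all other platforms as
+//
+//     sh -c <command>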
+func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. +func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..e15514958 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,150 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. 
+func loadProfile(filename, profile string) (Value, error) { + config, err := ini.OpenFile(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + + iniProfile, ok := config.GetSection(profile) + if !ok { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) + } + + id := iniProfile.String("aws_access_key_id") + if len(id) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + nil) + } + + secret := iniProfile.String("aws_secret_access_key") + if len(secret) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.String("aws_session_token") + + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if len(p.Filename) != 0 { + return p.Filename, nil + } + + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { + return p.Filename, nil + } + + if home := shareddefaults.UserHomeDir(); len(home) == 0 { + // Backwards compatibility of home directly not found error being returned. + // This error is too verbose, failure when opening the file would of been + // a better error to return. + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 000000000..531139e39 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,55 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. 
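+//
+// For example (key values are illustrative; pass an empty token if the
+// credentials do not include a session token):
+//
+//     creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")
+//     sess := session.Must(session.NewSession(&aws.Config{Credentials: creds}))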
+func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. Same as NewStaticCredentials +// but takes the creds Value instead of individual fields +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 000000000..0d1b55047 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,299 @@ +/* +Package stscreds are credential Providers to retrieve STS AWS credentials. + +STS provides multiple ways to retrieve credentials which can be used when making +future AWS service API operation calls. + +The SDK will ensure that per instance of credentials.Credentials all requests +to refresh the credentials will be synchronized. But, the SDK is unable to +ensure synchronous usage of the AssumeRoleProvider if the value is shared +between multiple Credentials, Sessions or service clients. + +Assume Role + +To assume an IAM role using STS with the SDK you can create a new Credentials +with the SDKs's stscreds package. + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to to make the STS Assume Role API. + sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. + creds := stscreds.NewCredentials(sess, "myRoleArn") + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with static MFA Token + +To assume an IAM role with a MFA token you can either specify a MFA token code +directly or provide a function to prompt the user each time the credentials +need to refresh the role's credentials. Specifying the TokenCode should be used +for short lived operations that will not need to be refreshed, and when you do +not want to have direct control over the user provides their MFA token. + +With TokenCode the AssumeRoleProvider will be not be able to refresh the role's +credentials. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN using the MFA token code provided. 
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenCode = aws.String("00000000") + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with MFA Token Provider + +To assume an IAM role with MFA for longer running tasks where the credentials +may need to be refreshed setting the TokenProvider field of AssumeRoleProvider +will allow the credential provider to prompt for new MFA token code when the +role's credentials need to be refreshed. + +The StdinTokenProvider function is available to prompt on stdin to retrieve +the MFA token code from the user. You can also implement custom prompts by +satisfing the TokenProvider function signature. + +Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +have undesirable results as the StdinTokenProvider will not be synchronized. A +single Credentials with an AssumeRoleProvider can be shared safely. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenProvider = stscreds.StdinTokenProvider + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +*/ +package stscreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// StdinTokenProvider will prompt on stderr and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function go read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple gorouties. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely +// +// Will wait forever until something is provided on the stdin. +func StdinTokenProvider() (string, error) { + var v string + fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. 
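+//
+// For illustration, a shared config profile that configures assume role might
+// look like (profile name and ARN here are placeholders):
+//
+//     [profile assumed]
+//     role_arn = arn:aws:iam::123456789012:role/my-role
+//     source_profile = default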
+// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider is set, TokenProvider will be used and + // TokenCode is ignored. + TokenProvider func() (string, error) + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. 
All access to the credentials and refreshing them +// will be synchronized. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + roleOutput, err := p.Client.AssumeRole(input) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 000000000..152d785b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,46 @@ +// Package csm provides Client Side Monitoring (CSM) which enables sending metrics +// via UDP connection. Using the Start function will enable the reporting of +// metrics on a given port. If Start is called, with different parameters, again, +// a panic will occur. 
+// +// Pause can be called to pause any metrics publishing on a given port. Sessions +// that have had their handlers modified via InjectHandlers may still be used. +// However, the handlers will act as a no-op meaning no metrics will be published. +// +// Example: +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// r.InjectHandlers(&sess.Handlers) +// +// client := s3.New(sess) +// resp, err := client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Resume monitoring +// r.Continue() +// +// Start returns a Reporter that is used to enable or disable monitoring. If +// access to the Reporter is required later, calling Get will return the Reporter +// singleton. +// +// Example: +// r := csm.Get() +// r.Continue() +package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go new file mode 100644 index 000000000..2f0c6eac9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -0,0 +1,67 @@ +package csm + +import ( + "fmt" + "sync" +) + +var ( + lock sync.Mutex +) + +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + +// Start will start the a long running go routine to capture +// client side metrics. Calling start multiple time will only +// start the metric listener once and will panic if a different +// client ID or port is passed in. +// +// Example: +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a reporter if one exists, if one does not exist, nil will +// be returned. 
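+//
+// For example:
+//
+//     if r := csm.Get(); r != nil {
+//         r.Pause()
+//     }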
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 000000000..5bacc791a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + 
m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 000000000..514fc3739 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,54 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(&ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(&ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(&ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 000000000..54a99280c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 000000000..0b5571acf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,260 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // DefaultPort is used when no port is specified + DefaultPort = "31000" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. 
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + "SerializationError", + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log + b, err := json.Marshal(m) + if err != nil { + continue + } + + rep.conn.Write(b) + } + } +} + +// Pause will pause the metric channel preventing any new metrics from +// being added. +func (rep *Reporter) Pause() { + lock.Lock() + defer lock.Unlock() + + if rep == nil { + return + } + + rep.close() +} + +// Continue will reopen the metric channel and allow for monitoring +// to be resumed. +func (rep *Reporter) Continue() { + lock.Lock() + defer lock.Unlock() + if rep == nil { + return + } + + if !rep.metricsCh.IsPaused() { + return + } + + rep.metricsCh.Continue() +} + +// InjectHandlers will will enable client side metrics and inject the proper +// handlers to handle how metrics are sent. +// +// Example: +// // Start must be called in order to inject the correct handlers +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// +// sess := session.NewSession() +// r.InjectHandlers(&sess.Handlers) +// +// // create a new service client with our client side metric session +// svc := s3.New(sess) +func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { + if rep == nil { + return + } + + handlers.Complete.PushFrontNamed(request.NamedHandler{ + Name: APICallMetricHandlerName, + Fn: rep.sendAPICallMetric, + }) + + handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ + Name: APICallAttemptMetricHandlerName, + Fn: rep.sendAPICallAttemptMetric, + }) +} + +// boolIntValue return 1 for true and 0 for false. +func boolIntValue(b bool) int { + if b { + return 1 + } + + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 000000000..23bb639e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,207 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. +package defaults + +import ( + "fmt" + "net" + "net/http" + "net/url" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. 
+// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithEndpointResolver(endpoints.DefaultResolver()) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: CredProviders(cfg, handlers), + }) +} + +// CredProviders returns the slice of providers used in +// the default credential chain. +// +// For applications that need to use some other provider (for example use +// different environment variables for legacy reasons) but still fall back +// on the default chain of providers. This allows that default chaint to be +// automatically updated +func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { + return []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + } +} + +const ( + httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote +// endpoints such as EC2 or ECS Roles. 
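The defaults shown here compose into the standard credential chain: environment variables, the shared credentials file, then a remote provider (EC2 role or ECS/HTTP endpoint). A minimal sketch of resolving credentials through that chain, assuming only the APIs above:

    // Sketch: build the default credential chain explicitly and resolve a
    // credential value from it.
    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        d := defaults.Get() // default Config and Handlers, credentials pre-wired

        // Equivalent to what Get already did internally; shown to illustrate
        // CredChain and CredProviders.
        creds := defaults.CredChain(d.Config, d.Handlers)

        v, err := creds.Get()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("credentials resolved by:", v.ProviderName)
    }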
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 000000000..ca0ee1dcc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go new file mode 100644 index 000000000..4fcb61618 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -0,0 +1,56 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operations parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer of the scalar, and dereferencing +// a pointer easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. +// The Pointer to value will safely dereference the pointer and return its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. So get a +// *string from a string value use the "String" function. This makes it easy to +// to get pointer of a literal string value, because getting the address of a +// literal requires assigning the value to a variable first. +// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.StringValue(strPtr) +// +// In addition to scalars the aws package also includes conversion utilities for +// map and slice for commonly types used in API parameters. The map and slice +// conversion functions use similar naming pattern as the scalar conversion +// functions. +// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if a HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client. +package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 000000000..d57a1af59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,169 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// GetMetadata uses the path provided to request information from the EC2 +// instance metdata service. The content will be returned as a string, or +// error if the request failed. 
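A short usage sketch of the metadata accessors defined below, assuming the process can reach the instance metadata service (for example when running on EC2):

    // Sketch: query the instance metadata service for the instance ID and
    // region. The session is created with SDK defaults.
    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess, err := session.NewSession()
        if err != nil {
            log.Fatal(err)
        }

        svc := ec2metadata.New(sess)
        if !svc.Available() {
            log.Fatal("EC2 metadata service is not reachable")
        }

        id, err := svc.GetMetadata("instance-id")
        if err != nil {
            log.Fatal(err)
        }
        region, err := svc.Region()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("instance", id, "in region", region)
    }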
+func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + err := req.Send() + + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + if r.HTTPResponse.StatusCode == http.StatusNotFound { + r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) + } + }) + err := req.Send() + + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + err := req.Send() + + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("SerializationError", + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New("SerializationError", + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + if len(resp) == 0 { + return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. 
+// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 000000000..f4438eae9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,152 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true, (case insensitive). +package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. +const ServiceName = "ec2metadata" +const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. 
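Because NewClient below only shortens the timeout when the HTTP client is unmodified, supplying a custom client keeps your own timeout in effect. A hedged sketch; the 10-second value is illustrative:

    // Sketch: pass a non-default *http.Client so the httpClientZero check in
    // NewClient leaves its timeout untouched.
    package main

    import (
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        svc := ec2metadata.New(sess, aws.NewConfig().
            WithHTTPClient(&http.Client{Timeout: 10 * time.Second}))
        _ = svc
    }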
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Disable the EC2 Metadata service if the environment variable is set. + // This shortcirctes the service's functionality to always fail to send + // requests. + if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 000000000..87b9ff3ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,188 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// A DecodeModelOptions are the options for how the endpoints model definition +// are decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. 
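DecodeModel, defined just below, accepts option functions of this form. A brief sketch of the pattern; the endpoints.json path is an assumption:

    // Sketch: decode a v3 endpoints model from a local file, skipping the
    // SDK's customizations via the option function.
    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        f, err := os.Open("endpoints.json") // assumed path to a v3 model file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        resolver, err := endpoints.DecodeModel(f, func(o *endpoints.DecodeModelOptions) {
            o.SkipCustomizations = true
        })
        if err != nil {
            log.Fatal(err)
        }

        for _, p := range resolver.(endpoints.EnumPartitions).Partitions() {
            log.Println("partition:", p.ID())
        }
    }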
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. + modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services[svcName] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + 
s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 000000000..4a2277917 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,4138 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuNorth1RegionID = "eu-north-1" // EU (Stockholm). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. 
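A short sketch of resolving a concrete endpoint from the generated model that follows; "email" is the endpoints ID this file uses for SES, and the region choice is illustrative:

    // Sketch: resolve the SES ("email") endpoint for us-west-2 using the
    // default resolver bundled with the SDK.
    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        resolver := endpoints.DefaultResolver()

        e, err := resolver.EndpointFor("email", endpoints.UsWest2RegionID)
        if err != nil {
            log.Fatal(err)
        }
        // Expected shape: https://email.us-west-2.amazonaws.com, signing region us-west-2
        fmt.Println(e.URL, e.SigningRegion)
    }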
+func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-north-1": region{ + Description: "EU (Stockholm)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + 
"eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: 
"autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, 
+ }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": 
endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + 
"eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + 
"us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + 
"us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + 
Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + 
"us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": 
service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: 
"s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + 
"us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: 
"queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": 
endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + 
Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": 
endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: 
[]string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
+func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ds": service{ + + 
Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + 
"us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": 
service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 000000000..000dd79ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. 
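For context, the partition metadata above is consumed through the resolver API added later in this vendored package. A minimal sketch, assuming only the vendored endpoints package and an arbitrary choice of service and region, of resolving an S3 endpoint in the GovCloud partition:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// AwsUsGovPartition exposes the aws-us-gov metadata defined above.
	p := endpoints.AwsUsGovPartition()

	// Resolve S3 in us-gov-west-1. "s3" is the endpoint ID used in the model.
	resolved, err := p.EndpointFor("s3", "us-gov-west-1")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}

	fmt.Println(resolved.URL)           // https://s3.us-gov-west-1.amazonaws.com
	fmt.Println(resolved.SigningRegion) // us-gov-west-1
}

With no option functions passed, the resolver prefers HTTPS for this entry and falls back to the partition's default v4 signer.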
+ DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. 
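These identifiers pair with the enumeration helpers defined later in endpoints.go. A sketch, assuming only the vendored package, that lists the GovCloud regions carrying DynamoDB:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// RegionsForService walks the default partitions for a partition/service pair.
	regions, ok := endpoints.RegionsForService(
		endpoints.DefaultPartitions(),
		endpoints.AwsUsGovPartitionID,
		endpoints.DynamodbServiceID,
	)
	if !ok {
		fmt.Println("partition or service not found")
		return
	}

	for id, r := range regions {
		fmt.Println(id, "-", r.Description())
	}
}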
+ SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 000000000..84316b92c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns a endpoint Resolver will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// . +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.ID()) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. To use this with the SDK's Session and Config set the value +// of the type to the EndpointsResolver field of aws.Config when initializing +// the session, or service client. 
+// +// In addition the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 000000000..f82babf6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,449 @@ +package endpoints + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP. + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled only services that are known to support dualstack will return + // dualstack endpoints. + UseDualStack bool + + // Enables strict matching of services and regions resolved endpoints. + // If the partition doesn't enumerate the exact service and region an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. If both the service and region are unknown and resolving + // the endpoint on partition list an UnknownEndpointError error will be returned. + // + // If resolving and endpoint on a partition specific resolver that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL options. Can be used as a functional +// option when resolving endpoints. 
+func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The build in Partition and DefaultResolver return value satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions a provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file. +// +// Use this interface with DefaultResolver and DecodeModels to get the list of +// Partitions. +type EnumPartitions interface { + Partitions() []Partition +} + +// RegionsForService returns a map of regions for the partition and service. +// If either the partition or service does not exist false will be returned +// as the second parameter. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !ok { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. 
This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + +// A Partition provides the ability to enumerate the partition's regions +// and services. +type Partition struct { + id string + p *partition +} + +// ID returns the identifier of the partition. +func (p Partition) ID() string { return p.id } + +// EndpointFor attempts to resolve the endpoint based on service and region. +// See Options for information on configuring how the endpoint is resolved. +// +// If the service cannot be found in the metadata the UnknownServiceError +// error will be returned. This validation will occur regardless if +// StrictMatching is enabled. To enable resolving unknown services set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled +// this option allows the partition resolver to resolve a endpoint based on +// the service endpoint ID provided. +// +// When resolving endpoints you can choose to enable StrictMatching. This will +// require the provided service and region to be known by the partition. +// If the endpoint cannot be strictly resolved an error will be returned. This +// mode is useful to ensure the endpoint resolved is valid. Without +// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching requires the SDK to be updated if you want to take advantage +// of new regions and services expansions. +// +// Errors that can be returned. +// * UnknownServiceError +// * UnknownEndpointError +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return p.p.EndpointFor(service, region, opts...) +} + +// Regions returns a map of Regions indexed by their ID. This is useful for +// enumerating over the regions in a partition. +func (p Partition) Regions() map[string]Region { + rs := map[string]Region{} + for id, r := range p.p.Regions { + rs[id] = Region{ + id: id, + desc: r.Description, + p: p.p, + } + } + + return rs +} + +// Services returns a map of Service indexed by their ID. This is useful for +// enumerating over the services in a partition. +func (p Partition) Services() map[string]Service { + ss := map[string]Service{} + for id := range p.p.Services { + ss[id] = Service{ + id: id, + p: p.p, + } + } + + return ss +} + +// A Region provides information about a region, and ability to resolve an +// endpoint from the context of a region, given a service. +type Region struct { + id, desc string + p *partition +} + +// ID returns the region's identifier. +func (r Region) ID() string { return r.id } + +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + +// ResolveEndpoint resolves an endpoint from the context of the region given +// a service. See Partition.EndpointFor for usage and errors that can be returned. +func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return r.p.EndpointFor(service, r.id, opts...) 
+} + +// Services returns a list of all services that are known to be in this region. +func (r Region) Services() map[string]Service { + ss := map[string]Service{} + for id, s := range r.p.Services { + if _, ok := s.Endpoints[r.id]; ok { + ss[id] = Service{ + id: id, + p: r.p, + } + } + } + + return ss +} + +// A Service provides information about a service, and ability to resolve an +// endpoint from the context of a service, given a region. +type Service struct { + id string + p *partition +} + +// ID returns the identifier for the service. +func (s Service) ID() string { return s.id } + +// ResolveEndpoint resolves an endpoint from the context of a service given +// a region. See Partition.EndpointFor for usage and errors that can be returned. +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return s.p.EndpointFor(s.id, region, opts...) +} + +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if r, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + desc: r.Description, + p: s.p, + } + } + } + + return rs +} + +// Endpoints returns a map of Endpoints indexed by their ID for all known +// endpoints for a service. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Endpoints() map[string]Endpoint { + es := map[string]Endpoint{} + for id := range s.p.Services[s.id].Endpoints { + es[id] = Endpoint{ + id: id, + serviceID: s.id, + p: s.p, + } + } + + return es +} + +// A Endpoint provides information about endpoints, and provides the ability +// to resolve that endpoint for the service, and the region the endpoint +// represents. +type Endpoint struct { + id string + serviceID string + p *partition +} + +// ID returns the identifier for an endpoint. +func (e Endpoint) ID() string { return e.id } + +// ServiceID returns the identifier the endpoint belongs to. +func (e Endpoint) ServiceID() string { return e.serviceID } + +// ResolveEndpoint resolves an endpoint from the context of a service and +// region the endpoint represents. See Partition.EndpointFor for usage and +// errors that can be returned. +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { + return e.p.EndpointFor(e.serviceID, e.id, opts...) +} + +// A ResolvedEndpoint is an endpoint that has been resolved based on a partition +// service, and region. +type ResolvedEndpoint struct { + // The endpoint URL + URL string + + // The region that should be used for signing requests. + SigningRegion string + + // The service name that should be used for signing requests. + SigningName string + + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + + // The signing method that should be used for signing requests. + SigningMethod string +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. 
+type awsError awserr.Error + +// A EndpointNotFoundError is returned when in StrictMatching mode, and the +// endpoint for the service and region cannot be found in any of the partitions. +type EndpointNotFoundError struct { + awsError + Partition string + Service string + Region string +} + +// A UnknownServiceError is returned when the service does not resolve to an +// endpoint. Includes a list of all known services for the partition. Returned +// when a partition does not support the service. +type UnknownServiceError struct { + awsError + Partition string + Service string + Known []string +} + +// NewUnknownServiceError builds and returns UnknownServiceError. +func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { + return UnknownServiceError{ + awsError: awserr.New("UnknownServiceError", + "could not resolve endpoint for unknown service", nil), + Partition: p, + Service: s, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// A UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. +func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 000000000..ff6f76db6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,307 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) 
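Callers that enable StrictMatching can tell whether the service or the region was the unknown part by checking which of the two error types above came back. An illustrative sketch, using a region the GovCloud metadata does not list:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Ask for a region the metadata does not list, with strict matching on,
	// so the resolver returns an error instead of a best-guess endpoint.
	_, err := endpoints.AwsUsGovPartition().EndpointFor(
		"s3", "us-west-2", endpoints.StrictMatchingOption)

	switch e := err.(type) {
	case endpoints.UnknownServiceError:
		fmt.Println("unknown service:", e.Service, "known:", e.Known)
	case endpoints.UnknownEndpointError:
		fmt.Println("no endpoint for region:", e.Region, "known:", e.Known)
	case nil:
		fmt.Println("resolved without error")
	default:
		fmt.Println("other error:", err)
	}
}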
+ } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. +func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + e, hasEndpoint := s.endpointForRegion(region) + if !hasEndpoint && opt.StrictMatching { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + return e.resolve(service, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. 
+ return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + hostname := e.Hostname + + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + return ResolvedEndpoint{ + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go 
new file mode 100644 index 000000000..0fdfcc56e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,351 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + 
"RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . 
-}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..fa06f7a8f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 000000000..91a6f277a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. 
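ErrMissingRegion above is what a service call surfaces when neither the client configuration nor the environment supplies a region. A sketch of checking for it; the SES client and API call here are illustrative, and the comment assumes no region is configured in the environment or shared config:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	// Build a client without a Region; assuming the environment supplies none,
	// the request handlers reject the call before anything is sent on the wire.
	svc := ses.New(session.Must(session.NewSession(&aws.Config{})))

	_, err := svc.ListIdentities(&ses.ListIdentitiesInput{})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == aws.ErrMissingRegion.Code() {
		fmt.Println("region is not configured:", aerr.Message())
	}
}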
This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 000000000..6ed15b2ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,118 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nil, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodys. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK Will also enable LogDebug. + LogDebugWithEventStreamBody +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. 
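The debug levels above are bit flags, so sub-levels can be combined with a bitwise OR and tested with Matches. A short sketch of wiring a combined level into aws.Config; the specific levels chosen are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Combine debug sub-levels; each sub-level also carries LogDebug.
	level := aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestRetries

	cfg := &aws.Config{
		LogLevel: aws.LogLevel(level),
		Logger:   aws.NewDefaultLogger(),
	}

	fmt.Println(cfg.LogLevel.Matches(aws.LogDebug))            // true
	fmt.Println(cfg.LogLevel.Matches(aws.LogDebugWithSigning)) // false
}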
+type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 000000000..271da432c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +// +build !appengine,!plan9 + +package request + +import ( + "net" + "os" + "syscall" +) + +func isErrConnectionReset(err error) bool { + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + return sysErr.Err == syscall.ECONNRESET + } + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go new file mode 100644 index 000000000..daf9eca43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go @@ -0,0 +1,11 @@ +// +build appengine plan9 + +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + return strings.Contains(err.Error(), "connection reset") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 000000000..8ef8548a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,277 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalStream HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList + CompleteAttempt HandlerList + Complete HandlerList +} + +// Copy returns of this handler's lists. 
+func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalStream.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.CompleteAttempt.Clear() + h.Complete.Clear() +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = l.list[0:0] +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.PushBackNamed(NamedHandler{"__anonymous", f}) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. 
+func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. 
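+// Editorial note: illustrative sketch only; the application name, version, and
+// extra metadata below are placeholders, and "svc" is any service client.
+//
+//    // Produces a User-Agent fragment such as "myapp/1.2.3 (linux; amd64)".
+//    svc.Handlers.Build.PushBackNamed(request.NamedHandler{
+//        Name: "myapp.UserAgent",
+//        Fn:   request.MakeAddToUserAgentHandler("myapp", "1.2.3", "linux", "amd64"),
+//    })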
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 000000000..79f79602b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 000000000..b0c2ef4fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,60 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, sdkio.SeekStart) + + reader.buf = buf + return reader +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..8f2eb3e43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,673 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. 
+ ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + SanitizeHostForHeader(httpReq) + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. 
If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. +// +// var headers http.Header +// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) +func WithGetResponseHeaders(headers *http.Header) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *headers = req.HTTPResponse.Header + }) + } +} + +// WithLogLevel is a request option that will set the request to use a specific +// log level when the request is made. +// +// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) +func WithLogLevel(l aws.LogLevelType) Option { + return func(r *Request) { + r.Config.LogLevel = aws.LogLevel(l) + } +} + +// ApplyOptions will apply each option to the request calling them in the order +// the were provided. +func (r *Request) ApplyOptions(opts ...Option) { + for _, opt := range opts { + opt(r) + } +} + +// Context will always returns a non-nil context. If Request does not have a +// context aws.BackgroundContext will be returned. +func (r *Request) Context() aws.Context { + if r.context != nil { + return r.context + } + return aws.BackgroundContext() +} + +// SetContext adds a Context to the current request that can be used to cancel +// a in-flight request. The Context value must not be nil, or this method will +// panic. +// +// Unlike http.Request.WithContext, SetContext does not return a copy of the +// Request. It is not safe to use use a single Request value for multiple +// requests. A new Request should be created for each API operation request. +// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. The SDK +// may create sub contexts in the future for nested requests such as retries. +func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. 
+func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. + r.ResetBody() +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. 
+func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (io.ReadCloser, error) { + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody = newOffsetReader(r.Body, r.BodyStart) + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + } + + var body io.ReadCloser + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. 
Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", false, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } else if !shouldRetryCancel(r.Error) { + return err + } else { + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + r.prepareRetry() + continue + } + } +} + +func (r *Request) prepareRetry() { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", r.WillRetry(), r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. 
+func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryCancel(err error) bool { + switch err := err.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + return shouldRetryCancel(err.OrigErr()) + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryCancel(err.Err) + case temporary: + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retriable. See + // TestRequest4xxUnretryable for an example. + return true + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. 
port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 000000000..e36e468b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 000000000..7c6a8000f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,33 @@ +// +build go1.8 + +package request + +import ( + "net/http" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. 
+func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 000000000..a7365cd1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 000000000..307fa0705 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000..a633ed5ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,264 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. 
+ EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. +func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. 
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..d0aa54c6d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,163 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the client.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +type temporaryError interface { + Temporary() bool +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporaryError); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. 
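+// Editorial note: a minimal illustrative sketch; "req" is assumed to be a
+// previously built *request.Request.
+//
+//    if err := req.Send(); err != nil && request.IsErrorRetryable(err) {
+//        // the failure is one the SDK considers safe to retry
+//    }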
+func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) + } + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) + } + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 000000000..09a44eb98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. 
+func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. +// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. + r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 000000000..8630683f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,286 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. 
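+// Editorial note: illustrative sketch; the context and field names below are
+// placeholders chosen for readability.
+//
+//    errs := request.ErrInvalidParams{Context: "SendEmailInput"}
+//    msgErrs := request.ErrInvalidParams{Context: "Message"}
+//    msgErrs.Add(request.NewErrParamRequired("Subject"))
+//    errs.AddNested("Message", msgErrs)
+//    // errs.Error() now reports the missing field as "SendEmailInput.Message.Subject".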
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. 
+func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinLenErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} + +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required minimum length. +func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + +// An ErrParamFormat represents a invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. +func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 000000000..4601f883c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. 
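+// Editorial note: illustrative sketch; "w" is assumed to be a request.Waiter
+// value built by hand (generated WaitUntil*WithContext methods accept the same
+// WaiterOption values directly).
+//
+//    w.ApplyOptions(
+//        request.WithWaiterMaxAttempts(10),
+//        request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
+//    )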
+func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. 
+const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. 
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go new file mode 100644 index 000000000..ea9ebb6f6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go @@ -0,0 +1,26 @@ +// +build go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go new file mode 100644 index 000000000..fec39dfc1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go new file mode 100644 index 000000000..1c5a5391e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 000000000..38a7b05a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,273 @@ +/* +Package session provides configuration for the SDK's service clients. + +Sessions can be shared across all service clients that share the same base +configuration. The Session is built from the SDK's default configuration and +request handlers. + +Sessions should be cached when possible, because creating a new Session will +load all configuration values from the environment, and config files each time +the Session is created. Sharing the Session value across all of your service +clients will ensure the configuration is loaded the fewest number of times possible. + +Concurrency + +Sessions are safe to use concurrently as long as the Session is not being +modified. The SDK will not modify the Session once the Session has been created. +Creating service clients concurrently from a shared Session is safe. + +Sessions from Shared Config + +Sessions can be created using the method above that will only load the +additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. +Alternatively you can explicitly create a Session with shared config enabled. +To do this you can use NewSessionWithOptions to configure how the Session will +be created. Using the NewSessionWithOptions with SharedConfigState set to +SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG +environment variable was set. + +Creating Sessions + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. See the section Sessions from Shared Config for +more information. + +Create a Session with the default config and request handlers. With credentials +region, and profile loaded from the environment and shared config automatically. +Requires the AWS_PROFILE to be set, or "default" is used. 
+
+	// Create Session
+	sess := session.Must(session.NewSession())
+
+	// Create a Session with a custom region
+	sess := session.Must(session.NewSession(&aws.Config{
+		Region: aws.String("us-east-1"),
+	}))
+
+	// Create an S3 client instance from a session
+	sess := session.Must(session.NewSession())
+
+	svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+	// Equivalent to session.NewSession()
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		// Options
+	}))
+
+	// Specify profile to load for the session's config
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		Profile: "profile_name",
+	}))
+
+	// Specify profile for config and region for requests
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		Config: aws.Config{Region: aws.String("us-east-1")},
+		Profile: "profile_name",
+	}))
+
+	// Force enable Shared Config support
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		SharedConfigState: session.SharedConfigEnable,
+	}))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+	// Create a session, and add additional handlers for all service
+	// clients created with the Session to inherit. Adds logging handler.
+	sess := session.Must(session.NewSession())
+
+	sess.Handlers.Send.PushFront(func(r *request.Request) {
+		// Log every request made and its payload
+		logger.Printf("Request: %s/%s, Payload: %s",
+			r.ClientInfo.ServiceName, r.Operation, r.Params)
+	})
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. Credentials from a configuration file must include both
+aws_access_key_id and aws_secret_access_key together in the
+same file to be considered valid.
The values will be ignored if they do not
+form a complete group. aws_session_token is an optional field that can be
+provided if both of the other two fields are also provided.
+
+	aws_access_key_id = AKID
+	aws_secret_access_key = SECRET
+	aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with an MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+	role_arn = arn:aws:iam:::role/
+	source_profile = profile_with_creds
+	external_id = 1234
+	mfa_serial = 
+	role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+	region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA, set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA, the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable must be set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the session.
+
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+	}))
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess)
+
+To set up assume role outside of a session, see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created, several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. Session Token can optionally also be provided, but is
+not required.
+
+	# Access Key ID
+	AWS_ACCESS_KEY_ID=AKID
+	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+	# Secret Access Key
+	AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+	# Session Token
+	AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it is
+not provided in the environment the region must be provided before a service
+client request is made.
+
+	AWS_REGION=us-east-1
+
+	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_REGION is not also set.
+	AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided, "default" will be used as the profile name.
+ + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not a http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and custom HTTP client, the HTTP client needs to be provided +when creating the session. Not the service client. +*/ +package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 000000000..e3959b959 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,236 @@ +package session + +import ( + "os" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" +) + +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. 
+ // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + csmEnabled string + CSMEnabled bool + CSMPort string + CSMClientID string + + enableEndpointDiscovery string + // Enables endpoint discovery via environment variables. 
+ // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool +} + +var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = EnvProviderName + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // endpoint discovery is in reference to it being enabled. 
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000..be4b5f077 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,719 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. 
+// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg := loadEnvConfig() + + if envCfg.EnableSharedConfig { + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + + return s + } + + s := deprecatedNewSession(cfgs...) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created. Such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. 
+type SharedConfigState int
+
+const (
+	// SharedConfigStateFromEnv does not override any state of the
+	// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+	// SharedConfigState type.
+	SharedConfigStateFromEnv SharedConfigState = iota
+
+	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and disables the shared config functionality.
+	SharedConfigDisable
+
+	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and enables the shared config functionality.
+	SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set in this field
+	// will override the associated value provided by the SDK defaults,
+	// environment or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment,
+	// config will be used.
+	Config aws.Config
+
+	// Overrides the config profile the Session should be created from. If not
+	// set the value of the environment variable will be loaded (AWS_PROFILE,
+	// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+	//
+	// If not set and environment variables are not set the "default"
+	// (DefaultSharedConfigProfile) will be used as the profile to load the
+	// session config from.
+	Profile string
+
+	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+	// environment variable. By default a Session will be created using the
+	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+	//
+	// Setting this value to SharedConfigEnable or SharedConfigDisable
+	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+	// and enable or disable the shared config functionality.
+	SharedConfigState SharedConfigState
+
+	// Ordered list of files the session will load configuration from.
+	// It overrides the environment variables AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE.
+	SharedConfigFiles []string
+
+	// When the SDK's shared config is configured to assume a role with MFA
+	// this option is required in order to provide the mechanism that will
+	// retrieve the MFA token. There is no default value for this field. If
+	// it is not set an error will be returned when creating the session.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed. Within the context of service clients
+	// all sharing the same session the SDK will ensure calls to the token
+	// provider are atomic. When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+	//
+	// stscreds.StdinTokenProvider is a basic implementation that will prompt
+	// from stdin for the MFA token code.
+	//
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+	AssumeRoleTokenProvider func() (string, error)
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+	// the SDK will use instead of the default system's root CA bundle. Use this
+	// only if you want to replace the CA bundle the SDK uses for TLS requests.
+ // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + if opts.SharedConfigState == SharedConfigEnable { + envCfg = loadSharedEnvConfig() + } else { + envCfg = loadEnvConfig() + } + + if len(opts.Profile) > 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. 
Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func deprecatedNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + return s +} + +func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { + logger.Log("Enabling CSM") + if len(port) == 0 { + port = csm.DefaultPort + } + + r, err := csm.Start(clientID, "127.0.0.1:"+port) + if err != nil { + return + } + r.InjectHandlers(handlers) +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + + // Ordered config files will be loaded in with later files overwriting + // previous config file values. + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + if err != nil { + return nil, err + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + + return s, nil +} + +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify, nor copy the `DefaultTransport` specifying + // the values the next closest behavior. 
+ t = getCABundleTransport() + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { + // Merge in user provided configuration + cfg.MergeIn(userCfg) + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + + // Configure credentials if not already set + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + + // inspect the profile to see if a credential source has been specified. + if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { + + // if both credential_source and source_profile have been set, return an error + // as this is undefined behavior. + if len(sharedCfg.AssumeRole.SourceProfile) > 0 { + return ErrSharedConfigSourceCollision + } + + // valid credential source values + const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" + ) + + switch sharedCfg.AssumeRole.CredentialSource { + case credSourceEc2Metadata: + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + cfgCp.Credentials = credentials.NewCredentials(p) + + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. 
+ return AssumeRoleTokenProviderNotSetError{} + } + + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) + case credSourceEnvironment: + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return ErrSharedConfigECSContainerEnvVarEmpty + } + + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + creds := credentials.NewCredentials(p) + + cfg.Credentials = creds + default: + return ErrSharedConfigInvalidCredSource + } + + return nil + } + + if len(envCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + cfgCp := *cfg + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.AssumeRoleSource.Creds, + ) + + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return AssumeRoleTokenProviderNotSetError{} + } + + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + } else if len(sharedCfg.CredentialProcess) > 0 { + cfg.Credentials = processcreds.NewCredentials( + sharedCfg.CredentialProcess, + ) + } else { + // Fallback to default credentials provider, include mock errors + // for the credential chain so user can identify why credentials + // failed to be retrieved. + cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, + &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + } + + return nil +} + +func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials { + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + // Assume role with external ID + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ) +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the +// MFAToken option is not set when shared config is configured load assume a +// role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. 
+func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +var emptyCreds = credentials.Value{} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + // Backwards compatibility, the error will be eaten if user calls ClientConfig + // directly. All SDK services will use ClientconfigWithError. + cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) + + return cfg +} + +func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + var err error + + region := aws.StringValue(s.Config.Region) + + if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { + resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } else { + resolved, err = s.Config.EndpointResolver.EndpointFor( + serviceName, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) + opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + }, err +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. 
The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 000000000..7cb44021b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,329 @@ +package session + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process + credentialProcessKey = `credential_process` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + CredentialSource string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // An external process to request credentials + CredentialProcess string + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. 
+ // + // region + Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. +func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + if err = cfg.setFromIniFiles(profile, files); err != nil { + return sharedConfig{}, err + } + + if len(cfg.AssumeRole.SourceProfile) > 0 { + if err := cfg.setAssumeRoleSource(profile, files); err != nil { + return sharedConfig{}, err + } + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { + // Skip files which can't be opened and read for whatever reason + continue + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: sections, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { + var assumeRoleSrc sharedConfig + + if len(cfg.AssumeRole.CredentialSource) > 0 { + // setAssumeRoleSource is only called when source_profile is found. + // If both source_profile and credential_source are set, then + // ErrSharedConfigSourceCollision will be returned + return ErrSharedConfigSourceCollision + } + + // Multiple level assume role chains are not support + if cfg.AssumeRole.SourceProfile == origProfile { + assumeRoleSrc = *cfg + assumeRoleSrc.AssumeRole = assumeRoleConfig{} + } else { + err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) + if err != nil { + return err + } + } + + if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { + return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + } + + cfg.AssumeRoleSource = &assumeRoleSrc + + return nil +} + +func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { + // Trim files from the list that don't exist. + for _, f := range files { + if err := cfg.setFromIniFile(profile, f); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore proviles missings + continue + } + return err + } + } + + return nil +} + +// setFromFile loads the configuration from the file using +// the profile provided. A sharedConfig pointer type value is used so that +// multiple config file loadings can be chained. 
+// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For example +// if a config file only includes aws_access_key_id but no aws_secret_access_key +// the aws_access_key_id will be ignored. +func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { + section, ok := file.IniData.GetSection(profile) + if !ok { + // Fallback to to alternate profile name: profile + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} + } + } + + // Shared Credentials + akid := section.String(accessKeyIDKey) + secret := section.String(secretAccessKey) + if len(akid) > 0 && len(secret) > 0 { + cfg.Creds = credentials.Value{ + AccessKeyID: akid, + SecretAccessKey: secret, + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + } + + // Assume Role + roleArn := section.String(roleArnKey) + srcProfile := section.String(sourceProfileKey) + credentialSource := section.String(credentialSourceKey) + hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 + if len(roleArn) > 0 && hasSource { + cfg.AssumeRole = assumeRoleConfig{ + RoleARN: roleArn, + SourceProfile: srcProfile, + CredentialSource: credentialSource, + ExternalID: section.String(externalIDKey), + MFASerial: section.String(mfaSerialKey), + RoleSessionName: section.String(roleSessionNameKey), + } + } + + // `credential_process` + if credProc := section.String(credentialProcessKey); len(credProc) > 0 { + cfg.CredentialProcess = credProc + } + + // Region + if v := section.String(regionKey); len(v) > 0 { + cfg.Region = v + } + + // Endpoint discovery + if section.Has(enableEndpointDiscoveryKey) { + v := section.Bool(enableEndpointDiscoveryKey) + cfg.EnableEndpointDiscovery = &v + } + + return nil +} + +// SharedConfigLoadError is an error for the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. 
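// Illustrative sketch, not part of the vendored sources: from inside this
// package (for example a test), the loader above could be exercised against a
// hypothetical config file; the path, profile name, and region are placeholders.
//
//	// /tmp/config (hypothetical contents):
//	//   [profile dev]
//	//   region = us-west-2
//
//	cfg, err := loadSharedConfig("dev", []string{"/tmp/config"})
//	if err != nil {
//		// a SharedConfigLoadError wraps files that exist but cannot be parsed
//	}
//	_ = cfg.Region // "us-west-2", found via the "profile dev" fallback above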
+func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", + e.RoleARN) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 000000000..244c86da0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 000000000..6aa2ed241 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to 
+// true of the signer. +func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 000000000..bd082e9d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 000000000..523db79f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,796 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you many need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() +// method and using the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fallback to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of these escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query that the signature was generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK that explicitly escaping the request prior to being signed is preferable, +// and will help prevent signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping. 
+// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. 
See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // siganture's canonical string's path. For services that do not need additional + // escaping then use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disables the automatical setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. Need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + DisableURIPathEscaping bool + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + unsignedPayload bool + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. 
If +// a `nil` body parameter passed to Sign, the request's Body field will be +// also set to nil. Its important to note that this functionality will not +// change the request's ContentLength of the request. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, false, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. 
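// Illustrative sketch, not vendored code; the credentials, bucket, and object
// key are placeholders. Presigning a standalone S3 GET leaves the signed
// X-Amz-* query parameters on req.URL, which can then be shared.
//
//	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
//	_, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
//	if err == nil {
//		shareable := req.URL.String() // includes X-Amz-Signature, X-Amz-Expires, ...
//		_ = shareable
//	}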
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, true, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: isPresign, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.sanitizeHostForHeader() + ctx.assignAmzQueryValues() + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. + if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler should only be used with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. 
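// Illustrative sketch, not vendored code: a session built with anonymous
// credentials causes the handler below to return without signing, which is how
// unauthenticated requests are sent through the SDK.
//
//	sess := session.Must(session.NewSession(&aws.Config{
//		Credentials: credentials.AnonymousCredentials,
//	}))
//	_ = sess // clients built from sess make unsigned requests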
+func SignSDKRequest(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now) +} + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now, opts...) + }, + } +} + +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stompped + // on top of by the signer. + v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" 
+ ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } + + return nil +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
+ continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
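// Summary of the derivation implemented by buildSignature above (the date,
// region, and service values are placeholders); this mirrors the documented
// SigV4 signing-key chain:
//
//	kDate     = HMAC-SHA256("AWS4" + secretAccessKey, "20190315")
//	kRegion   = HMAC-SHA256(kDate, "us-west-2")
//	kService  = HMAC-SHA256(kRegion, "ses")
//	kSigning  = HMAC-SHA256(kService, "aws4_request")
//	signature = hex(HMAC-SHA256(kSigning, stringToSign))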
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, sdkio.SeekCurrent) + defer reader.Seek(start, sdkio.SeekStart) + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil) +} + +const doubleSpace = " " + +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + vals[i] = string(buf[:m]) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 000000000..8b6f23425 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,201 @@ +package aws + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should +// only be used with an io.Reader that is also an io.Seeker. Doing so may +// cause request signature errors, or request body's not sent for GET, HEAD +// and DELETE HTTP methods. +// +// Deprecated: Should only be used with io.ReadSeeker. If using for +// S3 PutObject to stream content use s3manager.Uploader instead. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. 
+// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. 
Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 000000000..6192b2455 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 000000000..0210d2720 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..cb36d3bcc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. 
+package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.19.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 000000000..e83a99886 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
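// Illustrative sketch, package-internal and assuming a Token value tok is in
// scope: how the constructors above nest, with the root stored as the first
// child unless RootToken is set.
//
//	expr := newASTWithRootToken(ASTKindExpr, tok) // leaf; Root holds the token
//	stmt := newAST(ASTKindStatement, expr)        // expr becomes Children[0]
//	_ = stmt.GetRoot()     // returns expr
//	_ = stmt.GetChildren() // empty: children are everything after the root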
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 000000000..0895d53cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 000000000..0b76999ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 000000000..25ce0fe13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 000000000..04345a54c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 000000000..91ba2a59d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 000000000..8d462f77e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 000000000..3b0ca7afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 000000000..582c024ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
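// Illustrative sketch, not vendored code: within the SDK (this is an internal
// package) the exported helpers above can parse shared-config content held in
// memory; the profile and key names are placeholders.
//
//	sections, err := ini.ParseBytes([]byte("[default]\nregion = us-west-2\n"))
//	if err != nil {
//		// handle the parse error
//	}
//	if sec, ok := sections.GetSection("default"); ok {
//		_ = sec.String("region") // "us-west-2"
//	}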
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. +type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 000000000..f99703372 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,347 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. 
+var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. 
+ // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assiging a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) + + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) 
+ stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. +func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 000000000..24df543d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 000000000..e52ac399f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 000000000..a45c0bc56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type 
numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 000000000..8a84c7cbe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 000000000..457287019 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 000000000..7f01cf7c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. 
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 000000000..f82095ba2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 000000000..6bb696447 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + + s.Continue() + return false + } + s.prevTok = tok + + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true + s.prevTok = emptyToken +} + +func (s *skipper) Continue() { + s.shouldSkip = false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 000000000..18f3fe893 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. +func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. 
+// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 000000000..305999d29 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical 
character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. +func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 000000000..94841c324 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... 
+func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + if rhs.Root.Type() != TokenLit { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... +func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go new file mode 100644 index 000000000..99915f7f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. 
+func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go new file mode 100644 index 000000000..7ffb4ae06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. +func isWhitespace(c rune) bool { + return unicode.IsSpace(c) && c != '\n' && c != '\r' +} + +func newWSToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if !isWhitespace(b[i]) { + break + } + } + + return newToken(TokenWS, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 000000000..5aa9137e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 io package's Seeker constants. +const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 000000000..e5f005613 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 000000000..0c9802d87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 000000000..38ea61afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// 
character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) + if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 000000000..7da8a49ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 000000000..ebcbc2b40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 000000000..d7d42db0a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. +var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. 
+func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 000000000..915b0fcaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. +func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 000000000..53831dff9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. 
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 000000000..776d11018 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be use for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it. +// +// Will panic if the escape mode is unknown. +func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + switch escape { + case NoEscape: + return string(b), nil + case Base64Escape: + return base64.StdEncoding.EncodeToString(b), nil + case QuotedEscape: + return strconv.Quote(string(b)), nil + } + + panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) +} + +// DecodeJSONValue will attempt to decode the string input as a JSONValue. +// Optionally decoding base64 the value first before JSON unmarshaling. +// +// Will panic if the escape mode is unknown. 
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go new file mode 100644 index 000000000..e21614a12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -0,0 +1,81 @@ +package protocol + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// PayloadUnmarshaler provides the interface for unmarshaling a payload's +// reader into a SDK shape. +type PayloadUnmarshaler interface { + UnmarshalPayload(io.Reader, interface{}) error +} + +// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a +// HandlerList. This provides the support for unmarshaling a payload reader to +// a shape without needing a SDK request first. +type HandlerPayloadUnmarshal struct { + Unmarshalers request.HandlerList +} + +// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using +// the Unmarshalers HandlerList provided. Returns an error if unable +// unmarshaling fails. +func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { + req := &request.Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(r), + }, + Data: v, + } + + h.Unmarshalers.Run(req) + + return req.Error +} + +// PayloadMarshaler provides the interface for marshaling a SDK shape into and +// io.Writer. +type PayloadMarshaler interface { + MarshalPayload(io.Writer, interface{}) error +} + +// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. +// This provides support for marshaling a SDK shape into an io.Writer without +// needing a SDK request first. +type HandlerPayloadMarshal struct { + Marshalers request.HandlerList +} + +// MarshalPayload marshals the SDK shape into the io.Writer using the +// Marshalers HandlerList provided. Returns an error if unable if marshal +// fails. +func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "GET"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..60e5b09d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. 
+package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..75866d012 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..3495c7307 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. 
+func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..46d354e82 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,74 @@ +package query + +import ( + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed to read from query HTTP response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000..b80f84fbb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,300 @@ +// Package rest provides RESTful serialization of AWS requests and responses. 
+package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + 
default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == 
"querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..33fd53b12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,225 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. 
+func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = 
http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 000000000..b7ed6c6f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,72 @@ +package protocol + +import ( + "strconv" + "time" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +const ( + // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns if the timestamp format name +// is know to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822TimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601TimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the time if it was able to be parsed, and fails otherwise. 
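+//
+// Illustrative example, using the sample values quoted in the format constants
+// above:
+//
+//	t, _ := ParseTime(ISO8601TimeFormatName, "2014-04-29T18:30:38Z")
+//	s := FormatTime(RFC822TimeFormatName, t) // "Tue, 29 Apr 2014 18:30:38 GMT"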
+func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), 0), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 000000000..da1a68111 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000..cf981fe95 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,306 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. 
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. +func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. 
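+//
+// For example (illustrative values), a slice field tagged locationName:"Items"
+// is encoded as
+//
+//	<Items><member>v1</member><member>v2</member></Items>
+//
+// unless the field is also tagged flattened, in which case each element is
+// written as its own <Items> element; locationNameList overrides the default
+// "member" name.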
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. +// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. 
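+//
+// For example (illustrative values), an int64 tagged locationName:"Count"
+// becomes the child element <Count>3</Count>; adding an xmlAttribute tag would
+// instead emit it as a Count="3" attribute on the current node.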
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..ff1ef6830 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,272 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. +func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. 
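+//
+// For example (illustrative values), a node whose Text is "3" fills a *int64
+// field, "true" fills a *bool, and timestamps default to the ISO8601 format
+// when the field has no timestampFormat tag.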
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..515ce1521 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,148 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
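+//
+// Illustrative example: decoding <a><b>1</b><b>2</b></a> yields a root whose
+// Children["a"][0].Children["b"] holds two nodes with Text "1" and "2".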
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go new file mode 100644 index 000000000..691f57e58 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go @@ -0,0 +1,15146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ses + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opCloneReceiptRuleSet = "CloneReceiptRuleSet" + +// CloneReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the CloneReceiptRuleSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CloneReceiptRuleSet for more information on using the CloneReceiptRuleSet +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CloneReceiptRuleSetRequest method. +// req, resp := client.CloneReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CloneReceiptRuleSet +func (c *SES) CloneReceiptRuleSetRequest(input *CloneReceiptRuleSetInput) (req *request.Request, output *CloneReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opCloneReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CloneReceiptRuleSetInput{} + } + + output = &CloneReceiptRuleSetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CloneReceiptRuleSet API operation for Amazon Simple Email Service. +// +// Creates a receipt rule set by cloning an existing one. All receipt rules +// and configurations are copied to the new receipt rule set and are completely +// independent of the source rule set. +// +// For information about setting up rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CloneReceiptRuleSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// * ErrCodeAlreadyExistsException "AlreadyExists" +// Indicates that a resource could not be created because of a naming conflict. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CloneReceiptRuleSet +func (c *SES) CloneReceiptRuleSet(input *CloneReceiptRuleSetInput) (*CloneReceiptRuleSetOutput, error) { + req, out := c.CloneReceiptRuleSetRequest(input) + return out, req.Send() +} + +// CloneReceiptRuleSetWithContext is the same as CloneReceiptRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See CloneReceiptRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CloneReceiptRuleSetWithContext(ctx aws.Context, input *CloneReceiptRuleSetInput, opts ...request.Option) (*CloneReceiptRuleSetOutput, error) { + req, out := c.CloneReceiptRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateConfigurationSet = "CreateConfigurationSet" + +// CreateConfigurationSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateConfigurationSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateConfigurationSet for more information on using the CreateConfigurationSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateConfigurationSetRequest method. +// req, resp := client.CreateConfigurationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateConfigurationSet +func (c *SES) CreateConfigurationSetRequest(input *CreateConfigurationSetInput) (req *request.Request, output *CreateConfigurationSetOutput) { + op := &request.Operation{ + Name: opCreateConfigurationSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConfigurationSetInput{} + } + + output = &CreateConfigurationSetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateConfigurationSet API operation for Amazon Simple Email Service. +// +// Creates a configuration set. +// +// Configuration sets enable you to publish email sending events. For information +// about using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateConfigurationSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetAlreadyExistsException "ConfigurationSetAlreadyExists" +// Indicates that the configuration set could not be created because of a naming +// conflict. +// +// * ErrCodeInvalidConfigurationSetException "InvalidConfigurationSet" +// Indicates that the configuration set is invalid. See the error message for +// details. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateConfigurationSet +func (c *SES) CreateConfigurationSet(input *CreateConfigurationSetInput) (*CreateConfigurationSetOutput, error) { + req, out := c.CreateConfigurationSetRequest(input) + return out, req.Send() +} + +// CreateConfigurationSetWithContext is the same as CreateConfigurationSet with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateConfigurationSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateConfigurationSetWithContext(ctx aws.Context, input *CreateConfigurationSetInput, opts ...request.Option) (*CreateConfigurationSetOutput, error) { + req, out := c.CreateConfigurationSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateConfigurationSetEventDestination = "CreateConfigurationSetEventDestination" + +// CreateConfigurationSetEventDestinationRequest generates a "aws/request.Request" representing the +// client's request for the CreateConfigurationSetEventDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateConfigurationSetEventDestination for more information on using the CreateConfigurationSetEventDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateConfigurationSetEventDestinationRequest method. +// req, resp := client.CreateConfigurationSetEventDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateConfigurationSetEventDestination +func (c *SES) CreateConfigurationSetEventDestinationRequest(input *CreateConfigurationSetEventDestinationInput) (req *request.Request, output *CreateConfigurationSetEventDestinationOutput) { + op := &request.Operation{ + Name: opCreateConfigurationSetEventDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConfigurationSetEventDestinationInput{} + } + + output = &CreateConfigurationSetEventDestinationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateConfigurationSetEventDestination API operation for Amazon Simple Email Service. +// +// Creates a configuration set event destination. +// +// When you create or update an event destination, you must provide one, and +// only one, destination. The destination can be CloudWatch, Amazon Kinesis +// Firehose, or Amazon Simple Notification Service (Amazon SNS). +// +// An event destination is the AWS service to which Amazon SES publishes the +// email sending events associated with a configuration set. For information +// about using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateConfigurationSetEventDestination for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeEventDestinationAlreadyExistsException "EventDestinationAlreadyExists" +// Indicates that the event destination could not be created because of a naming +// conflict. +// +// * ErrCodeInvalidCloudWatchDestinationException "InvalidCloudWatchDestination" +// Indicates that the Amazon CloudWatch destination is invalid. See the error +// message for details. +// +// * ErrCodeInvalidFirehoseDestinationException "InvalidFirehoseDestination" +// Indicates that the Amazon Kinesis Firehose destination is invalid. See the +// error message for details. +// +// * ErrCodeInvalidSNSDestinationException "InvalidSNSDestination" +// Indicates that the Amazon Simple Notification Service (Amazon SNS) destination +// is invalid. See the error message for details. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateConfigurationSetEventDestination +func (c *SES) CreateConfigurationSetEventDestination(input *CreateConfigurationSetEventDestinationInput) (*CreateConfigurationSetEventDestinationOutput, error) { + req, out := c.CreateConfigurationSetEventDestinationRequest(input) + return out, req.Send() +} + +// CreateConfigurationSetEventDestinationWithContext is the same as CreateConfigurationSetEventDestination with the addition of +// the ability to pass a context and additional request options. +// +// See CreateConfigurationSetEventDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateConfigurationSetEventDestinationWithContext(ctx aws.Context, input *CreateConfigurationSetEventDestinationInput, opts ...request.Option) (*CreateConfigurationSetEventDestinationOutput, error) { + req, out := c.CreateConfigurationSetEventDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateConfigurationSetTrackingOptions = "CreateConfigurationSetTrackingOptions" + +// CreateConfigurationSetTrackingOptionsRequest generates a "aws/request.Request" representing the +// client's request for the CreateConfigurationSetTrackingOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateConfigurationSetTrackingOptions for more information on using the CreateConfigurationSetTrackingOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateConfigurationSetTrackingOptionsRequest method. +// req, resp := client.CreateConfigurationSetTrackingOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateConfigurationSetTrackingOptions +func (c *SES) CreateConfigurationSetTrackingOptionsRequest(input *CreateConfigurationSetTrackingOptionsInput) (req *request.Request, output *CreateConfigurationSetTrackingOptionsOutput) { + op := &request.Operation{ + Name: opCreateConfigurationSetTrackingOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConfigurationSetTrackingOptionsInput{} + } + + output = &CreateConfigurationSetTrackingOptionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateConfigurationSetTrackingOptions API operation for Amazon Simple Email Service. +// +// Creates an association between a configuration set and a custom domain for +// open and click event tracking. +// +// By default, images and links used for tracking open and click events are +// hosted on domains operated by Amazon SES. You can configure a subdomain of +// your own to handle these events. For information about using custom domains, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateConfigurationSetTrackingOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeTrackingOptionsAlreadyExistsException "TrackingOptionsAlreadyExistsException" +// Indicates that the configuration set you specified already contains a TrackingOptions +// object. +// +// * ErrCodeInvalidTrackingOptionsException "InvalidTrackingOptions" +// Indicates that the custom domain to be used for open and click tracking redirects +// is invalid. This error appears most often in the following situations: +// +// * When the tracking domain you specified is not verified in Amazon SES. +// +// * When the tracking domain you specified is not a valid domain or subdomain. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateConfigurationSetTrackingOptions +func (c *SES) CreateConfigurationSetTrackingOptions(input *CreateConfigurationSetTrackingOptionsInput) (*CreateConfigurationSetTrackingOptionsOutput, error) { + req, out := c.CreateConfigurationSetTrackingOptionsRequest(input) + return out, req.Send() +} + +// CreateConfigurationSetTrackingOptionsWithContext is the same as CreateConfigurationSetTrackingOptions with the addition of +// the ability to pass a context and additional request options. +// +// See CreateConfigurationSetTrackingOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateConfigurationSetTrackingOptionsWithContext(ctx aws.Context, input *CreateConfigurationSetTrackingOptionsInput, opts ...request.Option) (*CreateConfigurationSetTrackingOptionsOutput, error) { + req, out := c.CreateConfigurationSetTrackingOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateCustomVerificationEmailTemplate = "CreateCustomVerificationEmailTemplate" + +// CreateCustomVerificationEmailTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateCustomVerificationEmailTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCustomVerificationEmailTemplate for more information on using the CreateCustomVerificationEmailTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCustomVerificationEmailTemplateRequest method. +// req, resp := client.CreateCustomVerificationEmailTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateCustomVerificationEmailTemplate +func (c *SES) CreateCustomVerificationEmailTemplateRequest(input *CreateCustomVerificationEmailTemplateInput) (req *request.Request, output *CreateCustomVerificationEmailTemplateOutput) { + op := &request.Operation{ + Name: opCreateCustomVerificationEmailTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomVerificationEmailTemplateInput{} + } + + output = &CreateCustomVerificationEmailTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateCustomVerificationEmailTemplate API operation for Amazon Simple Email Service. +// +// Creates a new custom verification email template. +// +// For more information about custom verification email templates, see Using +// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// in the Amazon SES Developer Guide. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateCustomVerificationEmailTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomVerificationEmailTemplateAlreadyExistsException "CustomVerificationEmailTemplateAlreadyExists" +// Indicates that a custom verification email template with the name you specified +// already exists. 
+// +// * ErrCodeFromEmailAddressNotVerifiedException "FromEmailAddressNotVerified" +// Indicates that the sender address specified for a custom verification email +// is not verified, and is therefore not eligible to send the custom verification +// email. +// +// * ErrCodeCustomVerificationEmailInvalidContentException "CustomVerificationEmailInvalidContent" +// Indicates that custom verification email template provided content is invalid. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateCustomVerificationEmailTemplate +func (c *SES) CreateCustomVerificationEmailTemplate(input *CreateCustomVerificationEmailTemplateInput) (*CreateCustomVerificationEmailTemplateOutput, error) { + req, out := c.CreateCustomVerificationEmailTemplateRequest(input) + return out, req.Send() +} + +// CreateCustomVerificationEmailTemplateWithContext is the same as CreateCustomVerificationEmailTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCustomVerificationEmailTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateCustomVerificationEmailTemplateWithContext(ctx aws.Context, input *CreateCustomVerificationEmailTemplateInput, opts ...request.Option) (*CreateCustomVerificationEmailTemplateOutput, error) { + req, out := c.CreateCustomVerificationEmailTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateReceiptFilter = "CreateReceiptFilter" + +// CreateReceiptFilterRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateReceiptFilter for more information on using the CreateReceiptFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateReceiptFilterRequest method. 
+// req, resp := client.CreateReceiptFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateReceiptFilter +func (c *SES) CreateReceiptFilterRequest(input *CreateReceiptFilterInput) (req *request.Request, output *CreateReceiptFilterOutput) { + op := &request.Operation{ + Name: opCreateReceiptFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptFilterInput{} + } + + output = &CreateReceiptFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateReceiptFilter API operation for Amazon Simple Email Service. +// +// Creates a new IP address filter. +// +// For information about setting up IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateReceiptFilter for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// * ErrCodeAlreadyExistsException "AlreadyExists" +// Indicates that a resource could not be created because of a naming conflict. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateReceiptFilter +func (c *SES) CreateReceiptFilter(input *CreateReceiptFilterInput) (*CreateReceiptFilterOutput, error) { + req, out := c.CreateReceiptFilterRequest(input) + return out, req.Send() +} + +// CreateReceiptFilterWithContext is the same as CreateReceiptFilter with the addition of +// the ability to pass a context and additional request options. +// +// See CreateReceiptFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateReceiptFilterWithContext(ctx aws.Context, input *CreateReceiptFilterInput, opts ...request.Option) (*CreateReceiptFilterOutput, error) { + req, out := c.CreateReceiptFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateReceiptRule = "CreateReceiptRule" + +// CreateReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateReceiptRule for more information on using the CreateReceiptRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateReceiptRuleRequest method. +// req, resp := client.CreateReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateReceiptRule +func (c *SES) CreateReceiptRuleRequest(input *CreateReceiptRuleInput) (req *request.Request, output *CreateReceiptRuleOutput) { + op := &request.Operation{ + Name: opCreateReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptRuleInput{} + } + + output = &CreateReceiptRuleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateReceiptRule API operation for Amazon Simple Email Service. +// +// Creates a receipt rule. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateReceiptRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidSnsTopicException "InvalidSnsTopic" +// Indicates that the provided Amazon SNS topic is invalid, or that Amazon SES +// could not publish to the topic, possibly due to permissions issues. For information +// about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// * ErrCodeInvalidS3ConfigurationException "InvalidS3Configuration" +// Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is +// invalid, or that Amazon SES could not publish to the bucket, possibly due +// to permissions issues. For information about giving permissions, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// * ErrCodeInvalidLambdaFunctionException "InvalidLambdaFunction" +// Indicates that the provided AWS Lambda function is invalid, or that Amazon +// SES could not execute the provided function, possibly due to permissions +// issues. For information about giving permissions, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// * ErrCodeAlreadyExistsException "AlreadyExists" +// Indicates that a resource could not be created because of a naming conflict. +// +// * ErrCodeRuleDoesNotExistException "RuleDoesNotExist" +// Indicates that the provided receipt rule does not exist. +// +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. 
+// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateReceiptRule +func (c *SES) CreateReceiptRule(input *CreateReceiptRuleInput) (*CreateReceiptRuleOutput, error) { + req, out := c.CreateReceiptRuleRequest(input) + return out, req.Send() +} + +// CreateReceiptRuleWithContext is the same as CreateReceiptRule with the addition of +// the ability to pass a context and additional request options. +// +// See CreateReceiptRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateReceiptRuleWithContext(ctx aws.Context, input *CreateReceiptRuleInput, opts ...request.Option) (*CreateReceiptRuleOutput, error) { + req, out := c.CreateReceiptRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateReceiptRuleSet = "CreateReceiptRuleSet" + +// CreateReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptRuleSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateReceiptRuleSet for more information on using the CreateReceiptRuleSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateReceiptRuleSetRequest method. +// req, resp := client.CreateReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateReceiptRuleSet +func (c *SES) CreateReceiptRuleSetRequest(input *CreateReceiptRuleSetInput) (req *request.Request, output *CreateReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opCreateReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptRuleSetInput{} + } + + output = &CreateReceiptRuleSetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateReceiptRuleSet API operation for Amazon Simple Email Service. +// +// Creates an empty receipt rule set. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateReceiptRuleSet for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeAlreadyExistsException "AlreadyExists" +// Indicates that a resource could not be created because of a naming conflict. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateReceiptRuleSet +func (c *SES) CreateReceiptRuleSet(input *CreateReceiptRuleSetInput) (*CreateReceiptRuleSetOutput, error) { + req, out := c.CreateReceiptRuleSetRequest(input) + return out, req.Send() +} + +// CreateReceiptRuleSetWithContext is the same as CreateReceiptRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateReceiptRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateReceiptRuleSetWithContext(ctx aws.Context, input *CreateReceiptRuleSetInput, opts ...request.Option) (*CreateReceiptRuleSetOutput, error) { + req, out := c.CreateReceiptRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTemplate = "CreateTemplate" + +// CreateTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTemplate for more information on using the CreateTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTemplateRequest method. +// req, resp := client.CreateTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateTemplate +func (c *SES) CreateTemplateRequest(input *CreateTemplateInput) (req *request.Request, output *CreateTemplateOutput) { + op := &request.Operation{ + Name: opCreateTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTemplateInput{} + } + + output = &CreateTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateTemplate API operation for Amazon Simple Email Service. +// +// Creates an email template. Email templates enable you to send personalized +// email to one or more destinations in a single API operation. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAlreadyExistsException "AlreadyExists" +// Indicates that a resource could not be created because of a naming conflict. +// +// * ErrCodeInvalidTemplateException "InvalidTemplate" +// Indicates that the template that you specified could not be rendered. This +// issue may occur when a template refers to a partial that does not exist. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateTemplate +func (c *SES) CreateTemplate(input *CreateTemplateInput) (*CreateTemplateOutput, error) { + req, out := c.CreateTemplateRequest(input) + return out, req.Send() +} + +// CreateTemplateWithContext is the same as CreateTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateTemplateWithContext(ctx aws.Context, input *CreateTemplateInput, opts ...request.Option) (*CreateTemplateOutput, error) { + req, out := c.CreateTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteConfigurationSet = "DeleteConfigurationSet" + +// DeleteConfigurationSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConfigurationSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteConfigurationSet for more information on using the DeleteConfigurationSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteConfigurationSetRequest method. 
+// req, resp := client.DeleteConfigurationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteConfigurationSet +func (c *SES) DeleteConfigurationSetRequest(input *DeleteConfigurationSetInput) (req *request.Request, output *DeleteConfigurationSetOutput) { + op := &request.Operation{ + Name: opDeleteConfigurationSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConfigurationSetInput{} + } + + output = &DeleteConfigurationSetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteConfigurationSet API operation for Amazon Simple Email Service. +// +// Deletes a configuration set. Configuration sets enable you to publish email +// sending events. For information about using configuration sets, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteConfigurationSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteConfigurationSet +func (c *SES) DeleteConfigurationSet(input *DeleteConfigurationSetInput) (*DeleteConfigurationSetOutput, error) { + req, out := c.DeleteConfigurationSetRequest(input) + return out, req.Send() +} + +// DeleteConfigurationSetWithContext is the same as DeleteConfigurationSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteConfigurationSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteConfigurationSetWithContext(ctx aws.Context, input *DeleteConfigurationSetInput, opts ...request.Option) (*DeleteConfigurationSetOutput, error) { + req, out := c.DeleteConfigurationSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteConfigurationSetEventDestination = "DeleteConfigurationSetEventDestination" + +// DeleteConfigurationSetEventDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConfigurationSetEventDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteConfigurationSetEventDestination for more information on using the DeleteConfigurationSetEventDestination +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteConfigurationSetEventDestinationRequest method. +// req, resp := client.DeleteConfigurationSetEventDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteConfigurationSetEventDestination +func (c *SES) DeleteConfigurationSetEventDestinationRequest(input *DeleteConfigurationSetEventDestinationInput) (req *request.Request, output *DeleteConfigurationSetEventDestinationOutput) { + op := &request.Operation{ + Name: opDeleteConfigurationSetEventDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConfigurationSetEventDestinationInput{} + } + + output = &DeleteConfigurationSetEventDestinationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteConfigurationSetEventDestination API operation for Amazon Simple Email Service. +// +// Deletes a configuration set event destination. Configuration set event destinations +// are associated with configuration sets, which enable you to publish email +// sending events. For information about using configuration sets, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteConfigurationSetEventDestination for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeEventDestinationDoesNotExistException "EventDestinationDoesNotExist" +// Indicates that the event destination does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteConfigurationSetEventDestination +func (c *SES) DeleteConfigurationSetEventDestination(input *DeleteConfigurationSetEventDestinationInput) (*DeleteConfigurationSetEventDestinationOutput, error) { + req, out := c.DeleteConfigurationSetEventDestinationRequest(input) + return out, req.Send() +} + +// DeleteConfigurationSetEventDestinationWithContext is the same as DeleteConfigurationSetEventDestination with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteConfigurationSetEventDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SES) DeleteConfigurationSetEventDestinationWithContext(ctx aws.Context, input *DeleteConfigurationSetEventDestinationInput, opts ...request.Option) (*DeleteConfigurationSetEventDestinationOutput, error) { + req, out := c.DeleteConfigurationSetEventDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteConfigurationSetTrackingOptions = "DeleteConfigurationSetTrackingOptions" + +// DeleteConfigurationSetTrackingOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConfigurationSetTrackingOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteConfigurationSetTrackingOptions for more information on using the DeleteConfigurationSetTrackingOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteConfigurationSetTrackingOptionsRequest method. +// req, resp := client.DeleteConfigurationSetTrackingOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteConfigurationSetTrackingOptions +func (c *SES) DeleteConfigurationSetTrackingOptionsRequest(input *DeleteConfigurationSetTrackingOptionsInput) (req *request.Request, output *DeleteConfigurationSetTrackingOptionsOutput) { + op := &request.Operation{ + Name: opDeleteConfigurationSetTrackingOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConfigurationSetTrackingOptionsInput{} + } + + output = &DeleteConfigurationSetTrackingOptionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteConfigurationSetTrackingOptions API operation for Amazon Simple Email Service. +// +// Deletes an association between a configuration set and a custom domain for +// open and click event tracking. +// +// By default, images and links used for tracking open and click events are +// hosted on domains operated by Amazon SES. You can configure a subdomain of +// your own to handle these events. For information about using custom domains, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). +// +// Deleting this kind of association will result in emails sent using the specified +// configuration set to capture open and click events using the standard, Amazon +// SES-operated domains. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteConfigurationSetTrackingOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. 
+// +// * ErrCodeTrackingOptionsDoesNotExistException "TrackingOptionsDoesNotExistException" +// Indicates that the TrackingOptions object you specified does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteConfigurationSetTrackingOptions +func (c *SES) DeleteConfigurationSetTrackingOptions(input *DeleteConfigurationSetTrackingOptionsInput) (*DeleteConfigurationSetTrackingOptionsOutput, error) { + req, out := c.DeleteConfigurationSetTrackingOptionsRequest(input) + return out, req.Send() +} + +// DeleteConfigurationSetTrackingOptionsWithContext is the same as DeleteConfigurationSetTrackingOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteConfigurationSetTrackingOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteConfigurationSetTrackingOptionsWithContext(ctx aws.Context, input *DeleteConfigurationSetTrackingOptionsInput, opts ...request.Option) (*DeleteConfigurationSetTrackingOptionsOutput, error) { + req, out := c.DeleteConfigurationSetTrackingOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteCustomVerificationEmailTemplate = "DeleteCustomVerificationEmailTemplate" + +// DeleteCustomVerificationEmailTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCustomVerificationEmailTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCustomVerificationEmailTemplate for more information on using the DeleteCustomVerificationEmailTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCustomVerificationEmailTemplateRequest method. +// req, resp := client.DeleteCustomVerificationEmailTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteCustomVerificationEmailTemplate +func (c *SES) DeleteCustomVerificationEmailTemplateRequest(input *DeleteCustomVerificationEmailTemplateInput) (req *request.Request, output *DeleteCustomVerificationEmailTemplateOutput) { + op := &request.Operation{ + Name: opDeleteCustomVerificationEmailTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomVerificationEmailTemplateInput{} + } + + output = &DeleteCustomVerificationEmailTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteCustomVerificationEmailTemplate API operation for Amazon Simple Email Service. +// +// Deletes an existing custom verification email template. 
+// +// For more information about custom verification email templates, see Using +// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// in the Amazon SES Developer Guide. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteCustomVerificationEmailTemplate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteCustomVerificationEmailTemplate +func (c *SES) DeleteCustomVerificationEmailTemplate(input *DeleteCustomVerificationEmailTemplateInput) (*DeleteCustomVerificationEmailTemplateOutput, error) { + req, out := c.DeleteCustomVerificationEmailTemplateRequest(input) + return out, req.Send() +} + +// DeleteCustomVerificationEmailTemplateWithContext is the same as DeleteCustomVerificationEmailTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCustomVerificationEmailTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteCustomVerificationEmailTemplateWithContext(ctx aws.Context, input *DeleteCustomVerificationEmailTemplateInput, opts ...request.Option) (*DeleteCustomVerificationEmailTemplateOutput, error) { + req, out := c.DeleteCustomVerificationEmailTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteIdentity = "DeleteIdentity" + +// DeleteIdentityRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteIdentity for more information on using the DeleteIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteIdentityRequest method. +// req, resp := client.DeleteIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteIdentity +func (c *SES) DeleteIdentityRequest(input *DeleteIdentityInput) (req *request.Request, output *DeleteIdentityOutput) { + op := &request.Operation{ + Name: opDeleteIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityInput{} + } + + output = &DeleteIdentityOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteIdentity API operation for Amazon Simple Email Service. 
+// +// Deletes the specified identity (an email address or a domain) from the list +// of verified identities. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteIdentity for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteIdentity +func (c *SES) DeleteIdentity(input *DeleteIdentityInput) (*DeleteIdentityOutput, error) { + req, out := c.DeleteIdentityRequest(input) + return out, req.Send() +} + +// DeleteIdentityWithContext is the same as DeleteIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteIdentityWithContext(ctx aws.Context, input *DeleteIdentityInput, opts ...request.Option) (*DeleteIdentityOutput, error) { + req, out := c.DeleteIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteIdentityPolicy = "DeleteIdentityPolicy" + +// DeleteIdentityPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIdentityPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteIdentityPolicy for more information on using the DeleteIdentityPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteIdentityPolicyRequest method. +// req, resp := client.DeleteIdentityPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteIdentityPolicy +func (c *SES) DeleteIdentityPolicyRequest(input *DeleteIdentityPolicyInput) (req *request.Request, output *DeleteIdentityPolicyOutput) { + op := &request.Operation{ + Name: opDeleteIdentityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityPolicyInput{} + } + + output = &DeleteIdentityPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteIdentityPolicy API operation for Amazon Simple Email Service. +// +// Deletes the specified sending authorization policy for the given identity +// (an email address or a domain). This API returns successfully even if a policy +// with the specified name does not exist. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. 
+// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteIdentityPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteIdentityPolicy +func (c *SES) DeleteIdentityPolicy(input *DeleteIdentityPolicyInput) (*DeleteIdentityPolicyOutput, error) { + req, out := c.DeleteIdentityPolicyRequest(input) + return out, req.Send() +} + +// DeleteIdentityPolicyWithContext is the same as DeleteIdentityPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteIdentityPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteIdentityPolicyWithContext(ctx aws.Context, input *DeleteIdentityPolicyInput, opts ...request.Option) (*DeleteIdentityPolicyOutput, error) { + req, out := c.DeleteIdentityPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteReceiptFilter = "DeleteReceiptFilter" + +// DeleteReceiptFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteReceiptFilter for more information on using the DeleteReceiptFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteReceiptFilterRequest method. +// req, resp := client.DeleteReceiptFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteReceiptFilter +func (c *SES) DeleteReceiptFilterRequest(input *DeleteReceiptFilterInput) (req *request.Request, output *DeleteReceiptFilterOutput) { + op := &request.Operation{ + Name: opDeleteReceiptFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptFilterInput{} + } + + output = &DeleteReceiptFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteReceiptFilter API operation for Amazon Simple Email Service. +// +// Deletes the specified IP address filter. 
+// +// For information about managing IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteReceiptFilter for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteReceiptFilter +func (c *SES) DeleteReceiptFilter(input *DeleteReceiptFilterInput) (*DeleteReceiptFilterOutput, error) { + req, out := c.DeleteReceiptFilterRequest(input) + return out, req.Send() +} + +// DeleteReceiptFilterWithContext is the same as DeleteReceiptFilter with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteReceiptFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteReceiptFilterWithContext(ctx aws.Context, input *DeleteReceiptFilterInput, opts ...request.Option) (*DeleteReceiptFilterOutput, error) { + req, out := c.DeleteReceiptFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteReceiptRule = "DeleteReceiptRule" + +// DeleteReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteReceiptRule for more information on using the DeleteReceiptRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteReceiptRuleRequest method. +// req, resp := client.DeleteReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteReceiptRule +func (c *SES) DeleteReceiptRuleRequest(input *DeleteReceiptRuleInput) (req *request.Request, output *DeleteReceiptRuleOutput) { + op := &request.Operation{ + Name: opDeleteReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptRuleInput{} + } + + output = &DeleteReceiptRuleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteReceiptRule API operation for Amazon Simple Email Service. +// +// Deletes the specified receipt rule. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). 
+// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteReceiptRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteReceiptRule +func (c *SES) DeleteReceiptRule(input *DeleteReceiptRuleInput) (*DeleteReceiptRuleOutput, error) { + req, out := c.DeleteReceiptRuleRequest(input) + return out, req.Send() +} + +// DeleteReceiptRuleWithContext is the same as DeleteReceiptRule with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteReceiptRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteReceiptRuleWithContext(ctx aws.Context, input *DeleteReceiptRuleInput, opts ...request.Option) (*DeleteReceiptRuleOutput, error) { + req, out := c.DeleteReceiptRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteReceiptRuleSet = "DeleteReceiptRuleSet" + +// DeleteReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptRuleSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteReceiptRuleSet for more information on using the DeleteReceiptRuleSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteReceiptRuleSetRequest method. +// req, resp := client.DeleteReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteReceiptRuleSet +func (c *SES) DeleteReceiptRuleSetRequest(input *DeleteReceiptRuleSetInput) (req *request.Request, output *DeleteReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDeleteReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptRuleSetInput{} + } + + output = &DeleteReceiptRuleSetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteReceiptRuleSet API operation for Amazon Simple Email Service. +// +// Deletes the specified receipt rule set and all of the receipt rules it contains. +// +// The currently active rule set cannot be deleted. 
+// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteReceiptRuleSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCannotDeleteException "CannotDelete" +// Indicates that the delete operation could not be completed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteReceiptRuleSet +func (c *SES) DeleteReceiptRuleSet(input *DeleteReceiptRuleSetInput) (*DeleteReceiptRuleSetOutput, error) { + req, out := c.DeleteReceiptRuleSetRequest(input) + return out, req.Send() +} + +// DeleteReceiptRuleSetWithContext is the same as DeleteReceiptRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteReceiptRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteReceiptRuleSetWithContext(ctx aws.Context, input *DeleteReceiptRuleSetInput, opts ...request.Option) (*DeleteReceiptRuleSetOutput, error) { + req, out := c.DeleteReceiptRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTemplate = "DeleteTemplate" + +// DeleteTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTemplate for more information on using the DeleteTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTemplateRequest method. +// req, resp := client.DeleteTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteTemplate +func (c *SES) DeleteTemplateRequest(input *DeleteTemplateInput) (req *request.Request, output *DeleteTemplateOutput) { + op := &request.Operation{ + Name: opDeleteTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTemplateInput{} + } + + output = &DeleteTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteTemplate API operation for Amazon Simple Email Service. +// +// Deletes an email template. +// +// You can execute this operation no more than once per second. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteTemplate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteTemplate +func (c *SES) DeleteTemplate(input *DeleteTemplateInput) (*DeleteTemplateOutput, error) { + req, out := c.DeleteTemplateRequest(input) + return out, req.Send() +} + +// DeleteTemplateWithContext is the same as DeleteTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteTemplateWithContext(ctx aws.Context, input *DeleteTemplateInput, opts ...request.Option) (*DeleteTemplateOutput, error) { + req, out := c.DeleteTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVerifiedEmailAddress = "DeleteVerifiedEmailAddress" + +// DeleteVerifiedEmailAddressRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVerifiedEmailAddress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVerifiedEmailAddress for more information on using the DeleteVerifiedEmailAddress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVerifiedEmailAddressRequest method. +// req, resp := client.DeleteVerifiedEmailAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteVerifiedEmailAddress +func (c *SES) DeleteVerifiedEmailAddressRequest(input *DeleteVerifiedEmailAddressInput) (req *request.Request, output *DeleteVerifiedEmailAddressOutput) { + op := &request.Operation{ + Name: opDeleteVerifiedEmailAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVerifiedEmailAddressInput{} + } + + output = &DeleteVerifiedEmailAddressOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteVerifiedEmailAddress API operation for Amazon Simple Email Service. +// +// Deprecated. Use the DeleteIdentity operation to delete email addresses and +// domains. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteVerifiedEmailAddress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteVerifiedEmailAddress +func (c *SES) DeleteVerifiedEmailAddress(input *DeleteVerifiedEmailAddressInput) (*DeleteVerifiedEmailAddressOutput, error) { + req, out := c.DeleteVerifiedEmailAddressRequest(input) + return out, req.Send() +} + +// DeleteVerifiedEmailAddressWithContext is the same as DeleteVerifiedEmailAddress with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVerifiedEmailAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteVerifiedEmailAddressWithContext(ctx aws.Context, input *DeleteVerifiedEmailAddressInput, opts ...request.Option) (*DeleteVerifiedEmailAddressOutput, error) { + req, out := c.DeleteVerifiedEmailAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeActiveReceiptRuleSet = "DescribeActiveReceiptRuleSet" + +// DescribeActiveReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeActiveReceiptRuleSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeActiveReceiptRuleSet for more information on using the DescribeActiveReceiptRuleSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeActiveReceiptRuleSetRequest method. +// req, resp := client.DescribeActiveReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DescribeActiveReceiptRuleSet +func (c *SES) DescribeActiveReceiptRuleSetRequest(input *DescribeActiveReceiptRuleSetInput) (req *request.Request, output *DescribeActiveReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDescribeActiveReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeActiveReceiptRuleSetInput{} + } + + output = &DescribeActiveReceiptRuleSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeActiveReceiptRuleSet API operation for Amazon Simple Email Service. +// +// Returns the metadata and receipt rules for the receipt rule set that is currently +// active. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DescribeActiveReceiptRuleSet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DescribeActiveReceiptRuleSet +func (c *SES) DescribeActiveReceiptRuleSet(input *DescribeActiveReceiptRuleSetInput) (*DescribeActiveReceiptRuleSetOutput, error) { + req, out := c.DescribeActiveReceiptRuleSetRequest(input) + return out, req.Send() +} + +// DescribeActiveReceiptRuleSetWithContext is the same as DescribeActiveReceiptRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeActiveReceiptRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DescribeActiveReceiptRuleSetWithContext(ctx aws.Context, input *DescribeActiveReceiptRuleSetInput, opts ...request.Option) (*DescribeActiveReceiptRuleSetOutput, error) { + req, out := c.DescribeActiveReceiptRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeConfigurationSet = "DescribeConfigurationSet" + +// DescribeConfigurationSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConfigurationSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeConfigurationSet for more information on using the DescribeConfigurationSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeConfigurationSetRequest method. +// req, resp := client.DescribeConfigurationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DescribeConfigurationSet +func (c *SES) DescribeConfigurationSetRequest(input *DescribeConfigurationSetInput) (req *request.Request, output *DescribeConfigurationSetOutput) { + op := &request.Operation{ + Name: opDescribeConfigurationSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigurationSetInput{} + } + + output = &DescribeConfigurationSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeConfigurationSet API operation for Amazon Simple Email Service. +// +// Returns the details of the specified configuration set. For information about +// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DescribeConfigurationSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DescribeConfigurationSet +func (c *SES) DescribeConfigurationSet(input *DescribeConfigurationSetInput) (*DescribeConfigurationSetOutput, error) { + req, out := c.DescribeConfigurationSetRequest(input) + return out, req.Send() +} + +// DescribeConfigurationSetWithContext is the same as DescribeConfigurationSet with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeConfigurationSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DescribeConfigurationSetWithContext(ctx aws.Context, input *DescribeConfigurationSetInput, opts ...request.Option) (*DescribeConfigurationSetOutput, error) { + req, out := c.DescribeConfigurationSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeReceiptRule = "DescribeReceiptRule" + +// DescribeReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReceiptRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReceiptRule for more information on using the DescribeReceiptRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReceiptRuleRequest method. +// req, resp := client.DescribeReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DescribeReceiptRule +func (c *SES) DescribeReceiptRuleRequest(input *DescribeReceiptRuleInput) (req *request.Request, output *DescribeReceiptRuleOutput) { + op := &request.Operation{ + Name: opDescribeReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReceiptRuleInput{} + } + + output = &DescribeReceiptRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReceiptRule API operation for Amazon Simple Email Service. +// +// Returns the details of the specified receipt rule. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DescribeReceiptRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleDoesNotExistException "RuleDoesNotExist" +// Indicates that the provided receipt rule does not exist. +// +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DescribeReceiptRule +func (c *SES) DescribeReceiptRule(input *DescribeReceiptRuleInput) (*DescribeReceiptRuleOutput, error) { + req, out := c.DescribeReceiptRuleRequest(input) + return out, req.Send() +} + +// DescribeReceiptRuleWithContext is the same as DescribeReceiptRule with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReceiptRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DescribeReceiptRuleWithContext(ctx aws.Context, input *DescribeReceiptRuleInput, opts ...request.Option) (*DescribeReceiptRuleOutput, error) { + req, out := c.DescribeReceiptRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeReceiptRuleSet = "DescribeReceiptRuleSet" + +// DescribeReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReceiptRuleSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReceiptRuleSet for more information on using the DescribeReceiptRuleSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReceiptRuleSetRequest method. +// req, resp := client.DescribeReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DescribeReceiptRuleSet +func (c *SES) DescribeReceiptRuleSetRequest(input *DescribeReceiptRuleSetInput) (req *request.Request, output *DescribeReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDescribeReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReceiptRuleSetInput{} + } + + output = &DescribeReceiptRuleSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReceiptRuleSet API operation for Amazon Simple Email Service. +// +// Returns the details of the specified receipt rule set. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). 
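+//
+//    // Illustrative sketch, assuming svc is an initialized *ses.SES client
+//    // and "my-rule-set" is a placeholder rule set name.
+//    out, err := svc.DescribeReceiptRuleSet(&ses.DescribeReceiptRuleSetInput{
+//        RuleSetName: aws.String("my-rule-set"),
+//    })
+//    if err == nil {
+//        for _, rule := range out.Rules {
+//            fmt.Println(aws.StringValue(rule.Name))
+//        }
+//    }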
+// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DescribeReceiptRuleSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DescribeReceiptRuleSet +func (c *SES) DescribeReceiptRuleSet(input *DescribeReceiptRuleSetInput) (*DescribeReceiptRuleSetOutput, error) { + req, out := c.DescribeReceiptRuleSetRequest(input) + return out, req.Send() +} + +// DescribeReceiptRuleSetWithContext is the same as DescribeReceiptRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReceiptRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DescribeReceiptRuleSetWithContext(ctx aws.Context, input *DescribeReceiptRuleSetInput, opts ...request.Option) (*DescribeReceiptRuleSetOutput, error) { + req, out := c.DescribeReceiptRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAccountSendingEnabled = "GetAccountSendingEnabled" + +// GetAccountSendingEnabledRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountSendingEnabled operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccountSendingEnabled for more information on using the GetAccountSendingEnabled +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccountSendingEnabledRequest method. +// req, resp := client.GetAccountSendingEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetAccountSendingEnabled +func (c *SES) GetAccountSendingEnabledRequest(input *GetAccountSendingEnabledInput) (req *request.Request, output *GetAccountSendingEnabledOutput) { + op := &request.Operation{ + Name: opGetAccountSendingEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountSendingEnabledInput{} + } + + output = &GetAccountSendingEnabledOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccountSendingEnabled API operation for Amazon Simple Email Service. +// +// Returns the email sending status of the Amazon SES account for the current +// region. +// +// You can execute this operation no more than once per second. 
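+//
+//    // Illustrative sketch, assuming svc is an initialized *ses.SES client.
+//    out, err := svc.GetAccountSendingEnabled(&ses.GetAccountSendingEnabledInput{})
+//    if err == nil {
+//        fmt.Println("sending enabled:", aws.BoolValue(out.Enabled))
+//    }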
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetAccountSendingEnabled for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetAccountSendingEnabled +func (c *SES) GetAccountSendingEnabled(input *GetAccountSendingEnabledInput) (*GetAccountSendingEnabledOutput, error) { + req, out := c.GetAccountSendingEnabledRequest(input) + return out, req.Send() +} + +// GetAccountSendingEnabledWithContext is the same as GetAccountSendingEnabled with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccountSendingEnabled for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetAccountSendingEnabledWithContext(ctx aws.Context, input *GetAccountSendingEnabledInput, opts ...request.Option) (*GetAccountSendingEnabledOutput, error) { + req, out := c.GetAccountSendingEnabledRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCustomVerificationEmailTemplate = "GetCustomVerificationEmailTemplate" + +// GetCustomVerificationEmailTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetCustomVerificationEmailTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCustomVerificationEmailTemplate for more information on using the GetCustomVerificationEmailTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCustomVerificationEmailTemplateRequest method. +// req, resp := client.GetCustomVerificationEmailTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetCustomVerificationEmailTemplate +func (c *SES) GetCustomVerificationEmailTemplateRequest(input *GetCustomVerificationEmailTemplateInput) (req *request.Request, output *GetCustomVerificationEmailTemplateOutput) { + op := &request.Operation{ + Name: opGetCustomVerificationEmailTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCustomVerificationEmailTemplateInput{} + } + + output = &GetCustomVerificationEmailTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCustomVerificationEmailTemplate API operation for Amazon Simple Email Service. +// +// Returns the custom email verification template for the template name you +// specify. 
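+//
+//    // Illustrative sketch, assuming svc is an initialized *ses.SES client
+//    // and "MyVerificationTemplate" is a placeholder template name.
+//    out, err := svc.GetCustomVerificationEmailTemplate(&ses.GetCustomVerificationEmailTemplateInput{
+//        TemplateName: aws.String("MyVerificationTemplate"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.TemplateSubject))
+//    }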
+// +// For more information about custom verification email templates, see Using +// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// in the Amazon SES Developer Guide. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetCustomVerificationEmailTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomVerificationEmailTemplateDoesNotExistException "CustomVerificationEmailTemplateDoesNotExist" +// Indicates that a custom verification email template with the name you specified +// does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetCustomVerificationEmailTemplate +func (c *SES) GetCustomVerificationEmailTemplate(input *GetCustomVerificationEmailTemplateInput) (*GetCustomVerificationEmailTemplateOutput, error) { + req, out := c.GetCustomVerificationEmailTemplateRequest(input) + return out, req.Send() +} + +// GetCustomVerificationEmailTemplateWithContext is the same as GetCustomVerificationEmailTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See GetCustomVerificationEmailTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetCustomVerificationEmailTemplateWithContext(ctx aws.Context, input *GetCustomVerificationEmailTemplateInput, opts ...request.Option) (*GetCustomVerificationEmailTemplateOutput, error) { + req, out := c.GetCustomVerificationEmailTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetIdentityDkimAttributes = "GetIdentityDkimAttributes" + +// GetIdentityDkimAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityDkimAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetIdentityDkimAttributes for more information on using the GetIdentityDkimAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetIdentityDkimAttributesRequest method. 
+// req, resp := client.GetIdentityDkimAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityDkimAttributes +func (c *SES) GetIdentityDkimAttributesRequest(input *GetIdentityDkimAttributesInput) (req *request.Request, output *GetIdentityDkimAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityDkimAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityDkimAttributesInput{} + } + + output = &GetIdentityDkimAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetIdentityDkimAttributes API operation for Amazon Simple Email Service. +// +// Returns the current status of Easy DKIM signing for an entity. For domain +// name identities, this operation also returns the DKIM tokens that are required +// for Easy DKIM signing, and whether Amazon SES has successfully verified that +// these tokens have been published. +// +// This operation takes a list of identities as input and returns the following +// information for each: +// +// * Whether Easy DKIM signing is enabled or disabled. +// +// * A set of DKIM tokens that represent the identity. If the identity is +// an email address, the tokens represent the domain of that address. +// +// * Whether Amazon SES has successfully verified the DKIM tokens published +// in the domain's DNS. This information is only returned for domain name +// identities, not for email addresses. +// +// This operation is throttled at one request per second and can only get DKIM +// attributes for up to 100 identities at a time. +// +// For more information about creating DNS records using DKIM tokens, go to +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetIdentityDkimAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityDkimAttributes +func (c *SES) GetIdentityDkimAttributes(input *GetIdentityDkimAttributesInput) (*GetIdentityDkimAttributesOutput, error) { + req, out := c.GetIdentityDkimAttributesRequest(input) + return out, req.Send() +} + +// GetIdentityDkimAttributesWithContext is the same as GetIdentityDkimAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See GetIdentityDkimAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetIdentityDkimAttributesWithContext(ctx aws.Context, input *GetIdentityDkimAttributesInput, opts ...request.Option) (*GetIdentityDkimAttributesOutput, error) { + req, out := c.GetIdentityDkimAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetIdentityMailFromDomainAttributes = "GetIdentityMailFromDomainAttributes" + +// GetIdentityMailFromDomainAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityMailFromDomainAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetIdentityMailFromDomainAttributes for more information on using the GetIdentityMailFromDomainAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetIdentityMailFromDomainAttributesRequest method. +// req, resp := client.GetIdentityMailFromDomainAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityMailFromDomainAttributes +func (c *SES) GetIdentityMailFromDomainAttributesRequest(input *GetIdentityMailFromDomainAttributesInput) (req *request.Request, output *GetIdentityMailFromDomainAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityMailFromDomainAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityMailFromDomainAttributesInput{} + } + + output = &GetIdentityMailFromDomainAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetIdentityMailFromDomainAttributes API operation for Amazon Simple Email Service. +// +// Returns the custom MAIL FROM attributes for a list of identities (email addresses +// : domains). +// +// This operation is throttled at one request per second and can only get custom +// MAIL FROM attributes for up to 100 identities at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetIdentityMailFromDomainAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityMailFromDomainAttributes +func (c *SES) GetIdentityMailFromDomainAttributes(input *GetIdentityMailFromDomainAttributesInput) (*GetIdentityMailFromDomainAttributesOutput, error) { + req, out := c.GetIdentityMailFromDomainAttributesRequest(input) + return out, req.Send() +} + +// GetIdentityMailFromDomainAttributesWithContext is the same as GetIdentityMailFromDomainAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See GetIdentityMailFromDomainAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SES) GetIdentityMailFromDomainAttributesWithContext(ctx aws.Context, input *GetIdentityMailFromDomainAttributesInput, opts ...request.Option) (*GetIdentityMailFromDomainAttributesOutput, error) { + req, out := c.GetIdentityMailFromDomainAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetIdentityNotificationAttributes = "GetIdentityNotificationAttributes" + +// GetIdentityNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetIdentityNotificationAttributes for more information on using the GetIdentityNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetIdentityNotificationAttributesRequest method. +// req, resp := client.GetIdentityNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityNotificationAttributes +func (c *SES) GetIdentityNotificationAttributesRequest(input *GetIdentityNotificationAttributesInput) (req *request.Request, output *GetIdentityNotificationAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityNotificationAttributesInput{} + } + + output = &GetIdentityNotificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetIdentityNotificationAttributes API operation for Amazon Simple Email Service. +// +// Given a list of verified identities (email addresses and/or domains), returns +// a structure describing identity notification attributes. +// +// This operation is throttled at one request per second and can only get notification +// attributes for up to 100 identities at a time. +// +// For more information about using notifications with Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetIdentityNotificationAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityNotificationAttributes +func (c *SES) GetIdentityNotificationAttributes(input *GetIdentityNotificationAttributesInput) (*GetIdentityNotificationAttributesOutput, error) { + req, out := c.GetIdentityNotificationAttributesRequest(input) + return out, req.Send() +} + +// GetIdentityNotificationAttributesWithContext is the same as GetIdentityNotificationAttributes with the addition of +// the ability to pass a context and additional request options. 
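+//
+//    // Illustrative sketch of passing a context with a timeout, assuming svc
+//    // is an initialized *ses.SES client; "example.com" is a placeholder
+//    // identity.
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    out, err := svc.GetIdentityNotificationAttributesWithContext(ctx,
+//        &ses.GetIdentityNotificationAttributesInput{
+//            Identities: []*string{aws.String("example.com")},
+//        })
+//    if err == nil {
+//        fmt.Println(out)
+//    }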
+// +// See GetIdentityNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetIdentityNotificationAttributesWithContext(ctx aws.Context, input *GetIdentityNotificationAttributesInput, opts ...request.Option) (*GetIdentityNotificationAttributesOutput, error) { + req, out := c.GetIdentityNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetIdentityPolicies = "GetIdentityPolicies" + +// GetIdentityPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetIdentityPolicies for more information on using the GetIdentityPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetIdentityPoliciesRequest method. +// req, resp := client.GetIdentityPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityPolicies +func (c *SES) GetIdentityPoliciesRequest(input *GetIdentityPoliciesInput) (req *request.Request, output *GetIdentityPoliciesOutput) { + op := &request.Operation{ + Name: opGetIdentityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityPoliciesInput{} + } + + output = &GetIdentityPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetIdentityPolicies API operation for Amazon Simple Email Service. +// +// Returns the requested sending authorization policies for the given identity +// (an email address or a domain). The policies are returned as a map of policy +// names to policy contents. You can retrieve a maximum of 20 policies at a +// time. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetIdentityPolicies for usage and error information. 
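+//
+//    // Illustrative sketch, assuming svc is an initialized *ses.SES client;
+//    // the identity and policy name are placeholders.
+//    out, err := svc.GetIdentityPolicies(&ses.GetIdentityPoliciesInput{
+//        Identity:    aws.String("example.com"),
+//        PolicyNames: []*string{aws.String("MyPolicy")},
+//    })
+//    if err == nil {
+//        for name, policy := range out.Policies {
+//            fmt.Println(name, aws.StringValue(policy))
+//        }
+//    }
+//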
+// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityPolicies +func (c *SES) GetIdentityPolicies(input *GetIdentityPoliciesInput) (*GetIdentityPoliciesOutput, error) { + req, out := c.GetIdentityPoliciesRequest(input) + return out, req.Send() +} + +// GetIdentityPoliciesWithContext is the same as GetIdentityPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See GetIdentityPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetIdentityPoliciesWithContext(ctx aws.Context, input *GetIdentityPoliciesInput, opts ...request.Option) (*GetIdentityPoliciesOutput, error) { + req, out := c.GetIdentityPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetIdentityVerificationAttributes = "GetIdentityVerificationAttributes" + +// GetIdentityVerificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityVerificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetIdentityVerificationAttributes for more information on using the GetIdentityVerificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetIdentityVerificationAttributesRequest method. +// req, resp := client.GetIdentityVerificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityVerificationAttributes +func (c *SES) GetIdentityVerificationAttributesRequest(input *GetIdentityVerificationAttributesInput) (req *request.Request, output *GetIdentityVerificationAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityVerificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityVerificationAttributesInput{} + } + + output = &GetIdentityVerificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetIdentityVerificationAttributes API operation for Amazon Simple Email Service. +// +// Given a list of identities (email addresses and/or domains), returns the +// verification status and (for domain identities) the verification token for +// each identity. +// +// The verification status of an email address is "Pending" until the email +// address owner clicks the link within the verification email that Amazon SES +// sent to that address. If the email address owner clicks the link within 24 +// hours, the verification status of the email address changes to "Success". +// If the link is not clicked within 24 hours, the verification status changes +// to "Failed." 
In that case, if you still want to verify the email address, +// you must restart the verification process from the beginning. +// +// For domain identities, the domain's verification status is "Pending" as Amazon +// SES searches for the required TXT record in the DNS settings of the domain. +// When Amazon SES detects the record, the domain's verification status changes +// to "Success". If Amazon SES is unable to detect the record within 72 hours, +// the domain's verification status changes to "Failed." In that case, if you +// still want to verify the domain, you must restart the verification process +// from the beginning. +// +// This operation is throttled at one request per second and can only get verification +// attributes for up to 100 identities at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetIdentityVerificationAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityVerificationAttributes +func (c *SES) GetIdentityVerificationAttributes(input *GetIdentityVerificationAttributesInput) (*GetIdentityVerificationAttributesOutput, error) { + req, out := c.GetIdentityVerificationAttributesRequest(input) + return out, req.Send() +} + +// GetIdentityVerificationAttributesWithContext is the same as GetIdentityVerificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See GetIdentityVerificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetIdentityVerificationAttributesWithContext(ctx aws.Context, input *GetIdentityVerificationAttributesInput, opts ...request.Option) (*GetIdentityVerificationAttributesOutput, error) { + req, out := c.GetIdentityVerificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSendQuota = "GetSendQuota" + +// GetSendQuotaRequest generates a "aws/request.Request" representing the +// client's request for the GetSendQuota operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSendQuota for more information on using the GetSendQuota +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSendQuotaRequest method. 
+// req, resp := client.GetSendQuotaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetSendQuota +func (c *SES) GetSendQuotaRequest(input *GetSendQuotaInput) (req *request.Request, output *GetSendQuotaOutput) { + op := &request.Operation{ + Name: opGetSendQuota, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSendQuotaInput{} + } + + output = &GetSendQuotaOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSendQuota API operation for Amazon Simple Email Service. +// +// Provides the sending limits for the Amazon SES account. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetSendQuota for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetSendQuota +func (c *SES) GetSendQuota(input *GetSendQuotaInput) (*GetSendQuotaOutput, error) { + req, out := c.GetSendQuotaRequest(input) + return out, req.Send() +} + +// GetSendQuotaWithContext is the same as GetSendQuota with the addition of +// the ability to pass a context and additional request options. +// +// See GetSendQuota for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetSendQuotaWithContext(ctx aws.Context, input *GetSendQuotaInput, opts ...request.Option) (*GetSendQuotaOutput, error) { + req, out := c.GetSendQuotaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSendStatistics = "GetSendStatistics" + +// GetSendStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetSendStatistics operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSendStatistics for more information on using the GetSendStatistics +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSendStatisticsRequest method. 
+// req, resp := client.GetSendStatisticsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetSendStatistics +func (c *SES) GetSendStatisticsRequest(input *GetSendStatisticsInput) (req *request.Request, output *GetSendStatisticsOutput) { + op := &request.Operation{ + Name: opGetSendStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSendStatisticsInput{} + } + + output = &GetSendStatisticsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSendStatistics API operation for Amazon Simple Email Service. +// +// Provides sending statistics for the current AWS Region. The result is a list +// of data points, representing the last two weeks of sending activity. Each +// data point in the list contains statistics for a 15-minute period of time. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetSendStatistics for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetSendStatistics +func (c *SES) GetSendStatistics(input *GetSendStatisticsInput) (*GetSendStatisticsOutput, error) { + req, out := c.GetSendStatisticsRequest(input) + return out, req.Send() +} + +// GetSendStatisticsWithContext is the same as GetSendStatistics with the addition of +// the ability to pass a context and additional request options. +// +// See GetSendStatistics for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetSendStatisticsWithContext(ctx aws.Context, input *GetSendStatisticsInput, opts ...request.Option) (*GetSendStatisticsOutput, error) { + req, out := c.GetSendStatisticsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetTemplate = "GetTemplate" + +// GetTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTemplate for more information on using the GetTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTemplateRequest method. 
+// req, resp := client.GetTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetTemplate +func (c *SES) GetTemplateRequest(input *GetTemplateInput) (req *request.Request, output *GetTemplateOutput) { + op := &request.Operation{ + Name: opGetTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTemplateInput{} + } + + output = &GetTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTemplate API operation for Amazon Simple Email Service. +// +// Displays the template object (which includes the Subject line, HTML part +// and text part) for the template you specify. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetTemplate +func (c *SES) GetTemplate(input *GetTemplateInput) (*GetTemplateOutput, error) { + req, out := c.GetTemplateRequest(input) + return out, req.Send() +} + +// GetTemplateWithContext is the same as GetTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See GetTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetTemplateWithContext(ctx aws.Context, input *GetTemplateInput, opts ...request.Option) (*GetTemplateOutput, error) { + req, out := c.GetTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListConfigurationSets = "ListConfigurationSets" + +// ListConfigurationSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListConfigurationSets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListConfigurationSets for more information on using the ListConfigurationSets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListConfigurationSetsRequest method. 
+// req, resp := client.ListConfigurationSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListConfigurationSets +func (c *SES) ListConfigurationSetsRequest(input *ListConfigurationSetsInput) (req *request.Request, output *ListConfigurationSetsOutput) { + op := &request.Operation{ + Name: opListConfigurationSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListConfigurationSetsInput{} + } + + output = &ListConfigurationSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListConfigurationSets API operation for Amazon Simple Email Service. +// +// Provides a list of the configuration sets associated with your Amazon SES +// account in the current AWS Region. For information about using configuration +// sets, see Monitoring Your Amazon SES Sending Activity (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html) +// in the Amazon SES Developer Guide. +// +// You can execute this operation no more than once per second. This operation +// will return up to 1,000 configuration sets each time it is run. If your Amazon +// SES account has more than 1,000 configuration sets, this operation will also +// return a NextToken element. You can then execute the ListConfigurationSets +// operation again, passing the NextToken parameter and the value of the NextToken +// element to retrieve additional results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListConfigurationSets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListConfigurationSets +func (c *SES) ListConfigurationSets(input *ListConfigurationSetsInput) (*ListConfigurationSetsOutput, error) { + req, out := c.ListConfigurationSetsRequest(input) + return out, req.Send() +} + +// ListConfigurationSetsWithContext is the same as ListConfigurationSets with the addition of +// the ability to pass a context and additional request options. +// +// See ListConfigurationSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListConfigurationSetsWithContext(ctx aws.Context, input *ListConfigurationSetsInput, opts ...request.Option) (*ListConfigurationSetsOutput, error) { + req, out := c.ListConfigurationSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListCustomVerificationEmailTemplates = "ListCustomVerificationEmailTemplates" + +// ListCustomVerificationEmailTemplatesRequest generates a "aws/request.Request" representing the +// client's request for the ListCustomVerificationEmailTemplates operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListCustomVerificationEmailTemplates for more information on using the ListCustomVerificationEmailTemplates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListCustomVerificationEmailTemplatesRequest method. +// req, resp := client.ListCustomVerificationEmailTemplatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListCustomVerificationEmailTemplates +func (c *SES) ListCustomVerificationEmailTemplatesRequest(input *ListCustomVerificationEmailTemplatesInput) (req *request.Request, output *ListCustomVerificationEmailTemplatesOutput) { + op := &request.Operation{ + Name: opListCustomVerificationEmailTemplates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCustomVerificationEmailTemplatesInput{} + } + + output = &ListCustomVerificationEmailTemplatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListCustomVerificationEmailTemplates API operation for Amazon Simple Email Service. +// +// Lists the existing custom verification email templates for your account in +// the current AWS Region. +// +// For more information about custom verification email templates, see Using +// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// in the Amazon SES Developer Guide. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListCustomVerificationEmailTemplates for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListCustomVerificationEmailTemplates +func (c *SES) ListCustomVerificationEmailTemplates(input *ListCustomVerificationEmailTemplatesInput) (*ListCustomVerificationEmailTemplatesOutput, error) { + req, out := c.ListCustomVerificationEmailTemplatesRequest(input) + return out, req.Send() +} + +// ListCustomVerificationEmailTemplatesWithContext is the same as ListCustomVerificationEmailTemplates with the addition of +// the ability to pass a context and additional request options. +// +// See ListCustomVerificationEmailTemplates for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListCustomVerificationEmailTemplatesWithContext(ctx aws.Context, input *ListCustomVerificationEmailTemplatesInput, opts ...request.Option) (*ListCustomVerificationEmailTemplatesOutput, error) { + req, out := c.ListCustomVerificationEmailTemplatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListCustomVerificationEmailTemplatesPages iterates over the pages of a ListCustomVerificationEmailTemplates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCustomVerificationEmailTemplates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCustomVerificationEmailTemplates operation. +// pageNum := 0 +// err := client.ListCustomVerificationEmailTemplatesPages(params, +// func(page *ListCustomVerificationEmailTemplatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SES) ListCustomVerificationEmailTemplatesPages(input *ListCustomVerificationEmailTemplatesInput, fn func(*ListCustomVerificationEmailTemplatesOutput, bool) bool) error { + return c.ListCustomVerificationEmailTemplatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCustomVerificationEmailTemplatesPagesWithContext same as ListCustomVerificationEmailTemplatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListCustomVerificationEmailTemplatesPagesWithContext(ctx aws.Context, input *ListCustomVerificationEmailTemplatesInput, fn func(*ListCustomVerificationEmailTemplatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCustomVerificationEmailTemplatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCustomVerificationEmailTemplatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListCustomVerificationEmailTemplatesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListIdentities = "ListIdentities" + +// ListIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentities operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListIdentities for more information on using the ListIdentities +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListIdentitiesRequest method. 
+// req, resp := client.ListIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListIdentities +func (c *SES) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Request, output *ListIdentitiesOutput) { + op := &request.Operation{ + Name: opListIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListIdentitiesInput{} + } + + output = &ListIdentitiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListIdentities API operation for Amazon Simple Email Service. +// +// Returns a list containing all of the identities (email addresses and domains) +// for your AWS account in the current AWS Region, regardless of verification +// status. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListIdentities for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListIdentities +func (c *SES) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + return out, req.Send() +} + +// ListIdentitiesWithContext is the same as ListIdentities with the addition of +// the ability to pass a context and additional request options. +// +// See ListIdentities for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListIdentitiesWithContext(ctx aws.Context, input *ListIdentitiesInput, opts ...request.Option) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListIdentitiesPages iterates over the pages of a ListIdentities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListIdentities method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListIdentities operation. +// pageNum := 0 +// err := client.ListIdentitiesPages(params, +// func(page *ListIdentitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SES) ListIdentitiesPages(input *ListIdentitiesInput, fn func(*ListIdentitiesOutput, bool) bool) error { + return c.ListIdentitiesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListIdentitiesPagesWithContext same as ListIdentitiesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListIdentitiesPagesWithContext(ctx aws.Context, input *ListIdentitiesInput, fn func(*ListIdentitiesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListIdentitiesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListIdentitiesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListIdentitiesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListIdentityPolicies = "ListIdentityPolicies" + +// ListIdentityPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentityPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListIdentityPolicies for more information on using the ListIdentityPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListIdentityPoliciesRequest method. +// req, resp := client.ListIdentityPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListIdentityPolicies +func (c *SES) ListIdentityPoliciesRequest(input *ListIdentityPoliciesInput) (req *request.Request, output *ListIdentityPoliciesOutput) { + op := &request.Operation{ + Name: opListIdentityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentityPoliciesInput{} + } + + output = &ListIdentityPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListIdentityPolicies API operation for Amazon Simple Email Service. +// +// Returns a list of sending authorization policies that are attached to the +// given identity (an email address or a domain). This API returns only a list. +// If you want the actual policy content, you can use GetIdentityPolicies. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListIdentityPolicies for usage and error information. 
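+//
+// A rough, editorial sketch of the two-step flow mentioned above: list the
+// policy names for an identity, then fetch the policy documents with
+// GetIdentityPolicies. The identity "example.com" is hypothetical, svc is an
+// assumed *SES client, and field names are taken from this package's
+// input/output types.
+//
+// names, err := svc.ListIdentityPolicies(&ses.ListIdentityPoliciesInput{
+//     Identity: aws.String("example.com"),
+// })
+// if err == nil && len(names.PolicyNames) > 0 {
+//     policies, err := svc.GetIdentityPolicies(&ses.GetIdentityPoliciesInput{
+//         Identity:    aws.String("example.com"),
+//         PolicyNames: names.PolicyNames, // at most 20 names per call
+//     })
+//     if err == nil {
+//         for name, doc := range policies.Policies {
+//             fmt.Println(name, aws.StringValue(doc)) // policy name and JSON document
+//         }
+//     }
+// }
+//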
+// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListIdentityPolicies +func (c *SES) ListIdentityPolicies(input *ListIdentityPoliciesInput) (*ListIdentityPoliciesOutput, error) { + req, out := c.ListIdentityPoliciesRequest(input) + return out, req.Send() +} + +// ListIdentityPoliciesWithContext is the same as ListIdentityPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListIdentityPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListIdentityPoliciesWithContext(ctx aws.Context, input *ListIdentityPoliciesInput, opts ...request.Option) (*ListIdentityPoliciesOutput, error) { + req, out := c.ListIdentityPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListReceiptFilters = "ListReceiptFilters" + +// ListReceiptFiltersRequest generates a "aws/request.Request" representing the +// client's request for the ListReceiptFilters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListReceiptFilters for more information on using the ListReceiptFilters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListReceiptFiltersRequest method. +// req, resp := client.ListReceiptFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListReceiptFilters +func (c *SES) ListReceiptFiltersRequest(input *ListReceiptFiltersInput) (req *request.Request, output *ListReceiptFiltersOutput) { + op := &request.Operation{ + Name: opListReceiptFilters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListReceiptFiltersInput{} + } + + output = &ListReceiptFiltersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListReceiptFilters API operation for Amazon Simple Email Service. +// +// Lists the IP address filters associated with your AWS account in the current +// AWS Region. +// +// For information about managing IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListReceiptFilters for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListReceiptFilters +func (c *SES) ListReceiptFilters(input *ListReceiptFiltersInput) (*ListReceiptFiltersOutput, error) { + req, out := c.ListReceiptFiltersRequest(input) + return out, req.Send() +} + +// ListReceiptFiltersWithContext is the same as ListReceiptFilters with the addition of +// the ability to pass a context and additional request options. +// +// See ListReceiptFilters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListReceiptFiltersWithContext(ctx aws.Context, input *ListReceiptFiltersInput, opts ...request.Option) (*ListReceiptFiltersOutput, error) { + req, out := c.ListReceiptFiltersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListReceiptRuleSets = "ListReceiptRuleSets" + +// ListReceiptRuleSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListReceiptRuleSets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListReceiptRuleSets for more information on using the ListReceiptRuleSets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListReceiptRuleSetsRequest method. +// req, resp := client.ListReceiptRuleSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListReceiptRuleSets +func (c *SES) ListReceiptRuleSetsRequest(input *ListReceiptRuleSetsInput) (req *request.Request, output *ListReceiptRuleSetsOutput) { + op := &request.Operation{ + Name: opListReceiptRuleSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListReceiptRuleSetsInput{} + } + + output = &ListReceiptRuleSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListReceiptRuleSets API operation for Amazon Simple Email Service. +// +// Lists the receipt rule sets that exist under your AWS account in the current +// AWS Region. If there are additional receipt rule sets to be retrieved, you +// will receive a NextToken that you can provide to the next call to ListReceiptRuleSets +// to retrieve the additional entries. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListReceiptRuleSets for usage and error information. 
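+//
+// A minimal, editorial sketch of listing receipt rule sets as described
+// above; svc is an assumed *SES client. If the account has more rule sets
+// than fit in one response, NextToken is set and can be passed to the next
+// call.
+//
+// out, err := svc.ListReceiptRuleSets(&ses.ListReceiptRuleSetsInput{})
+// if err != nil {
+//     log.Fatal(err)
+// }
+// for _, rs := range out.RuleSets {
+//     fmt.Println(aws.StringValue(rs.Name))
+// }
+// if out.NextToken != nil {
+//     // pass out.NextToken in a follow-up ListReceiptRuleSetsInput to fetch the rest
+// }
+//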
+// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListReceiptRuleSets +func (c *SES) ListReceiptRuleSets(input *ListReceiptRuleSetsInput) (*ListReceiptRuleSetsOutput, error) { + req, out := c.ListReceiptRuleSetsRequest(input) + return out, req.Send() +} + +// ListReceiptRuleSetsWithContext is the same as ListReceiptRuleSets with the addition of +// the ability to pass a context and additional request options. +// +// See ListReceiptRuleSets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListReceiptRuleSetsWithContext(ctx aws.Context, input *ListReceiptRuleSetsInput, opts ...request.Option) (*ListReceiptRuleSetsOutput, error) { + req, out := c.ListReceiptRuleSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTemplates = "ListTemplates" + +// ListTemplatesRequest generates a "aws/request.Request" representing the +// client's request for the ListTemplates operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTemplates for more information on using the ListTemplates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTemplatesRequest method. +// req, resp := client.ListTemplatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListTemplates +func (c *SES) ListTemplatesRequest(input *ListTemplatesInput) (req *request.Request, output *ListTemplatesOutput) { + op := &request.Operation{ + Name: opListTemplates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTemplatesInput{} + } + + output = &ListTemplatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTemplates API operation for Amazon Simple Email Service. +// +// Lists the email templates present in your Amazon SES account in the current +// AWS Region. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListTemplates for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListTemplates +func (c *SES) ListTemplates(input *ListTemplatesInput) (*ListTemplatesOutput, error) { + req, out := c.ListTemplatesRequest(input) + return out, req.Send() +} + +// ListTemplatesWithContext is the same as ListTemplates with the addition of +// the ability to pass a context and additional request options. +// +// See ListTemplates for details on how to use this API operation. 
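+//
+// An editorial sketch pairing ListTemplates with GetTemplate: list the
+// template names, then fetch one template's parts. The svc client is an
+// assumption and field names come from this package's types; treat the
+// snippet as illustrative rather than definitive.
+//
+// list, err := svc.ListTemplates(&ses.ListTemplatesInput{MaxItems: aws.Int64(10)})
+// if err == nil && len(list.TemplatesMetadata) > 0 {
+//     tpl, err := svc.GetTemplate(&ses.GetTemplateInput{
+//         TemplateName: list.TemplatesMetadata[0].Name,
+//     })
+//     if err == nil {
+//         fmt.Println(aws.StringValue(tpl.Template.SubjectPart)) // subject line of the template
+//     }
+// }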
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListTemplatesWithContext(ctx aws.Context, input *ListTemplatesInput, opts ...request.Option) (*ListTemplatesOutput, error) { + req, out := c.ListTemplatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListVerifiedEmailAddresses = "ListVerifiedEmailAddresses" + +// ListVerifiedEmailAddressesRequest generates a "aws/request.Request" representing the +// client's request for the ListVerifiedEmailAddresses operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListVerifiedEmailAddresses for more information on using the ListVerifiedEmailAddresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListVerifiedEmailAddressesRequest method. +// req, resp := client.ListVerifiedEmailAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListVerifiedEmailAddresses +func (c *SES) ListVerifiedEmailAddressesRequest(input *ListVerifiedEmailAddressesInput) (req *request.Request, output *ListVerifiedEmailAddressesOutput) { + op := &request.Operation{ + Name: opListVerifiedEmailAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListVerifiedEmailAddressesInput{} + } + + output = &ListVerifiedEmailAddressesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListVerifiedEmailAddresses API operation for Amazon Simple Email Service. +// +// Deprecated. Use the ListIdentities operation to list the email addresses +// and domains associated with your account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListVerifiedEmailAddresses for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListVerifiedEmailAddresses +func (c *SES) ListVerifiedEmailAddresses(input *ListVerifiedEmailAddressesInput) (*ListVerifiedEmailAddressesOutput, error) { + req, out := c.ListVerifiedEmailAddressesRequest(input) + return out, req.Send() +} + +// ListVerifiedEmailAddressesWithContext is the same as ListVerifiedEmailAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See ListVerifiedEmailAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SES) ListVerifiedEmailAddressesWithContext(ctx aws.Context, input *ListVerifiedEmailAddressesInput, opts ...request.Option) (*ListVerifiedEmailAddressesOutput, error) { + req, out := c.ListVerifiedEmailAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutIdentityPolicy = "PutIdentityPolicy" + +// PutIdentityPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutIdentityPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutIdentityPolicy for more information on using the PutIdentityPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutIdentityPolicyRequest method. +// req, resp := client.PutIdentityPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/PutIdentityPolicy +func (c *SES) PutIdentityPolicyRequest(input *PutIdentityPolicyInput) (req *request.Request, output *PutIdentityPolicyOutput) { + op := &request.Operation{ + Name: opPutIdentityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutIdentityPolicyInput{} + } + + output = &PutIdentityPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutIdentityPolicy API operation for Amazon Simple Email Service. +// +// Adds or updates a sending authorization policy for the specified identity +// (an email address or a domain). +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation PutIdentityPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidPolicyException "InvalidPolicy" +// Indicates that the provided policy is invalid. Check the error stack for +// more information about what caused the error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/PutIdentityPolicy +func (c *SES) PutIdentityPolicy(input *PutIdentityPolicyInput) (*PutIdentityPolicyOutput, error) { + req, out := c.PutIdentityPolicyRequest(input) + return out, req.Send() +} + +// PutIdentityPolicyWithContext is the same as PutIdentityPolicy with the addition of +// the ability to pass a context and additional request options. 
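+//
+// A hedged, editorial sketch of attaching a sending authorization policy
+// with PutIdentityPolicy. The identity, policy name, account ID and policy
+// document are hypothetical; svc is an assumed *SES client.
+//
+// policy := `{
+//   "Version": "2012-10-17",
+//   "Statement": [{
+//     "Effect": "Allow",
+//     "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
+//     "Action": ["ses:SendEmail", "ses:SendRawEmail"],
+//     "Resource": "arn:aws:ses:us-east-1:123456789012:identity/example.com"
+//   }]
+// }`
+//
+// _, err := svc.PutIdentityPolicy(&ses.PutIdentityPolicyInput{
+//     Identity:   aws.String("example.com"),
+//     PolicyName: aws.String("AuthorizeSender"),
+//     Policy:     aws.String(policy),
+// })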
+// +// See PutIdentityPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) PutIdentityPolicyWithContext(ctx aws.Context, input *PutIdentityPolicyInput, opts ...request.Option) (*PutIdentityPolicyOutput, error) { + req, out := c.PutIdentityPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReorderReceiptRuleSet = "ReorderReceiptRuleSet" + +// ReorderReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the ReorderReceiptRuleSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReorderReceiptRuleSet for more information on using the ReorderReceiptRuleSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReorderReceiptRuleSetRequest method. +// req, resp := client.ReorderReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ReorderReceiptRuleSet +func (c *SES) ReorderReceiptRuleSetRequest(input *ReorderReceiptRuleSetInput) (req *request.Request, output *ReorderReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opReorderReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReorderReceiptRuleSetInput{} + } + + output = &ReorderReceiptRuleSetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ReorderReceiptRuleSet API operation for Amazon Simple Email Service. +// +// Reorders the receipt rules within a receipt rule set. +// +// All of the rules in the rule set must be represented in this request. That +// is, this API will return an error if the reorder request doesn't explicitly +// position all of the rules. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ReorderReceiptRuleSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// * ErrCodeRuleDoesNotExistException "RuleDoesNotExist" +// Indicates that the provided receipt rule does not exist. 
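+//
+// An editorial sketch of the requirement above that a reorder request must
+// name every rule in the set: the RuleNames slice lists all rules, in the
+// desired order. The rule set and rule names are hypothetical; svc is an
+// assumed *SES client.
+//
+// _, err := svc.ReorderReceiptRuleSet(&ses.ReorderReceiptRuleSetInput{
+//     RuleSetName: aws.String("default-rule-set"),
+//     RuleNames: []*string{
+//         aws.String("spam-filter"),  // first rule to apply
+//         aws.String("store-to-s3"),  // second rule to apply
+//     },
+// })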
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ReorderReceiptRuleSet +func (c *SES) ReorderReceiptRuleSet(input *ReorderReceiptRuleSetInput) (*ReorderReceiptRuleSetOutput, error) { + req, out := c.ReorderReceiptRuleSetRequest(input) + return out, req.Send() +} + +// ReorderReceiptRuleSetWithContext is the same as ReorderReceiptRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See ReorderReceiptRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ReorderReceiptRuleSetWithContext(ctx aws.Context, input *ReorderReceiptRuleSetInput, opts ...request.Option) (*ReorderReceiptRuleSetOutput, error) { + req, out := c.ReorderReceiptRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSendBounce = "SendBounce" + +// SendBounceRequest generates a "aws/request.Request" representing the +// client's request for the SendBounce operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendBounce for more information on using the SendBounce +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendBounceRequest method. +// req, resp := client.SendBounceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendBounce +func (c *SES) SendBounceRequest(input *SendBounceInput) (req *request.Request, output *SendBounceOutput) { + op := &request.Operation{ + Name: opSendBounce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendBounceInput{} + } + + output = &SendBounceOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendBounce API operation for Amazon Simple Email Service. +// +// Generates and sends a bounce message to the sender of an email you received +// through Amazon SES. You can only use this API on an email up to 24 hours +// after you receive it. +// +// You cannot use this API to send generic bounces for mail that was not received +// by Amazon SES. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SendBounce for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMessageRejected "MessageRejected" +// Indicates that the action failed, and the message could not be sent. 
Check +// the error stack for more information about what caused the error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendBounce +func (c *SES) SendBounce(input *SendBounceInput) (*SendBounceOutput, error) { + req, out := c.SendBounceRequest(input) + return out, req.Send() +} + +// SendBounceWithContext is the same as SendBounce with the addition of +// the ability to pass a context and additional request options. +// +// See SendBounce for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SendBounceWithContext(ctx aws.Context, input *SendBounceInput, opts ...request.Option) (*SendBounceOutput, error) { + req, out := c.SendBounceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSendBulkTemplatedEmail = "SendBulkTemplatedEmail" + +// SendBulkTemplatedEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendBulkTemplatedEmail operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendBulkTemplatedEmail for more information on using the SendBulkTemplatedEmail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendBulkTemplatedEmailRequest method. +// req, resp := client.SendBulkTemplatedEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendBulkTemplatedEmail +func (c *SES) SendBulkTemplatedEmailRequest(input *SendBulkTemplatedEmailInput) (req *request.Request, output *SendBulkTemplatedEmailOutput) { + op := &request.Operation{ + Name: opSendBulkTemplatedEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendBulkTemplatedEmailInput{} + } + + output = &SendBulkTemplatedEmailOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendBulkTemplatedEmail API operation for Amazon Simple Email Service. +// +// Composes an email message to multiple destinations. The message body is created +// using an email template. +// +// In order to send email using the SendBulkTemplatedEmail operation, your call +// to the API must meet the following requirements: +// +// * The call must refer to an existing email template. You can create email +// templates using the CreateTemplate operation. +// +// * The message must be sent from a verified email address or domain. +// +// * If your account is still in the Amazon SES sandbox, you may only send +// to verified addresses or domains, or to email addresses associated with +// the Amazon SES Mailbox Simulator. For more information, see Verifying +// Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// in the Amazon SES Developer Guide. 
+// +// * The maximum message size is 10 MB. +// +// * Each Destination parameter must include at least one recipient email +// address. The recipient address can be a To: address, a CC: address, or +// a BCC: address. If a recipient email address is invalid (that is, it is +// not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire +// message will be rejected, even if the message contains other recipients +// that are valid. +// +// * The message may not include more than 50 recipients, across the To:, +// CC: and BCC: fields. If you need to send an email message to a larger +// audience, you can divide your recipient list into groups of 50 or fewer, +// and then call the SendBulkTemplatedEmail operation several times to send +// the message to each group. +// +// * The number of destinations you can contact in a single call to the API +// may be limited by your account's maximum sending rate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SendBulkTemplatedEmail for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMessageRejected "MessageRejected" +// Indicates that the action failed, and the message could not be sent. Check +// the error stack for more information about what caused the error. +// +// * ErrCodeMailFromDomainNotVerifiedException "MailFromDomainNotVerifiedException" +// Indicates that the message could not be sent because Amazon SES could not +// read the MX record required to use the specified MAIL FROM domain. For information +// about editing the custom MAIL FROM domain settings for an identity, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// * ErrCodeConfigurationSetSendingPausedException "ConfigurationSetSendingPausedException" +// Indicates that email sending is disabled for the configuration set. +// +// You can enable or disable email sending for a configuration set using UpdateConfigurationSetSendingEnabled. +// +// * ErrCodeAccountSendingPausedException "AccountSendingPausedException" +// Indicates that email sending is disabled for your entire Amazon SES account. +// +// You can enable or disable email sending for your Amazon SES account using +// UpdateAccountSendingEnabled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendBulkTemplatedEmail +func (c *SES) SendBulkTemplatedEmail(input *SendBulkTemplatedEmailInput) (*SendBulkTemplatedEmailOutput, error) { + req, out := c.SendBulkTemplatedEmailRequest(input) + return out, req.Send() +} + +// SendBulkTemplatedEmailWithContext is the same as SendBulkTemplatedEmail with the addition of +// the ability to pass a context and additional request options. +// +// See SendBulkTemplatedEmail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SendBulkTemplatedEmailWithContext(ctx aws.Context, input *SendBulkTemplatedEmailInput, opts ...request.Option) (*SendBulkTemplatedEmailOutput, error) { + req, out := c.SendBulkTemplatedEmailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSendCustomVerificationEmail = "SendCustomVerificationEmail" + +// SendCustomVerificationEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendCustomVerificationEmail operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendCustomVerificationEmail for more information on using the SendCustomVerificationEmail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendCustomVerificationEmailRequest method. +// req, resp := client.SendCustomVerificationEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendCustomVerificationEmail +func (c *SES) SendCustomVerificationEmailRequest(input *SendCustomVerificationEmailInput) (req *request.Request, output *SendCustomVerificationEmailOutput) { + op := &request.Operation{ + Name: opSendCustomVerificationEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendCustomVerificationEmailInput{} + } + + output = &SendCustomVerificationEmailOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendCustomVerificationEmail API operation for Amazon Simple Email Service. +// +// Adds an email address to the list of identities for your Amazon SES account +// in the current AWS Region and attempts to verify it. As a result of executing +// this operation, a customized verification email is sent to the specified +// address. +// +// To use this operation, you must first create a custom verification email +// template. For more information about creating and using custom verification +// email templates, see Using Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// in the Amazon SES Developer Guide. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SendCustomVerificationEmail for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMessageRejected "MessageRejected" +// Indicates that the action failed, and the message could not be sent. Check +// the error stack for more information about what caused the error. +// +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. 
+// +// * ErrCodeCustomVerificationEmailTemplateDoesNotExistException "CustomVerificationEmailTemplateDoesNotExist" +// Indicates that a custom verification email template with the name you specified +// does not exist. +// +// * ErrCodeFromEmailAddressNotVerifiedException "FromEmailAddressNotVerified" +// Indicates that the sender address specified for a custom verification email +// is not verified, and is therefore not eligible to send the custom verification +// email. +// +// * ErrCodeProductionAccessNotGrantedException "ProductionAccessNotGranted" +// Indicates that the account has not been granted production access. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendCustomVerificationEmail +func (c *SES) SendCustomVerificationEmail(input *SendCustomVerificationEmailInput) (*SendCustomVerificationEmailOutput, error) { + req, out := c.SendCustomVerificationEmailRequest(input) + return out, req.Send() +} + +// SendCustomVerificationEmailWithContext is the same as SendCustomVerificationEmail with the addition of +// the ability to pass a context and additional request options. +// +// See SendCustomVerificationEmail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SendCustomVerificationEmailWithContext(ctx aws.Context, input *SendCustomVerificationEmailInput, opts ...request.Option) (*SendCustomVerificationEmailOutput, error) { + req, out := c.SendCustomVerificationEmailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSendEmail = "SendEmail" + +// SendEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendEmail operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendEmail for more information on using the SendEmail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendEmailRequest method. +// req, resp := client.SendEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendEmail +func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, output *SendEmailOutput) { + op := &request.Operation{ + Name: opSendEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendEmailInput{} + } + + output = &SendEmailOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendEmail API operation for Amazon Simple Email Service. +// +// Composes an email message and immediately queues it for sending. In order +// to send email using the SendEmail operation, your message must meet the following +// requirements: +// +// * The message must be sent from a verified email address or domain. 
If +// you attempt to send email using a non-verified address or domain, the +// operation will result in an "Email address not verified" error. +// +// * If your account is still in the Amazon SES sandbox, you may only send +// to verified addresses or domains, or to email addresses associated with +// the Amazon SES Mailbox Simulator. For more information, see Verifying +// Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// in the Amazon SES Developer Guide. +// +// * The maximum message size is 10 MB. +// +// * The message must include at least one recipient email address. The recipient +// address can be a To: address, a CC: address, or a BCC: address. If a recipient +// email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), +// the entire message will be rejected, even if the message contains other +// recipients that are valid. +// +// * The message may not include more than 50 recipients, across the To:, +// CC: and BCC: fields. If you need to send an email message to a larger +// audience, you can divide your recipient list into groups of 50 or fewer, +// and then call the SendEmail operation several times to send the message +// to each group. +// +// For every message that you send, the total number of recipients (including +// each recipient in the To:, CC: and BCC: fields) is counted against the maximum +// number of emails you can send in a 24-hour period (your sending quota). For +// more information about sending quotas in Amazon SES, see Managing Your Amazon +// SES Sending Limits (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html) +// in the Amazon SES Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SendEmail for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMessageRejected "MessageRejected" +// Indicates that the action failed, and the message could not be sent. Check +// the error stack for more information about what caused the error. +// +// * ErrCodeMailFromDomainNotVerifiedException "MailFromDomainNotVerifiedException" +// Indicates that the message could not be sent because Amazon SES could not +// read the MX record required to use the specified MAIL FROM domain. For information +// about editing the custom MAIL FROM domain settings for an identity, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeConfigurationSetSendingPausedException "ConfigurationSetSendingPausedException" +// Indicates that email sending is disabled for the configuration set. +// +// You can enable or disable email sending for a configuration set using UpdateConfigurationSetSendingEnabled. +// +// * ErrCodeAccountSendingPausedException "AccountSendingPausedException" +// Indicates that email sending is disabled for your entire Amazon SES account. +// +// You can enable or disable email sending for your Amazon SES account using +// UpdateAccountSendingEnabled. 
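+//
+// A small, editorial sketch of the error handling pattern described above:
+// assert the returned error to awserr.Error and switch on its Code. Both
+// svc (an *SES client) and input (a *ses.SendEmailInput built elsewhere)
+// are assumptions.
+//
+// _, err := svc.SendEmail(input)
+// if err != nil {
+//     if aerr, ok := err.(awserr.Error); ok {
+//         switch aerr.Code() {
+//         case ses.ErrCodeMessageRejected:
+//             log.Println("message rejected:", aerr.Message())
+//         case ses.ErrCodeMailFromDomainNotVerifiedException:
+//             log.Println("MAIL FROM domain not verified:", aerr.Message())
+//         default:
+//             log.Println(aerr.Code(), aerr.Message())
+//         }
+//     } else {
+//         log.Println(err) // non-SES error, e.g. networking
+//     }
+// }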
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendEmail +func (c *SES) SendEmail(input *SendEmailInput) (*SendEmailOutput, error) { + req, out := c.SendEmailRequest(input) + return out, req.Send() +} + +// SendEmailWithContext is the same as SendEmail with the addition of +// the ability to pass a context and additional request options. +// +// See SendEmail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SendEmailWithContext(ctx aws.Context, input *SendEmailInput, opts ...request.Option) (*SendEmailOutput, error) { + req, out := c.SendEmailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSendRawEmail = "SendRawEmail" + +// SendRawEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendRawEmail operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendRawEmail for more information on using the SendRawEmail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendRawEmailRequest method. +// req, resp := client.SendRawEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendRawEmail +func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Request, output *SendRawEmailOutput) { + op := &request.Operation{ + Name: opSendRawEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendRawEmailInput{} + } + + output = &SendRawEmailOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendRawEmail API operation for Amazon Simple Email Service. +// +// Composes an email message and immediately queues it for sending. +// +// This operation is more flexible than the SendEmail API operation. When you +// use the SendRawEmail operation, you can specify the headers of the message +// as well as its content. This flexibility is useful, for example, when you +// want to send a multipart MIME email (such a message that contains both a +// text and an HTML version). You can also use this operation to send messages +// that include attachments. +// +// The SendRawEmail operation has the following requirements: +// +// * You can only send email from verified email addresses or domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// If you try to send email from an address that isn't verified, the operation +// results in an "Email address not verified" error. 
+// +// * If your account is still in the Amazon SES sandbox (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html), +// you can only send email to other verified addresses in your account, or +// to addresses that are associated with the Amazon SES mailbox simulator +// (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mailbox-simulator.html). +// +// * The maximum message size, including attachments, is 10 MB. +// +// * Each message has to include at least one recipient address. A recipient +// address includes any address on the To:, CC:, or BCC: lines. +// +// * If you send a single message to more than one recipient address, and +// one of the recipient addresses isn't in a valid format (that is, it's +// not in the format UserName@[SubDomain.]Domain.TopLevelDomain), Amazon +// SES rejects the entire message, even if the other addresses are valid. +// +// * Each message can include up to 50 recipient addresses across the To:, +// CC:, or BCC: lines. If you need to send a single message to more than +// 50 recipients, you have to split the list of recipient addresses into +// groups of less than 50 recipients, and send separate messages to each +// group. +// +// * Amazon SES allows you to specify 8-bit Content-Transfer-Encoding for +// MIME message parts. However, if Amazon SES has to modify the contents +// of your message (for example, if you use open and click tracking), 8-bit +// content isn't preserved. For this reason, we highly recommend that you +// encode all content that isn't 7-bit ASCII. For more information, see MIME +// Encoding (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html#send-email-mime-encoding) +// in the Amazon SES Developer Guide. +// +// Additionally, keep the following considerations in mind when using the SendRawEmail +// operation: +// +// * Although you can customize the message headers when using the SendRawEmail +// operation, Amazon SES will automatically apply its own Message-ID and +// Date headers; if you passed these headers when creating the message, they +// will be overwritten by the values that Amazon SES provides. +// +// * If you are using sending authorization to send on behalf of another +// user, SendRawEmail enables you to specify the cross-account identity for +// the email's Source, From, and Return-Path parameters in one of two ways: +// you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn +// to the API, or you can include the following X-headers in the header of +// your raw email: +// +// X-SES-SOURCE-ARN +// +// X-SES-FROM-ARN +// +// X-SES-RETURN-PATH-ARN +// +// Do not include these X-headers in the DKIM signature; Amazon SES will remove +// them before sending the email. +// +// For most common sending authorization scenarios, we recommend that you specify +// the SourceIdentityArn parameter and not the FromIdentityArn or ReturnPathIdentityArn +// parameters. If you only specify the SourceIdentityArn parameter, Amazon +// SES will set the From and Return Path addresses to the identity specified +// in SourceIdentityArn. For more information about sending authorization, +// see the Using Sending Authorization with Amazon SES (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html) +// in the Amazon SES Developer Guide. 
+// +// * For every message that you send, the total number of recipients (including +// each recipient in the To:, CC: and BCC: fields) is counted against the +// maximum number of emails you can send in a 24-hour period (your sending +// quota). For more information about sending quotas in Amazon SES, see Managing +// Your Amazon SES Sending Limits (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html) +// in the Amazon SES Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SendRawEmail for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMessageRejected "MessageRejected" +// Indicates that the action failed, and the message could not be sent. Check +// the error stack for more information about what caused the error. +// +// * ErrCodeMailFromDomainNotVerifiedException "MailFromDomainNotVerifiedException" +// Indicates that the message could not be sent because Amazon SES could not +// read the MX record required to use the specified MAIL FROM domain. For information +// about editing the custom MAIL FROM domain settings for an identity, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeConfigurationSetSendingPausedException "ConfigurationSetSendingPausedException" +// Indicates that email sending is disabled for the configuration set. +// +// You can enable or disable email sending for a configuration set using UpdateConfigurationSetSendingEnabled. +// +// * ErrCodeAccountSendingPausedException "AccountSendingPausedException" +// Indicates that email sending is disabled for your entire Amazon SES account. +// +// You can enable or disable email sending for your Amazon SES account using +// UpdateAccountSendingEnabled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendRawEmail +func (c *SES) SendRawEmail(input *SendRawEmailInput) (*SendRawEmailOutput, error) { + req, out := c.SendRawEmailRequest(input) + return out, req.Send() +} + +// SendRawEmailWithContext is the same as SendRawEmail with the addition of +// the ability to pass a context and additional request options. +// +// See SendRawEmail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SendRawEmailWithContext(ctx aws.Context, input *SendRawEmailInput, opts ...request.Option) (*SendRawEmailOutput, error) { + req, out := c.SendRawEmailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSendTemplatedEmail = "SendTemplatedEmail" + +// SendTemplatedEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendTemplatedEmail operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
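// Illustrative sketch for the SendRawEmail operation documented above, written
// from a caller's point of view. The MIME body, addresses, and the svc client
// are hypothetical; Source and Destinations may be omitted when the MIME
// headers already carry From/To, and are shown only for clarity.
raw := "From: sender@example.com\r\n" +
	"To: recipient@example.com\r\n" +
	"Subject: Test\r\n" +
	"MIME-Version: 1.0\r\n" +
	"Content-Type: text/plain; charset=UTF-8\r\n" +
	"\r\n" +
	"Hello from SendRawEmail.\r\n"

out, err := svc.SendRawEmail(&ses.SendRawEmailInput{
	Source:       aws.String("sender@example.com"),
	Destinations: []*string{aws.String("recipient@example.com")},
	RawMessage:   &ses.RawMessage{Data: []byte(raw)},
})
if err != nil {
	log.Println("SendRawEmail failed:", err)
} else {
	log.Println("raw message sent, ID:", aws.StringValue(out.MessageId))
}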
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendTemplatedEmail for more information on using the SendTemplatedEmail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendTemplatedEmailRequest method. +// req, resp := client.SendTemplatedEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendTemplatedEmail +func (c *SES) SendTemplatedEmailRequest(input *SendTemplatedEmailInput) (req *request.Request, output *SendTemplatedEmailOutput) { + op := &request.Operation{ + Name: opSendTemplatedEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendTemplatedEmailInput{} + } + + output = &SendTemplatedEmailOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendTemplatedEmail API operation for Amazon Simple Email Service. +// +// Composes an email message using an email template and immediately queues +// it for sending. +// +// In order to send email using the SendTemplatedEmail operation, your call +// to the API must meet the following requirements: +// +// * The call must refer to an existing email template. You can create email +// templates using the CreateTemplate operation. +// +// * The message must be sent from a verified email address or domain. +// +// * If your account is still in the Amazon SES sandbox, you may only send +// to verified addresses or domains, or to email addresses associated with +// the Amazon SES Mailbox Simulator. For more information, see Verifying +// Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// in the Amazon SES Developer Guide. +// +// * The maximum message size is 10 MB. +// +// * Calls to the SendTemplatedEmail operation may only include one Destination +// parameter. A destination is a set of recipients who will receive the same +// version of the email. The Destination parameter can include up to 50 recipients, +// across the To:, CC: and BCC: fields. +// +// * The Destination parameter must include at least one recipient email +// address. The recipient address can be a To: address, a CC: address, or +// a BCC: address. If a recipient email address is invalid (that is, it is +// not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire +// message will be rejected, even if the message contains other recipients +// that are valid. +// +// If your call to the SendTemplatedEmail operation includes all of the required +// parameters, Amazon SES accepts it and returns a Message ID. However, if Amazon +// SES can't render the email because the template contains errors, it doesn't +// send the email. Additionally, because it already accepted the message, Amazon +// SES doesn't return a message stating that it was unable to send the email. +// +// For these reasons, we highly recommend that you set up Amazon SES to send +// you notifications when Rendering Failure events occur. 
For more information, +// see Sending Personalized Email Using the Amazon SES API (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html) +// in the Amazon Simple Email Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SendTemplatedEmail for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMessageRejected "MessageRejected" +// Indicates that the action failed, and the message could not be sent. Check +// the error stack for more information about what caused the error. +// +// * ErrCodeMailFromDomainNotVerifiedException "MailFromDomainNotVerifiedException" +// Indicates that the message could not be sent because Amazon SES could not +// read the MX record required to use the specified MAIL FROM domain. For information +// about editing the custom MAIL FROM domain settings for an identity, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// * ErrCodeConfigurationSetSendingPausedException "ConfigurationSetSendingPausedException" +// Indicates that email sending is disabled for the configuration set. +// +// You can enable or disable email sending for a configuration set using UpdateConfigurationSetSendingEnabled. +// +// * ErrCodeAccountSendingPausedException "AccountSendingPausedException" +// Indicates that email sending is disabled for your entire Amazon SES account. +// +// You can enable or disable email sending for your Amazon SES account using +// UpdateAccountSendingEnabled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendTemplatedEmail +func (c *SES) SendTemplatedEmail(input *SendTemplatedEmailInput) (*SendTemplatedEmailOutput, error) { + req, out := c.SendTemplatedEmailRequest(input) + return out, req.Send() +} + +// SendTemplatedEmailWithContext is the same as SendTemplatedEmail with the addition of +// the ability to pass a context and additional request options. +// +// See SendTemplatedEmail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SendTemplatedEmailWithContext(ctx aws.Context, input *SendTemplatedEmailInput, opts ...request.Option) (*SendTemplatedEmailOutput, error) { + req, out := c.SendTemplatedEmailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetActiveReceiptRuleSet = "SetActiveReceiptRuleSet" + +// SetActiveReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the SetActiveReceiptRuleSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetActiveReceiptRuleSet for more information on using the SetActiveReceiptRuleSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetActiveReceiptRuleSetRequest method. +// req, resp := client.SetActiveReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetActiveReceiptRuleSet +func (c *SES) SetActiveReceiptRuleSetRequest(input *SetActiveReceiptRuleSetInput) (req *request.Request, output *SetActiveReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opSetActiveReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetActiveReceiptRuleSetInput{} + } + + output = &SetActiveReceiptRuleSetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetActiveReceiptRuleSet API operation for Amazon Simple Email Service. +// +// Sets the specified receipt rule set as the active receipt rule set. +// +// To disable your email-receiving through Amazon SES completely, you can call +// this API with RuleSetName set to null. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SetActiveReceiptRuleSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetActiveReceiptRuleSet +func (c *SES) SetActiveReceiptRuleSet(input *SetActiveReceiptRuleSetInput) (*SetActiveReceiptRuleSetOutput, error) { + req, out := c.SetActiveReceiptRuleSetRequest(input) + return out, req.Send() +} + +// SetActiveReceiptRuleSetWithContext is the same as SetActiveReceiptRuleSet with the addition of +// the ability to pass a context and additional request options. +// +// See SetActiveReceiptRuleSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SetActiveReceiptRuleSetWithContext(ctx aws.Context, input *SetActiveReceiptRuleSetInput, opts ...request.Option) (*SetActiveReceiptRuleSetOutput, error) { + req, out := c.SetActiveReceiptRuleSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opSetIdentityDkimEnabled = "SetIdentityDkimEnabled" + +// SetIdentityDkimEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityDkimEnabled operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetIdentityDkimEnabled for more information on using the SetIdentityDkimEnabled +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetIdentityDkimEnabledRequest method. +// req, resp := client.SetIdentityDkimEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityDkimEnabled +func (c *SES) SetIdentityDkimEnabledRequest(input *SetIdentityDkimEnabledInput) (req *request.Request, output *SetIdentityDkimEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityDkimEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityDkimEnabledInput{} + } + + output = &SetIdentityDkimEnabledOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetIdentityDkimEnabled API operation for Amazon Simple Email Service. +// +// Enables or disables Easy DKIM signing of email sent from an identity: +// +// * If Easy DKIM signing is enabled for a domain name identity (such as +// example.com), then Amazon SES will DKIM-sign all email sent by addresses +// under that domain name (for example, user@example.com). +// +// * If Easy DKIM signing is enabled for an email address, then Amazon SES +// will DKIM-sign all email sent by that email address. +// +// For email addresses (for example, user@example.com), you can only enable +// Easy DKIM signing if the corresponding domain (in this case, example.com) +// has been set up for Easy DKIM using the AWS Console or the VerifyDomainDkim +// operation. +// +// You can execute this operation no more than once per second. +// +// For more information about Easy DKIM signing, go to the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SetIdentityDkimEnabled for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityDkimEnabled +func (c *SES) SetIdentityDkimEnabled(input *SetIdentityDkimEnabledInput) (*SetIdentityDkimEnabledOutput, error) { + req, out := c.SetIdentityDkimEnabledRequest(input) + return out, req.Send() +} + +// SetIdentityDkimEnabledWithContext is the same as SetIdentityDkimEnabled with the addition of +// the ability to pass a context and additional request options. +// +// See SetIdentityDkimEnabled for details on how to use this API operation. 
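// Illustrative sketch for SetIdentityDkimEnabled as described above: turning
// Easy DKIM signing on for a hypothetical verified domain identity, with svc
// assumed to be an existing *ses.SES client.
_, err := svc.SetIdentityDkimEnabled(&ses.SetIdentityDkimEnabledInput{
	Identity:    aws.String("example.com"), // a verified domain identity
	DkimEnabled: aws.Bool(true),
})
if err != nil {
	log.Println("SetIdentityDkimEnabled failed:", err)
}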
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SetIdentityDkimEnabledWithContext(ctx aws.Context, input *SetIdentityDkimEnabledInput, opts ...request.Option) (*SetIdentityDkimEnabledOutput, error) { + req, out := c.SetIdentityDkimEnabledRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetIdentityFeedbackForwardingEnabled = "SetIdentityFeedbackForwardingEnabled" + +// SetIdentityFeedbackForwardingEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityFeedbackForwardingEnabled operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetIdentityFeedbackForwardingEnabled for more information on using the SetIdentityFeedbackForwardingEnabled +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetIdentityFeedbackForwardingEnabledRequest method. +// req, resp := client.SetIdentityFeedbackForwardingEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityFeedbackForwardingEnabled +func (c *SES) SetIdentityFeedbackForwardingEnabledRequest(input *SetIdentityFeedbackForwardingEnabledInput) (req *request.Request, output *SetIdentityFeedbackForwardingEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityFeedbackForwardingEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityFeedbackForwardingEnabledInput{} + } + + output = &SetIdentityFeedbackForwardingEnabledOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetIdentityFeedbackForwardingEnabled API operation for Amazon Simple Email Service. +// +// Given an identity (an email address or a domain), enables or disables whether +// Amazon SES forwards bounce and complaint notifications as email. Feedback +// forwarding can only be disabled when Amazon Simple Notification Service (Amazon +// SNS) topics are specified for both bounces and complaints. +// +// Feedback forwarding does not apply to delivery notifications. Delivery notifications +// are only available through Amazon SNS. +// +// You can execute this operation no more than once per second. +// +// For more information about using notifications with Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
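// Illustrative sketch for SetIdentityFeedbackForwardingEnabled as described
// above. Forwarding can only be turned off once bounce and complaint SNS
// topics are configured for the identity (see SetIdentityNotificationTopic
// below); the identity and svc client shown are assumptions.
_, err := svc.SetIdentityFeedbackForwardingEnabled(
	&ses.SetIdentityFeedbackForwardingEnabledInput{
		Identity:          aws.String("example.com"),
		ForwardingEnabled: aws.Bool(false),
	})
if err != nil {
	log.Println("SetIdentityFeedbackForwardingEnabled failed:", err)
}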
+// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SetIdentityFeedbackForwardingEnabled for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityFeedbackForwardingEnabled +func (c *SES) SetIdentityFeedbackForwardingEnabled(input *SetIdentityFeedbackForwardingEnabledInput) (*SetIdentityFeedbackForwardingEnabledOutput, error) { + req, out := c.SetIdentityFeedbackForwardingEnabledRequest(input) + return out, req.Send() +} + +// SetIdentityFeedbackForwardingEnabledWithContext is the same as SetIdentityFeedbackForwardingEnabled with the addition of +// the ability to pass a context and additional request options. +// +// See SetIdentityFeedbackForwardingEnabled for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SetIdentityFeedbackForwardingEnabledWithContext(ctx aws.Context, input *SetIdentityFeedbackForwardingEnabledInput, opts ...request.Option) (*SetIdentityFeedbackForwardingEnabledOutput, error) { + req, out := c.SetIdentityFeedbackForwardingEnabledRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetIdentityHeadersInNotificationsEnabled = "SetIdentityHeadersInNotificationsEnabled" + +// SetIdentityHeadersInNotificationsEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityHeadersInNotificationsEnabled operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetIdentityHeadersInNotificationsEnabled for more information on using the SetIdentityHeadersInNotificationsEnabled +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetIdentityHeadersInNotificationsEnabledRequest method. +// req, resp := client.SetIdentityHeadersInNotificationsEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityHeadersInNotificationsEnabled +func (c *SES) SetIdentityHeadersInNotificationsEnabledRequest(input *SetIdentityHeadersInNotificationsEnabledInput) (req *request.Request, output *SetIdentityHeadersInNotificationsEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityHeadersInNotificationsEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityHeadersInNotificationsEnabledInput{} + } + + output = &SetIdentityHeadersInNotificationsEnabledOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetIdentityHeadersInNotificationsEnabled API operation for Amazon Simple Email Service. 
+// +// Given an identity (an email address or a domain), sets whether Amazon SES +// includes the original email headers in the Amazon Simple Notification Service +// (Amazon SNS) notifications of a specified type. +// +// You can execute this operation no more than once per second. +// +// For more information about using notifications with Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SetIdentityHeadersInNotificationsEnabled for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityHeadersInNotificationsEnabled +func (c *SES) SetIdentityHeadersInNotificationsEnabled(input *SetIdentityHeadersInNotificationsEnabledInput) (*SetIdentityHeadersInNotificationsEnabledOutput, error) { + req, out := c.SetIdentityHeadersInNotificationsEnabledRequest(input) + return out, req.Send() +} + +// SetIdentityHeadersInNotificationsEnabledWithContext is the same as SetIdentityHeadersInNotificationsEnabled with the addition of +// the ability to pass a context and additional request options. +// +// See SetIdentityHeadersInNotificationsEnabled for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SetIdentityHeadersInNotificationsEnabledWithContext(ctx aws.Context, input *SetIdentityHeadersInNotificationsEnabledInput, opts ...request.Option) (*SetIdentityHeadersInNotificationsEnabledOutput, error) { + req, out := c.SetIdentityHeadersInNotificationsEnabledRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetIdentityMailFromDomain = "SetIdentityMailFromDomain" + +// SetIdentityMailFromDomainRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityMailFromDomain operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetIdentityMailFromDomain for more information on using the SetIdentityMailFromDomain +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetIdentityMailFromDomainRequest method. 
+// req, resp := client.SetIdentityMailFromDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityMailFromDomain +func (c *SES) SetIdentityMailFromDomainRequest(input *SetIdentityMailFromDomainInput) (req *request.Request, output *SetIdentityMailFromDomainOutput) { + op := &request.Operation{ + Name: opSetIdentityMailFromDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityMailFromDomainInput{} + } + + output = &SetIdentityMailFromDomainOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetIdentityMailFromDomain API operation for Amazon Simple Email Service. +// +// Enables or disables the custom MAIL FROM domain setup for a verified identity +// (an email address or a domain). +// +// To send emails using the specified MAIL FROM domain, you must add an MX record +// to your MAIL FROM domain's DNS settings. If you want your emails to pass +// Sender Policy Framework (SPF) checks, you must also add or update an SPF +// record. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-set.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SetIdentityMailFromDomain for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityMailFromDomain +func (c *SES) SetIdentityMailFromDomain(input *SetIdentityMailFromDomainInput) (*SetIdentityMailFromDomainOutput, error) { + req, out := c.SetIdentityMailFromDomainRequest(input) + return out, req.Send() +} + +// SetIdentityMailFromDomainWithContext is the same as SetIdentityMailFromDomain with the addition of +// the ability to pass a context and additional request options. +// +// See SetIdentityMailFromDomain for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SetIdentityMailFromDomainWithContext(ctx aws.Context, input *SetIdentityMailFromDomainInput, opts ...request.Option) (*SetIdentityMailFromDomainOutput, error) { + req, out := c.SetIdentityMailFromDomainRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetIdentityNotificationTopic = "SetIdentityNotificationTopic" + +// SetIdentityNotificationTopicRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityNotificationTopic operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See SetIdentityNotificationTopic for more information on using the SetIdentityNotificationTopic +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetIdentityNotificationTopicRequest method. +// req, resp := client.SetIdentityNotificationTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityNotificationTopic +func (c *SES) SetIdentityNotificationTopicRequest(input *SetIdentityNotificationTopicInput) (req *request.Request, output *SetIdentityNotificationTopicOutput) { + op := &request.Operation{ + Name: opSetIdentityNotificationTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityNotificationTopicInput{} + } + + output = &SetIdentityNotificationTopicOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetIdentityNotificationTopic API operation for Amazon Simple Email Service. +// +// Sets an Amazon Simple Notification Service (Amazon SNS) topic to use when +// delivering notifications. When you use this operation, you specify a verified +// identity, such as an email address or domain. When you send an email that +// uses the chosen identity in the Source field, Amazon SES sends notifications +// to the topic you specified. You can send bounce, complaint, or delivery notifications +// (or any combination of the three) to the Amazon SNS topic that you specify. +// +// You can execute this operation no more than once per second. +// +// For more information about feedback notification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SetIdentityNotificationTopic for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetIdentityNotificationTopic +func (c *SES) SetIdentityNotificationTopic(input *SetIdentityNotificationTopicInput) (*SetIdentityNotificationTopicOutput, error) { + req, out := c.SetIdentityNotificationTopicRequest(input) + return out, req.Send() +} + +// SetIdentityNotificationTopicWithContext is the same as SetIdentityNotificationTopic with the addition of +// the ability to pass a context and additional request options. +// +// See SetIdentityNotificationTopic for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SetIdentityNotificationTopicWithContext(ctx aws.Context, input *SetIdentityNotificationTopicInput, opts ...request.Option) (*SetIdentityNotificationTopicOutput, error) { + req, out := c.SetIdentityNotificationTopicRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opSetReceiptRulePosition = "SetReceiptRulePosition" + +// SetReceiptRulePositionRequest generates a "aws/request.Request" representing the +// client's request for the SetReceiptRulePosition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetReceiptRulePosition for more information on using the SetReceiptRulePosition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetReceiptRulePositionRequest method. +// req, resp := client.SetReceiptRulePositionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetReceiptRulePosition +func (c *SES) SetReceiptRulePositionRequest(input *SetReceiptRulePositionInput) (req *request.Request, output *SetReceiptRulePositionOutput) { + op := &request.Operation{ + Name: opSetReceiptRulePosition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetReceiptRulePositionInput{} + } + + output = &SetReceiptRulePositionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetReceiptRulePosition API operation for Amazon Simple Email Service. +// +// Sets the position of the specified receipt rule in the receipt rule set. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SetReceiptRulePosition for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// * ErrCodeRuleDoesNotExistException "RuleDoesNotExist" +// Indicates that the provided receipt rule does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SetReceiptRulePosition +func (c *SES) SetReceiptRulePosition(input *SetReceiptRulePositionInput) (*SetReceiptRulePositionOutput, error) { + req, out := c.SetReceiptRulePositionRequest(input) + return out, req.Send() +} + +// SetReceiptRulePositionWithContext is the same as SetReceiptRulePosition with the addition of +// the ability to pass a context and additional request options. +// +// See SetReceiptRulePosition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SES) SetReceiptRulePositionWithContext(ctx aws.Context, input *SetReceiptRulePositionInput, opts ...request.Option) (*SetReceiptRulePositionOutput, error) { + req, out := c.SetReceiptRulePositionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTestRenderTemplate = "TestRenderTemplate" + +// TestRenderTemplateRequest generates a "aws/request.Request" representing the +// client's request for the TestRenderTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TestRenderTemplate for more information on using the TestRenderTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TestRenderTemplateRequest method. +// req, resp := client.TestRenderTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/TestRenderTemplate +func (c *SES) TestRenderTemplateRequest(input *TestRenderTemplateInput) (req *request.Request, output *TestRenderTemplateOutput) { + op := &request.Operation{ + Name: opTestRenderTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestRenderTemplateInput{} + } + + output = &TestRenderTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// TestRenderTemplate API operation for Amazon Simple Email Service. +// +// Creates a preview of the MIME content of an email when provided with a template +// and a set of replacement data. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation TestRenderTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// * ErrCodeInvalidRenderingParameterException "InvalidRenderingParameter" +// Indicates that one or more of the replacement values you provided is invalid. +// This error may occur when the TemplateData object contains invalid JSON. +// +// * ErrCodeMissingRenderingAttributeException "MissingRenderingAttribute" +// Indicates that one or more of the replacement values for the specified template +// was not specified. Ensure that the TemplateData object contains references +// to all of the replacement tags in the specified template. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/TestRenderTemplate +func (c *SES) TestRenderTemplate(input *TestRenderTemplateInput) (*TestRenderTemplateOutput, error) { + req, out := c.TestRenderTemplateRequest(input) + return out, req.Send() +} + +// TestRenderTemplateWithContext is the same as TestRenderTemplate with the addition of +// the ability to pass a context and additional request options. 
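// Illustrative sketch for TestRenderTemplate as described above: previewing a
// hypothetical template named "WelcomeTemplate" with JSON replacement data
// before sending it with SendTemplatedEmail; svc is an assumed *ses.SES client.
out, err := svc.TestRenderTemplate(&ses.TestRenderTemplateInput{
	TemplateName: aws.String("WelcomeTemplate"),
	TemplateData: aws.String(`{"name":"Pat"}`),
})
if err != nil {
	log.Println("TestRenderTemplate failed:", err)
} else {
	log.Println(aws.StringValue(out.RenderedTemplate))
}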
+// +// See TestRenderTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) TestRenderTemplateWithContext(ctx aws.Context, input *TestRenderTemplateInput, opts ...request.Option) (*TestRenderTemplateOutput, error) { + req, out := c.TestRenderTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAccountSendingEnabled = "UpdateAccountSendingEnabled" + +// UpdateAccountSendingEnabledRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccountSendingEnabled operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAccountSendingEnabled for more information on using the UpdateAccountSendingEnabled +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAccountSendingEnabledRequest method. +// req, resp := client.UpdateAccountSendingEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateAccountSendingEnabled +func (c *SES) UpdateAccountSendingEnabledRequest(input *UpdateAccountSendingEnabledInput) (req *request.Request, output *UpdateAccountSendingEnabledOutput) { + op := &request.Operation{ + Name: opUpdateAccountSendingEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccountSendingEnabledInput{} + } + + output = &UpdateAccountSendingEnabledOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateAccountSendingEnabled API operation for Amazon Simple Email Service. +// +// Enables or disables email sending across your entire Amazon SES account in +// the current AWS Region. You can use this operation in conjunction with Amazon +// CloudWatch alarms to temporarily pause email sending across your Amazon SES +// account in a given AWS Region when reputation metrics (such as your bounce +// or complaint rates) reach certain thresholds. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateAccountSendingEnabled for usage and error information. 
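// Illustrative sketch for UpdateAccountSendingEnabled as described above:
// pausing all sending for the account in the current region, for example from
// a handler triggered by a CloudWatch reputation alarm. svc is an assumed
// *ses.SES client.
_, err := svc.UpdateAccountSendingEnabled(&ses.UpdateAccountSendingEnabledInput{
	Enabled: aws.Bool(false),
})
if err != nil {
	log.Println("UpdateAccountSendingEnabled failed:", err)
}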
+// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateAccountSendingEnabled +func (c *SES) UpdateAccountSendingEnabled(input *UpdateAccountSendingEnabledInput) (*UpdateAccountSendingEnabledOutput, error) { + req, out := c.UpdateAccountSendingEnabledRequest(input) + return out, req.Send() +} + +// UpdateAccountSendingEnabledWithContext is the same as UpdateAccountSendingEnabled with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAccountSendingEnabled for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) UpdateAccountSendingEnabledWithContext(ctx aws.Context, input *UpdateAccountSendingEnabledInput, opts ...request.Option) (*UpdateAccountSendingEnabledOutput, error) { + req, out := c.UpdateAccountSendingEnabledRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateConfigurationSetEventDestination = "UpdateConfigurationSetEventDestination" + +// UpdateConfigurationSetEventDestinationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConfigurationSetEventDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateConfigurationSetEventDestination for more information on using the UpdateConfigurationSetEventDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateConfigurationSetEventDestinationRequest method. +// req, resp := client.UpdateConfigurationSetEventDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateConfigurationSetEventDestination +func (c *SES) UpdateConfigurationSetEventDestinationRequest(input *UpdateConfigurationSetEventDestinationInput) (req *request.Request, output *UpdateConfigurationSetEventDestinationOutput) { + op := &request.Operation{ + Name: opUpdateConfigurationSetEventDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConfigurationSetEventDestinationInput{} + } + + output = &UpdateConfigurationSetEventDestinationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateConfigurationSetEventDestination API operation for Amazon Simple Email Service. +// +// Updates the event destination of a configuration set. Event destinations +// are associated with configuration sets, which enable you to publish email +// sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple +// Notification Service (Amazon SNS). 
For information about using configuration +// sets, see Monitoring Your Amazon SES Sending Activity (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html) +// in the Amazon SES Developer Guide. +// +// When you create or update an event destination, you must provide one, and +// only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis +// Firehose, or Amazon Simple Notification Service (Amazon SNS). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateConfigurationSetEventDestination for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeEventDestinationDoesNotExistException "EventDestinationDoesNotExist" +// Indicates that the event destination does not exist. +// +// * ErrCodeInvalidCloudWatchDestinationException "InvalidCloudWatchDestination" +// Indicates that the Amazon CloudWatch destination is invalid. See the error +// message for details. +// +// * ErrCodeInvalidFirehoseDestinationException "InvalidFirehoseDestination" +// Indicates that the Amazon Kinesis Firehose destination is invalid. See the +// error message for details. +// +// * ErrCodeInvalidSNSDestinationException "InvalidSNSDestination" +// Indicates that the Amazon Simple Notification Service (Amazon SNS) destination +// is invalid. See the error message for details. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateConfigurationSetEventDestination +func (c *SES) UpdateConfigurationSetEventDestination(input *UpdateConfigurationSetEventDestinationInput) (*UpdateConfigurationSetEventDestinationOutput, error) { + req, out := c.UpdateConfigurationSetEventDestinationRequest(input) + return out, req.Send() +} + +// UpdateConfigurationSetEventDestinationWithContext is the same as UpdateConfigurationSetEventDestination with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateConfigurationSetEventDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) UpdateConfigurationSetEventDestinationWithContext(ctx aws.Context, input *UpdateConfigurationSetEventDestinationInput, opts ...request.Option) (*UpdateConfigurationSetEventDestinationOutput, error) { + req, out := c.UpdateConfigurationSetEventDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateConfigurationSetReputationMetricsEnabled = "UpdateConfigurationSetReputationMetricsEnabled" + +// UpdateConfigurationSetReputationMetricsEnabledRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConfigurationSetReputationMetricsEnabled operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
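// Illustrative sketch for UpdateConfigurationSetEventDestination as described
// above: pointing an existing event destination at an Amazon SNS topic. The
// configuration set name, destination name, topic ARN, and svc client are all
// hypothetical values for the example.
_, err := svc.UpdateConfigurationSetEventDestination(
	&ses.UpdateConfigurationSetEventDestinationInput{
		ConfigurationSetName: aws.String("my-config-set"),
		EventDestination: &ses.EventDestination{
			Name:               aws.String("my-sns-destination"),
			Enabled:            aws.Bool(true),
			MatchingEventTypes: []*string{aws.String("bounce"), aws.String("complaint")},
			// Exactly one destination type may be supplied; SNS is used here.
			SNSDestination: &ses.SNSDestination{
				TopicARN: aws.String("arn:aws:sns:us-west-2:123456789012:ses-events"),
			},
		},
	})
if err != nil {
	log.Println("UpdateConfigurationSetEventDestination failed:", err)
}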
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateConfigurationSetReputationMetricsEnabled for more information on using the UpdateConfigurationSetReputationMetricsEnabled +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateConfigurationSetReputationMetricsEnabledRequest method. +// req, resp := client.UpdateConfigurationSetReputationMetricsEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateConfigurationSetReputationMetricsEnabled +func (c *SES) UpdateConfigurationSetReputationMetricsEnabledRequest(input *UpdateConfigurationSetReputationMetricsEnabledInput) (req *request.Request, output *UpdateConfigurationSetReputationMetricsEnabledOutput) { + op := &request.Operation{ + Name: opUpdateConfigurationSetReputationMetricsEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConfigurationSetReputationMetricsEnabledInput{} + } + + output = &UpdateConfigurationSetReputationMetricsEnabledOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateConfigurationSetReputationMetricsEnabled API operation for Amazon Simple Email Service. +// +// Enables or disables the publishing of reputation metrics for emails sent +// using a specific configuration set in a given AWS Region. Reputation metrics +// include bounce and complaint rates. These metrics are published to Amazon +// CloudWatch. By using CloudWatch, you can create alarms when bounce or complaint +// rates exceed certain thresholds. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateConfigurationSetReputationMetricsEnabled for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateConfigurationSetReputationMetricsEnabled +func (c *SES) UpdateConfigurationSetReputationMetricsEnabled(input *UpdateConfigurationSetReputationMetricsEnabledInput) (*UpdateConfigurationSetReputationMetricsEnabledOutput, error) { + req, out := c.UpdateConfigurationSetReputationMetricsEnabledRequest(input) + return out, req.Send() +} + +// UpdateConfigurationSetReputationMetricsEnabledWithContext is the same as UpdateConfigurationSetReputationMetricsEnabled with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateConfigurationSetReputationMetricsEnabled for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) UpdateConfigurationSetReputationMetricsEnabledWithContext(ctx aws.Context, input *UpdateConfigurationSetReputationMetricsEnabledInput, opts ...request.Option) (*UpdateConfigurationSetReputationMetricsEnabledOutput, error) { + req, out := c.UpdateConfigurationSetReputationMetricsEnabledRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateConfigurationSetSendingEnabled = "UpdateConfigurationSetSendingEnabled" + +// UpdateConfigurationSetSendingEnabledRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConfigurationSetSendingEnabled operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateConfigurationSetSendingEnabled for more information on using the UpdateConfigurationSetSendingEnabled +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateConfigurationSetSendingEnabledRequest method. +// req, resp := client.UpdateConfigurationSetSendingEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateConfigurationSetSendingEnabled +func (c *SES) UpdateConfigurationSetSendingEnabledRequest(input *UpdateConfigurationSetSendingEnabledInput) (req *request.Request, output *UpdateConfigurationSetSendingEnabledOutput) { + op := &request.Operation{ + Name: opUpdateConfigurationSetSendingEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConfigurationSetSendingEnabledInput{} + } + + output = &UpdateConfigurationSetSendingEnabledOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateConfigurationSetSendingEnabled API operation for Amazon Simple Email Service. +// +// Enables or disables email sending for messages sent using a specific configuration +// set in a given AWS Region. You can use this operation in conjunction with +// Amazon CloudWatch alarms to temporarily pause email sending for a configuration +// set when the reputation metrics for that configuration set (such as your +// bounce on complaint rate) exceed certain thresholds. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateConfigurationSetSendingEnabled for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. 
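+//
+// A minimal usage sketch for this operation, assuming svc is an *ses.SES client created
+// with ses.New; the configuration set name is a placeholder.
+//
+//   _, err := svc.UpdateConfigurationSetSendingEnabled(&ses.UpdateConfigurationSetSendingEnabledInput{
+//       ConfigurationSetName: aws.String("example-config-set"),
+//       Enabled:              aws.Bool(false), // pass true to resume sending
+//   })
+//   if err != nil {
+//       log.Println(err)
+//   }
+//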
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateConfigurationSetSendingEnabled +func (c *SES) UpdateConfigurationSetSendingEnabled(input *UpdateConfigurationSetSendingEnabledInput) (*UpdateConfigurationSetSendingEnabledOutput, error) { + req, out := c.UpdateConfigurationSetSendingEnabledRequest(input) + return out, req.Send() +} + +// UpdateConfigurationSetSendingEnabledWithContext is the same as UpdateConfigurationSetSendingEnabled with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateConfigurationSetSendingEnabled for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) UpdateConfigurationSetSendingEnabledWithContext(ctx aws.Context, input *UpdateConfigurationSetSendingEnabledInput, opts ...request.Option) (*UpdateConfigurationSetSendingEnabledOutput, error) { + req, out := c.UpdateConfigurationSetSendingEnabledRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateConfigurationSetTrackingOptions = "UpdateConfigurationSetTrackingOptions" + +// UpdateConfigurationSetTrackingOptionsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConfigurationSetTrackingOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateConfigurationSetTrackingOptions for more information on using the UpdateConfigurationSetTrackingOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateConfigurationSetTrackingOptionsRequest method. +// req, resp := client.UpdateConfigurationSetTrackingOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateConfigurationSetTrackingOptions +func (c *SES) UpdateConfigurationSetTrackingOptionsRequest(input *UpdateConfigurationSetTrackingOptionsInput) (req *request.Request, output *UpdateConfigurationSetTrackingOptionsOutput) { + op := &request.Operation{ + Name: opUpdateConfigurationSetTrackingOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConfigurationSetTrackingOptionsInput{} + } + + output = &UpdateConfigurationSetTrackingOptionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateConfigurationSetTrackingOptions API operation for Amazon Simple Email Service. +// +// Modifies an association between a configuration set and a custom domain for +// open and click event tracking. +// +// By default, images and links used for tracking open and click events are +// hosted on domains operated by Amazon SES. 
You can configure a subdomain of +// your own to handle these events. For information about using custom domains, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateConfigurationSetTrackingOptions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeTrackingOptionsDoesNotExistException "TrackingOptionsDoesNotExistException" +// Indicates that the TrackingOptions object you specified does not exist. +// +// * ErrCodeInvalidTrackingOptionsException "InvalidTrackingOptions" +// Indicates that the custom domain to be used for open and click tracking redirects +// is invalid. This error appears most often in the following situations: +// +// * When the tracking domain you specified is not verified in Amazon SES. +// +// * When the tracking domain you specified is not a valid domain or subdomain. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateConfigurationSetTrackingOptions +func (c *SES) UpdateConfigurationSetTrackingOptions(input *UpdateConfigurationSetTrackingOptionsInput) (*UpdateConfigurationSetTrackingOptionsOutput, error) { + req, out := c.UpdateConfigurationSetTrackingOptionsRequest(input) + return out, req.Send() +} + +// UpdateConfigurationSetTrackingOptionsWithContext is the same as UpdateConfigurationSetTrackingOptions with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateConfigurationSetTrackingOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) UpdateConfigurationSetTrackingOptionsWithContext(ctx aws.Context, input *UpdateConfigurationSetTrackingOptionsInput, opts ...request.Option) (*UpdateConfigurationSetTrackingOptionsOutput, error) { + req, out := c.UpdateConfigurationSetTrackingOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateCustomVerificationEmailTemplate = "UpdateCustomVerificationEmailTemplate" + +// UpdateCustomVerificationEmailTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCustomVerificationEmailTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateCustomVerificationEmailTemplate for more information on using the UpdateCustomVerificationEmailTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
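+//
+// A minimal usage sketch for UpdateConfigurationSetTrackingOptions (documented above),
+// assuming svc is an *ses.SES client; the configuration set name and the tracking
+// subdomain are placeholders.
+//
+//   _, err := svc.UpdateConfigurationSetTrackingOptions(&ses.UpdateConfigurationSetTrackingOptionsInput{
+//       ConfigurationSetName: aws.String("example-config-set"),
+//       TrackingOptions: &ses.TrackingOptions{
+//           CustomRedirectDomain: aws.String("click.example.com"),
+//       },
+//   })
+//   if err != nil {
+//       log.Println(err)
+//   }
+//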
+// +// +// // Example sending a request using the UpdateCustomVerificationEmailTemplateRequest method. +// req, resp := client.UpdateCustomVerificationEmailTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateCustomVerificationEmailTemplate +func (c *SES) UpdateCustomVerificationEmailTemplateRequest(input *UpdateCustomVerificationEmailTemplateInput) (req *request.Request, output *UpdateCustomVerificationEmailTemplateOutput) { + op := &request.Operation{ + Name: opUpdateCustomVerificationEmailTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateCustomVerificationEmailTemplateInput{} + } + + output = &UpdateCustomVerificationEmailTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateCustomVerificationEmailTemplate API operation for Amazon Simple Email Service. +// +// Updates an existing custom verification email template. +// +// For more information about custom verification email templates, see Using +// Custom Verification Email Templates (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html) +// in the Amazon SES Developer Guide. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateCustomVerificationEmailTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomVerificationEmailTemplateDoesNotExistException "CustomVerificationEmailTemplateDoesNotExist" +// Indicates that a custom verification email template with the name you specified +// does not exist. +// +// * ErrCodeFromEmailAddressNotVerifiedException "FromEmailAddressNotVerified" +// Indicates that the sender address specified for a custom verification email +// is not verified, and is therefore not eligible to send the custom verification +// email. +// +// * ErrCodeCustomVerificationEmailInvalidContentException "CustomVerificationEmailInvalidContent" +// Indicates that custom verification email template provided content is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateCustomVerificationEmailTemplate +func (c *SES) UpdateCustomVerificationEmailTemplate(input *UpdateCustomVerificationEmailTemplateInput) (*UpdateCustomVerificationEmailTemplateOutput, error) { + req, out := c.UpdateCustomVerificationEmailTemplateRequest(input) + return out, req.Send() +} + +// UpdateCustomVerificationEmailTemplateWithContext is the same as UpdateCustomVerificationEmailTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCustomVerificationEmailTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
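+//
+// A minimal usage sketch for UpdateCustomVerificationEmailTemplate, assuming svc is an
+// *ses.SES client; the template name, addresses, content, and URLs are placeholders.
+//
+//   _, err := svc.UpdateCustomVerificationEmailTemplate(&ses.UpdateCustomVerificationEmailTemplateInput{
+//       TemplateName:          aws.String("example-verification-template"),
+//       FromEmailAddress:      aws.String("noreply@example.com"),
+//       TemplateSubject:       aws.String("Please confirm your email address"),
+//       TemplateContent:       aws.String("<p>Select the link below to verify your address.</p>"),
+//       SuccessRedirectionURL: aws.String("https://example.com/verified"),
+//       FailureRedirectionURL: aws.String("https://example.com/verification-failed"),
+//   })
+//   if err != nil {
+//       log.Println(err)
+//   }
+//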
+func (c *SES) UpdateCustomVerificationEmailTemplateWithContext(ctx aws.Context, input *UpdateCustomVerificationEmailTemplateInput, opts ...request.Option) (*UpdateCustomVerificationEmailTemplateOutput, error) { + req, out := c.UpdateCustomVerificationEmailTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateReceiptRule = "UpdateReceiptRule" + +// UpdateReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateReceiptRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateReceiptRule for more information on using the UpdateReceiptRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateReceiptRuleRequest method. +// req, resp := client.UpdateReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateReceiptRule +func (c *SES) UpdateReceiptRuleRequest(input *UpdateReceiptRuleInput) (req *request.Request, output *UpdateReceiptRuleOutput) { + op := &request.Operation{ + Name: opUpdateReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateReceiptRuleInput{} + } + + output = &UpdateReceiptRuleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateReceiptRule API operation for Amazon Simple Email Service. +// +// Updates a receipt rule. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateReceiptRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidSnsTopicException "InvalidSnsTopic" +// Indicates that the provided Amazon SNS topic is invalid, or that Amazon SES +// could not publish to the topic, possibly due to permissions issues. For information +// about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// * ErrCodeInvalidS3ConfigurationException "InvalidS3Configuration" +// Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is +// invalid, or that Amazon SES could not publish to the bucket, possibly due +// to permissions issues. For information about giving permissions, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). 
+// +// * ErrCodeInvalidLambdaFunctionException "InvalidLambdaFunction" +// Indicates that the provided AWS Lambda function is invalid, or that Amazon +// SES could not execute the provided function, possibly due to permissions +// issues. For information about giving permissions, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// * ErrCodeRuleSetDoesNotExistException "RuleSetDoesNotExist" +// Indicates that the provided receipt rule set does not exist. +// +// * ErrCodeRuleDoesNotExistException "RuleDoesNotExist" +// Indicates that the provided receipt rule does not exist. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateReceiptRule +func (c *SES) UpdateReceiptRule(input *UpdateReceiptRuleInput) (*UpdateReceiptRuleOutput, error) { + req, out := c.UpdateReceiptRuleRequest(input) + return out, req.Send() +} + +// UpdateReceiptRuleWithContext is the same as UpdateReceiptRule with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateReceiptRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) UpdateReceiptRuleWithContext(ctx aws.Context, input *UpdateReceiptRuleInput, opts ...request.Option) (*UpdateReceiptRuleOutput, error) { + req, out := c.UpdateReceiptRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTemplate = "UpdateTemplate" + +// UpdateTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTemplate for more information on using the UpdateTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTemplateRequest method. 
+// req, resp := client.UpdateTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateTemplate +func (c *SES) UpdateTemplateRequest(input *UpdateTemplateInput) (req *request.Request, output *UpdateTemplateOutput) { + op := &request.Operation{ + Name: opUpdateTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTemplateInput{} + } + + output = &UpdateTemplateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateTemplate API operation for Amazon Simple Email Service. +// +// Updates an email template. Email templates enable you to send personalized +// email to one or more destinations in a single API operation. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// * ErrCodeInvalidTemplateException "InvalidTemplate" +// Indicates that the template that you specified could not be rendered. This +// issue may occur when a template refers to a partial that does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateTemplate +func (c *SES) UpdateTemplate(input *UpdateTemplateInput) (*UpdateTemplateOutput, error) { + req, out := c.UpdateTemplateRequest(input) + return out, req.Send() +} + +// UpdateTemplateWithContext is the same as UpdateTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) UpdateTemplateWithContext(ctx aws.Context, input *UpdateTemplateInput, opts ...request.Option) (*UpdateTemplateOutput, error) { + req, out := c.UpdateTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opVerifyDomainDkim = "VerifyDomainDkim" + +// VerifyDomainDkimRequest generates a "aws/request.Request" representing the +// client's request for the VerifyDomainDkim operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See VerifyDomainDkim for more information on using the VerifyDomainDkim +// API call, and error handling. 
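+//
+// A minimal usage sketch for UpdateTemplate (documented above), assuming svc is an
+// *ses.SES client; the template name and parts are placeholders, and {{name}} is a
+// replacement tag resolved when the template is sent.
+//
+//   _, err := svc.UpdateTemplate(&ses.UpdateTemplateInput{
+//       Template: &ses.Template{
+//           TemplateName: aws.String("example-template"),
+//           SubjectPart:  aws.String("Greetings, {{name}}"),
+//           TextPart:     aws.String("Hello {{name}}."),
+//           HtmlPart:     aws.String("<p>Hello <b>{{name}}</b>.</p>"),
+//       },
+//   })
+//   if err != nil {
+//       log.Println(err)
+//   }
+//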
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the VerifyDomainDkimRequest method. +// req, resp := client.VerifyDomainDkimRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyDomainDkim +func (c *SES) VerifyDomainDkimRequest(input *VerifyDomainDkimInput) (req *request.Request, output *VerifyDomainDkimOutput) { + op := &request.Operation{ + Name: opVerifyDomainDkim, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainDkimInput{} + } + + output = &VerifyDomainDkimOutput{} + req = c.newRequest(op, input, output) + return +} + +// VerifyDomainDkim API operation for Amazon Simple Email Service. +// +// Returns a set of DKIM tokens for a domain. DKIM tokens are character strings +// that represent your domain's identity. Using these tokens, you will need +// to create DNS CNAME records that point to DKIM public keys hosted by Amazon +// SES. Amazon Web Services will eventually detect that you have updated your +// DNS records; this detection process may take up to 72 hours. Upon successful +// detection, Amazon SES will be able to DKIM-sign email originating from that +// domain. +// +// You can execute this operation no more than once per second. +// +// To enable or disable Easy DKIM signing for a domain, use the SetIdentityDkimEnabled +// operation. +// +// For more information about creating DNS records using DKIM tokens, go to +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation VerifyDomainDkim for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyDomainDkim +func (c *SES) VerifyDomainDkim(input *VerifyDomainDkimInput) (*VerifyDomainDkimOutput, error) { + req, out := c.VerifyDomainDkimRequest(input) + return out, req.Send() +} + +// VerifyDomainDkimWithContext is the same as VerifyDomainDkim with the addition of +// the ability to pass a context and additional request options. +// +// See VerifyDomainDkim for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) VerifyDomainDkimWithContext(ctx aws.Context, input *VerifyDomainDkimInput, opts ...request.Option) (*VerifyDomainDkimOutput, error) { + req, out := c.VerifyDomainDkimRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opVerifyDomainIdentity = "VerifyDomainIdentity" + +// VerifyDomainIdentityRequest generates a "aws/request.Request" representing the +// client's request for the VerifyDomainIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
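+//
+// A minimal usage sketch for VerifyDomainDkim (documented above), assuming svc is an
+// *ses.SES client and example.com is a placeholder domain; each returned token is
+// published as a CNAME record of the form <token>._domainkey.example.com.
+//
+//   out, err := svc.VerifyDomainDkim(&ses.VerifyDomainDkimInput{
+//       Domain: aws.String("example.com"),
+//   })
+//   if err != nil {
+//       log.Println(err)
+//   } else {
+//       for _, token := range out.DkimTokens {
+//           log.Printf("CNAME %s._domainkey.example.com -> %s.dkim.amazonses.com", *token, *token)
+//       }
+//   }
+//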
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See VerifyDomainIdentity for more information on using the VerifyDomainIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the VerifyDomainIdentityRequest method. +// req, resp := client.VerifyDomainIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyDomainIdentity +func (c *SES) VerifyDomainIdentityRequest(input *VerifyDomainIdentityInput) (req *request.Request, output *VerifyDomainIdentityOutput) { + op := &request.Operation{ + Name: opVerifyDomainIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainIdentityInput{} + } + + output = &VerifyDomainIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// VerifyDomainIdentity API operation for Amazon Simple Email Service. +// +// Adds a domain to the list of identities for your Amazon SES account in the +// current AWS Region and attempts to verify it. For more information about +// verifying domains, see Verifying Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// in the Amazon SES Developer Guide. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation VerifyDomainIdentity for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyDomainIdentity +func (c *SES) VerifyDomainIdentity(input *VerifyDomainIdentityInput) (*VerifyDomainIdentityOutput, error) { + req, out := c.VerifyDomainIdentityRequest(input) + return out, req.Send() +} + +// VerifyDomainIdentityWithContext is the same as VerifyDomainIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See VerifyDomainIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) VerifyDomainIdentityWithContext(ctx aws.Context, input *VerifyDomainIdentityInput, opts ...request.Option) (*VerifyDomainIdentityOutput, error) { + req, out := c.VerifyDomainIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opVerifyEmailAddress = "VerifyEmailAddress" + +// VerifyEmailAddressRequest generates a "aws/request.Request" representing the +// client's request for the VerifyEmailAddress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
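+//
+// A minimal usage sketch for VerifyDomainIdentity (documented above), assuming svc is an
+// *ses.SES client and example.com is a placeholder domain; the returned token is
+// published as a TXT record named _amazonses.example.com.
+//
+//   out, err := svc.VerifyDomainIdentity(&ses.VerifyDomainIdentityInput{
+//       Domain: aws.String("example.com"),
+//   })
+//   if err != nil {
+//       log.Println(err)
+//   } else {
+//       log.Printf("TXT record value: %s", aws.StringValue(out.VerificationToken))
+//   }
+//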
+// the "output" return value is not valid until after Send returns without error. +// +// See VerifyEmailAddress for more information on using the VerifyEmailAddress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the VerifyEmailAddressRequest method. +// req, resp := client.VerifyEmailAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyEmailAddress +func (c *SES) VerifyEmailAddressRequest(input *VerifyEmailAddressInput) (req *request.Request, output *VerifyEmailAddressOutput) { + op := &request.Operation{ + Name: opVerifyEmailAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyEmailAddressInput{} + } + + output = &VerifyEmailAddressOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// VerifyEmailAddress API operation for Amazon Simple Email Service. +// +// Deprecated. Use the VerifyEmailIdentity operation to verify a new email address. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation VerifyEmailAddress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyEmailAddress +func (c *SES) VerifyEmailAddress(input *VerifyEmailAddressInput) (*VerifyEmailAddressOutput, error) { + req, out := c.VerifyEmailAddressRequest(input) + return out, req.Send() +} + +// VerifyEmailAddressWithContext is the same as VerifyEmailAddress with the addition of +// the ability to pass a context and additional request options. +// +// See VerifyEmailAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) VerifyEmailAddressWithContext(ctx aws.Context, input *VerifyEmailAddressInput, opts ...request.Option) (*VerifyEmailAddressOutput, error) { + req, out := c.VerifyEmailAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opVerifyEmailIdentity = "VerifyEmailIdentity" + +// VerifyEmailIdentityRequest generates a "aws/request.Request" representing the +// client's request for the VerifyEmailIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See VerifyEmailIdentity for more information on using the VerifyEmailIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
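+//
+// A minimal usage sketch for VerifyEmailIdentity, the replacement for the deprecated
+// VerifyEmailAddress operation above, assuming svc is an *ses.SES client; the address
+// is a placeholder and receives a verification email from Amazon SES.
+//
+//   _, err := svc.VerifyEmailIdentity(&ses.VerifyEmailIdentityInput{
+//       EmailAddress: aws.String("sender@example.com"),
+//   })
+//   if err != nil {
+//       log.Println(err)
+//   }
+//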
+// +// +// // Example sending a request using the VerifyEmailIdentityRequest method. +// req, resp := client.VerifyEmailIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyEmailIdentity +func (c *SES) VerifyEmailIdentityRequest(input *VerifyEmailIdentityInput) (req *request.Request, output *VerifyEmailIdentityOutput) { + op := &request.Operation{ + Name: opVerifyEmailIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyEmailIdentityInput{} + } + + output = &VerifyEmailIdentityOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// VerifyEmailIdentity API operation for Amazon Simple Email Service. +// +// Adds an email address to the list of identities for your Amazon SES account +// in the current AWS region and attempts to verify it. As a result of executing +// this operation, a verification email is sent to the specified address. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation VerifyEmailIdentity for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyEmailIdentity +func (c *SES) VerifyEmailIdentity(input *VerifyEmailIdentityInput) (*VerifyEmailIdentityOutput, error) { + req, out := c.VerifyEmailIdentityRequest(input) + return out, req.Send() +} + +// VerifyEmailIdentityWithContext is the same as VerifyEmailIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See VerifyEmailIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) VerifyEmailIdentityWithContext(ctx aws.Context, input *VerifyEmailIdentityInput, opts ...request.Option) (*VerifyEmailIdentityOutput, error) { + req, out := c.VerifyEmailIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// When included in a receipt rule, this action adds a header to the received +// email. +// +// For information about adding a header using a receipt rule, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-add-header.html). +type AddHeaderAction struct { + _ struct{} `type:"structure"` + + // The name of the header to add. Must be between 1 and 50 characters, inclusive, + // and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only. + // + // HeaderName is a required field + HeaderName *string `type:"string" required:"true"` + + // Must be less than 2048 characters, and must not contain newline characters + // ("\r" or "\n"). 
+ // + // HeaderValue is a required field + HeaderValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddHeaderAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddHeaderAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddHeaderAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddHeaderAction"} + if s.HeaderName == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderName")) + } + if s.HeaderValue == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHeaderName sets the HeaderName field's value. +func (s *AddHeaderAction) SetHeaderName(v string) *AddHeaderAction { + s.HeaderName = &v + return s +} + +// SetHeaderValue sets the HeaderValue field's value. +func (s *AddHeaderAction) SetHeaderValue(v string) *AddHeaderAction { + s.HeaderValue = &v + return s +} + +// Represents the body of the message. You can specify text, HTML, or both. +// If you use both, then the message should display correctly in the widest +// variety of email clients. +type Body struct { + _ struct{} `type:"structure"` + + // The content of the message, in HTML format. Use this for email clients that + // can process HTML. You can include clickable links, formatted text, and much + // more in an HTML message. + Html *Content `type:"structure"` + + // The content of the message, in text format. Use this for text-based email + // clients, or clients on high-latency networks (such as mobile devices). + Text *Content `type:"structure"` +} + +// String returns the string representation +func (s Body) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Body) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Body) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Body"} + if s.Html != nil { + if err := s.Html.Validate(); err != nil { + invalidParams.AddNested("Html", err.(request.ErrInvalidParams)) + } + } + if s.Text != nil { + if err := s.Text.Validate(); err != nil { + invalidParams.AddNested("Text", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHtml sets the Html field's value. +func (s *Body) SetHtml(v *Content) *Body { + s.Html = v + return s +} + +// SetText sets the Text field's value. +func (s *Body) SetText(v *Content) *Body { + s.Text = v + return s +} + +// When included in a receipt rule, this action rejects the received email by +// returning a bounce response to the sender and, optionally, publishes a notification +// to Amazon Simple Notification Service (Amazon SNS). +// +// For information about sending a bounce message in response to a received +// email, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-bounce.html). +type BounceAction struct { + _ struct{} `type:"structure"` + + // Human-readable text to include in the bounce message. + // + // Message is a required field + Message *string `type:"string" required:"true"` + + // The email address of the sender of the bounced email. This is the address + // from which the bounce message will be sent. 
+ // + // Sender is a required field + Sender *string `type:"string" required:"true"` + + // The SMTP reply code, as defined by RFC 5321 (https://tools.ietf.org/html/rfc5321). + // + // SmtpReplyCode is a required field + SmtpReplyCode *string `type:"string" required:"true"` + + // The SMTP enhanced status code, as defined by RFC 3463 (https://tools.ietf.org/html/rfc3463). + StatusCode *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s BounceAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BounceAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BounceAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BounceAction"} + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Sender == nil { + invalidParams.Add(request.NewErrParamRequired("Sender")) + } + if s.SmtpReplyCode == nil { + invalidParams.Add(request.NewErrParamRequired("SmtpReplyCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMessage sets the Message field's value. +func (s *BounceAction) SetMessage(v string) *BounceAction { + s.Message = &v + return s +} + +// SetSender sets the Sender field's value. +func (s *BounceAction) SetSender(v string) *BounceAction { + s.Sender = &v + return s +} + +// SetSmtpReplyCode sets the SmtpReplyCode field's value. +func (s *BounceAction) SetSmtpReplyCode(v string) *BounceAction { + s.SmtpReplyCode = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *BounceAction) SetStatusCode(v string) *BounceAction { + s.StatusCode = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *BounceAction) SetTopicArn(v string) *BounceAction { + s.TopicArn = &v + return s +} + +// Recipient-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type BouncedRecipientInfo struct { + _ struct{} `type:"structure"` + + // The reason for the bounce. You must provide either this parameter or RecipientDsnFields. + BounceType *string `type:"string" enum:"BounceType"` + + // The email address of the recipient of the bounced email. + // + // Recipient is a required field + Recipient *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to receive email for the recipient of the bounced email. For more information + // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). 
+ RecipientArn *string `type:"string"` + + // Recipient-related DSN fields, most of which would normally be filled in automatically + // when provided with a BounceType. You must provide either this parameter or + // BounceType. + RecipientDsnFields *RecipientDsnFields `type:"structure"` +} + +// String returns the string representation +func (s BouncedRecipientInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BouncedRecipientInfo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BouncedRecipientInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BouncedRecipientInfo"} + if s.Recipient == nil { + invalidParams.Add(request.NewErrParamRequired("Recipient")) + } + if s.RecipientDsnFields != nil { + if err := s.RecipientDsnFields.Validate(); err != nil { + invalidParams.AddNested("RecipientDsnFields", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBounceType sets the BounceType field's value. +func (s *BouncedRecipientInfo) SetBounceType(v string) *BouncedRecipientInfo { + s.BounceType = &v + return s +} + +// SetRecipient sets the Recipient field's value. +func (s *BouncedRecipientInfo) SetRecipient(v string) *BouncedRecipientInfo { + s.Recipient = &v + return s +} + +// SetRecipientArn sets the RecipientArn field's value. +func (s *BouncedRecipientInfo) SetRecipientArn(v string) *BouncedRecipientInfo { + s.RecipientArn = &v + return s +} + +// SetRecipientDsnFields sets the RecipientDsnFields field's value. +func (s *BouncedRecipientInfo) SetRecipientDsnFields(v *RecipientDsnFields) *BouncedRecipientInfo { + s.RecipientDsnFields = v + return s +} + +// An array that contains one or more Destinations, as well as the tags and +// replacement data associated with each of those Destinations. +type BulkEmailDestination struct { + _ struct{} `type:"structure"` + + // Represents the destination of the message, consisting of To:, CC:, and BCC: + // fields. + // + // Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 + // (https://tools.ietf.org/html/rfc6531). For this reason, the local part of + // a destination email address (the part of the email address that precedes + // the @ sign) may only contain 7-bit ASCII characters (https://en.wikipedia.org/wiki/Email_address#Local-part). + // If the domain part of an address (the part after the @ sign) contains non-ASCII + // characters, they must be encoded using Punycode, as described in RFC3492 + // (https://tools.ietf.org/html/rfc3492.html). + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // A list of tags, in the form of name/value pairs, to apply to an email that + // you send using SendBulkTemplatedEmail. Tags correspond to characteristics + // of the email that you define, so that you can publish email sending events. + ReplacementTags []*MessageTag `type:"list"` + + // A list of replacement values to apply to the template. This parameter is + // a JSON object, typically consisting of key-value pairs in which the keys + // correspond to replacement tags in the email template. 
+ ReplacementTemplateData *string `type:"string"` +} + +// String returns the string representation +func (s BulkEmailDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BulkEmailDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BulkEmailDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BulkEmailDestination"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.ReplacementTags != nil { + for i, v := range s.ReplacementTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplacementTags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *BulkEmailDestination) SetDestination(v *Destination) *BulkEmailDestination { + s.Destination = v + return s +} + +// SetReplacementTags sets the ReplacementTags field's value. +func (s *BulkEmailDestination) SetReplacementTags(v []*MessageTag) *BulkEmailDestination { + s.ReplacementTags = v + return s +} + +// SetReplacementTemplateData sets the ReplacementTemplateData field's value. +func (s *BulkEmailDestination) SetReplacementTemplateData(v string) *BulkEmailDestination { + s.ReplacementTemplateData = &v + return s +} + +// An object that contains the response from the SendBulkTemplatedEmail operation. +type BulkEmailDestinationStatus struct { + _ struct{} `type:"structure"` + + // A description of an error that prevented a message being sent using the SendBulkTemplatedEmail + // operation. + Error *string `type:"string"` + + // The unique message identifier returned from the SendBulkTemplatedEmail operation. + MessageId *string `type:"string"` + + // The status of a message sent using the SendBulkTemplatedEmail operation. + // + // Possible values for this parameter include: + // + // * Success: Amazon SES accepted the message, and will attempt to deliver + // it to the recipients. + // + // * MessageRejected: The message was rejected because it contained a virus. + // + // * MailFromDomainNotVerified: The sender's email address or domain was + // not verified. + // + // * ConfigurationSetDoesNotExist: The configuration set you specified does + // not exist. + // + // * TemplateDoesNotExist: The template you specified does not exist. + // + // * AccountSuspended: Your account has been shut down because of issues + // related to your email sending practices. + // + // * AccountThrottled: The number of emails you can send has been reduced + // because your account has exceeded its allocated sending limit. + // + // * AccountDailyQuotaExceeded: You have reached or exceeded the maximum + // number of emails you can send from your account in a 24-hour period. + // + // * InvalidSendingPoolName: The configuration set you specified refers to + // an IP pool that does not exist. + // + // * AccountSendingPaused: Email sending for the Amazon SES account was disabled + // using the UpdateAccountSendingEnabled operation. + // + // * ConfigurationSetSendingPaused: Email sending for this configuration + // set was disabled using the UpdateConfigurationSetSendingEnabled operation. + // + // * InvalidParameterValue: One or more of the parameters you specified when + // calling this operation was invalid. 
See the error message for additional + // information. + // + // * TransientFailure: Amazon SES was unable to process your request because + // of a temporary issue. + // + // * Failed: Amazon SES was unable to process your request. See the error + // message for additional information. + Status *string `type:"string" enum:"BulkEmailStatus"` +} + +// String returns the string representation +func (s BulkEmailDestinationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BulkEmailDestinationStatus) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *BulkEmailDestinationStatus) SetError(v string) *BulkEmailDestinationStatus { + s.Error = &v + return s +} + +// SetMessageId sets the MessageId field's value. +func (s *BulkEmailDestinationStatus) SetMessageId(v string) *BulkEmailDestinationStatus { + s.MessageId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *BulkEmailDestinationStatus) SetStatus(v string) *BulkEmailDestinationStatus { + s.Status = &v + return s +} + +// Represents a request to create a receipt rule set by cloning an existing +// one. You use receipt rule sets to receive email with Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type CloneReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the rule set to clone. + // + // OriginalRuleSetName is a required field + OriginalRuleSetName *string `type:"string" required:"true"` + + // The name of the rule set to create. The name must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Start and end with a letter or number. + // + // * Contain less than 64 characters. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CloneReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloneReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloneReceiptRuleSetInput"} + if s.OriginalRuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("OriginalRuleSetName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOriginalRuleSetName sets the OriginalRuleSetName field's value. +func (s *CloneReceiptRuleSetInput) SetOriginalRuleSetName(v string) *CloneReceiptRuleSetInput { + s.OriginalRuleSetName = &v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *CloneReceiptRuleSetInput) SetRuleSetName(v string) *CloneReceiptRuleSetInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. 
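+//
+// A minimal usage sketch for CloneReceiptRuleSet with the input type above, assuming
+// svc is an *ses.SES client; both rule set names are placeholders.
+//
+//   _, err := svc.CloneReceiptRuleSet(&ses.CloneReceiptRuleSetInput{
+//       OriginalRuleSetName: aws.String("default-rule-set"),
+//       RuleSetName:         aws.String("default-rule-set-copy"),
+//   })
+//   if err != nil {
+//       log.Println(err)
+//   }
+//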
+type CloneReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CloneReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Contains information associated with an Amazon CloudWatch event destination +// to which email sending events are published. +// +// Event destinations, such as Amazon CloudWatch, are associated with configuration +// sets, which enable you to publish email sending events. For information about +// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type CloudWatchDestination struct { + _ struct{} `type:"structure"` + + // A list of dimensions upon which to categorize your emails when you publish + // email sending events to Amazon CloudWatch. + // + // DimensionConfigurations is a required field + DimensionConfigurations []*CloudWatchDimensionConfiguration `type:"list" required:"true"` +} + +// String returns the string representation +func (s CloudWatchDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloudWatchDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloudWatchDestination"} + if s.DimensionConfigurations == nil { + invalidParams.Add(request.NewErrParamRequired("DimensionConfigurations")) + } + if s.DimensionConfigurations != nil { + for i, v := range s.DimensionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DimensionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensionConfigurations sets the DimensionConfigurations field's value. +func (s *CloudWatchDestination) SetDimensionConfigurations(v []*CloudWatchDimensionConfiguration) *CloudWatchDestination { + s.DimensionConfigurations = v + return s +} + +// Contains the dimension configuration to use when you publish email sending +// events to Amazon CloudWatch. +// +// For information about publishing email sending events to Amazon CloudWatch, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type CloudWatchDimensionConfiguration struct { + _ struct{} `type:"structure"` + + // The default value of the dimension that is published to Amazon CloudWatch + // if you do not provide the value of the dimension when you send an email. + // The default value must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Contain less than 256 characters. + // + // DefaultDimensionValue is a required field + DefaultDimensionValue *string `type:"string" required:"true"` + + // The name of an Amazon CloudWatch dimension associated with an email sending + // metric. The name must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Contain less than 256 characters. 
+ // + // DimensionName is a required field + DimensionName *string `type:"string" required:"true"` + + // The place where Amazon SES finds the value of a dimension to publish to Amazon + // CloudWatch. If you want Amazon SES to use the message tags that you specify + // using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail + // API, choose messageTag. If you want Amazon SES to use your own email headers, + // choose emailHeader. + // + // DimensionValueSource is a required field + DimensionValueSource *string `type:"string" required:"true" enum:"DimensionValueSource"` +} + +// String returns the string representation +func (s CloudWatchDimensionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchDimensionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloudWatchDimensionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloudWatchDimensionConfiguration"} + if s.DefaultDimensionValue == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultDimensionValue")) + } + if s.DimensionName == nil { + invalidParams.Add(request.NewErrParamRequired("DimensionName")) + } + if s.DimensionValueSource == nil { + invalidParams.Add(request.NewErrParamRequired("DimensionValueSource")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultDimensionValue sets the DefaultDimensionValue field's value. +func (s *CloudWatchDimensionConfiguration) SetDefaultDimensionValue(v string) *CloudWatchDimensionConfiguration { + s.DefaultDimensionValue = &v + return s +} + +// SetDimensionName sets the DimensionName field's value. +func (s *CloudWatchDimensionConfiguration) SetDimensionName(v string) *CloudWatchDimensionConfiguration { + s.DimensionName = &v + return s +} + +// SetDimensionValueSource sets the DimensionValueSource field's value. +func (s *CloudWatchDimensionConfiguration) SetDimensionValueSource(v string) *CloudWatchDimensionConfiguration { + s.DimensionValueSource = &v + return s +} + +// The name of the configuration set. +// +// Configuration sets let you create groups of rules that you can apply to the +// emails you send using Amazon SES. For more information about using configuration +// sets, see Using Amazon SES Configuration Sets (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-configuration-sets.html) +// in the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/). +type ConfigurationSet struct { + _ struct{} `type:"structure"` + + // The name of the configuration set. The name must meet the following requirements: + // + // * Contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or + // dashes (-). + // + // * Contain 64 characters or fewer. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfigurationSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationSet) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
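A minimal usage sketch (illustrative only) of the CloudWatch dimension types above, assuming placeholder dimension names; nothing is sent, the generated Validate method just checks required fields locally.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	// One dimension sourced from an X-SES-MESSAGE-TAGS message tag.
	// "messageTag" is one of the DimensionValueSource enum values.
	dim := &ses.CloudWatchDimensionConfiguration{
		DimensionName:         aws.String("campaign"),
		DefaultDimensionValue: aws.String("none"),
		DimensionValueSource:  aws.String("messageTag"),
	}

	dest := &ses.CloudWatchDestination{
		DimensionConfigurations: []*ses.CloudWatchDimensionConfiguration{dim},
	}

	// Validate catches missing required fields before any request is made.
	if err := dest.Validate(); err != nil {
		fmt.Println("invalid destination:", err)
		return
	}
	fmt.Println("destination is valid:", dest.String())
}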
+func (s *ConfigurationSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigurationSet"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *ConfigurationSet) SetName(v string) *ConfigurationSet { + s.Name = &v + return s +} + +// Represents textual data, plus an optional character set specification. +// +// By default, the text must be 7-bit ASCII, due to the constraints of the SMTP +// protocol. If the text must contain any other characters, then you must also +// specify a character set. Examples include UTF-8, ISO-8859-1, and Shift_JIS. +type Content struct { + _ struct{} `type:"structure"` + + // The character set of the content. + Charset *string `type:"string"` + + // The textual data of the content. + // + // Data is a required field + Data *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Content) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Content) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Content) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Content"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCharset sets the Charset field's value. +func (s *Content) SetCharset(v string) *Content { + s.Charset = &v + return s +} + +// SetData sets the Data field's value. +func (s *Content) SetData(v string) *Content { + s.Data = &v + return s +} + +// Represents a request to create a configuration set event destination. A configuration +// set event destination, which can be either Amazon CloudWatch or Amazon Kinesis +// Firehose, describes an AWS service in which Amazon SES publishes the email +// sending events associated with a configuration set. For information about +// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type CreateConfigurationSetEventDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that the event destination should be associated + // with. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // An object that describes the AWS service that email sending event information + // will be published to. + // + // EventDestination is a required field + EventDestination *EventDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateConfigurationSetEventDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationSetEventDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateConfigurationSetEventDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateConfigurationSetEventDestinationInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + if s.EventDestination == nil { + invalidParams.Add(request.NewErrParamRequired("EventDestination")) + } + if s.EventDestination != nil { + if err := s.EventDestination.Validate(); err != nil { + invalidParams.AddNested("EventDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *CreateConfigurationSetEventDestinationInput) SetConfigurationSetName(v string) *CreateConfigurationSetEventDestinationInput { + s.ConfigurationSetName = &v + return s +} + +// SetEventDestination sets the EventDestination field's value. +func (s *CreateConfigurationSetEventDestinationInput) SetEventDestination(v *EventDestination) *CreateConfigurationSetEventDestinationInput { + s.EventDestination = v + return s +} + +// An empty element returned on a successful request. +type CreateConfigurationSetEventDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateConfigurationSetEventDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationSetEventDestinationOutput) GoString() string { + return s.String() +} + +// Represents a request to create a configuration set. Configuration sets enable +// you to publish email sending events. For information about using configuration +// sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type CreateConfigurationSetInput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the name of the configuration set. + // + // ConfigurationSet is a required field + ConfigurationSet *ConfigurationSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateConfigurationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateConfigurationSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateConfigurationSetInput"} + if s.ConfigurationSet == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSet")) + } + if s.ConfigurationSet != nil { + if err := s.ConfigurationSet.Validate(); err != nil { + invalidParams.AddNested("ConfigurationSet", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSet sets the ConfigurationSet field's value. +func (s *CreateConfigurationSetInput) SetConfigurationSet(v *ConfigurationSet) *CreateConfigurationSetInput { + s.ConfigurationSet = v + return s +} + +// An empty element returned on a successful request. 
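A minimal usage sketch (illustrative only) of CreateConfigurationSet, assuming the us-west-2 region and a placeholder configuration set name.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := ses.New(sess)

	// The set name (placeholder) must be letters, digits, underscores, or
	// dashes, and contain 64 characters or fewer.
	_, err := svc.CreateConfigurationSet(&ses.CreateConfigurationSetInput{
		ConfigurationSet: &ses.ConfigurationSet{
			Name: aws.String("transactional-email"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("configuration set created")
}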
+type CreateConfigurationSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateConfigurationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationSetOutput) GoString() string { + return s.String() +} + +// Represents a request to create an open and click tracking option object in +// a configuration set. +type CreateConfigurationSetTrackingOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that the tracking options should be associated + // with. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // A domain that is used to redirect email recipients to an Amazon SES-operated + // domain. This domain captures open and click events generated by Amazon SES + // emails. + // + // For more information, see Configuring Custom Domains to Handle Open and Click + // Tracking (ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) + // in the Amazon SES Developer Guide. + // + // TrackingOptions is a required field + TrackingOptions *TrackingOptions `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateConfigurationSetTrackingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationSetTrackingOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateConfigurationSetTrackingOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateConfigurationSetTrackingOptionsInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + if s.TrackingOptions == nil { + invalidParams.Add(request.NewErrParamRequired("TrackingOptions")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *CreateConfigurationSetTrackingOptionsInput) SetConfigurationSetName(v string) *CreateConfigurationSetTrackingOptionsInput { + s.ConfigurationSetName = &v + return s +} + +// SetTrackingOptions sets the TrackingOptions field's value. +func (s *CreateConfigurationSetTrackingOptionsInput) SetTrackingOptions(v *TrackingOptions) *CreateConfigurationSetTrackingOptionsInput { + s.TrackingOptions = v + return s +} + +// An empty element returned on a successful request. +type CreateConfigurationSetTrackingOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateConfigurationSetTrackingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationSetTrackingOptionsOutput) GoString() string { + return s.String() +} + +// Represents a request to create a custom verification email template. +type CreateCustomVerificationEmailTemplateInput struct { + _ struct{} `type:"structure"` + + // The URL that the recipient of the verification email is sent to if his or + // her address is not successfully verified. + // + // FailureRedirectionURL is a required field + FailureRedirectionURL *string `type:"string" required:"true"` + + // The email address that the custom verification email is sent from. 
+ // + // FromEmailAddress is a required field + FromEmailAddress *string `type:"string" required:"true"` + + // The URL that the recipient of the verification email is sent to if his or + // her address is successfully verified. + // + // SuccessRedirectionURL is a required field + SuccessRedirectionURL *string `type:"string" required:"true"` + + // The content of the custom verification email. The total size of the email + // must be less than 10 MB. The message body may contain HTML, with some limitations. + // For more information, see Custom Verification Email Frequently Asked Questions + // (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html#custom-verification-emails-faq) + // in the Amazon SES Developer Guide. + // + // TemplateContent is a required field + TemplateContent *string `type:"string" required:"true"` + + // The name of the custom verification email template. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` + + // The subject line of the custom verification email. + // + // TemplateSubject is a required field + TemplateSubject *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCustomVerificationEmailTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomVerificationEmailTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCustomVerificationEmailTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCustomVerificationEmailTemplateInput"} + if s.FailureRedirectionURL == nil { + invalidParams.Add(request.NewErrParamRequired("FailureRedirectionURL")) + } + if s.FromEmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("FromEmailAddress")) + } + if s.SuccessRedirectionURL == nil { + invalidParams.Add(request.NewErrParamRequired("SuccessRedirectionURL")) + } + if s.TemplateContent == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateContent")) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + if s.TemplateSubject == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateSubject")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFailureRedirectionURL sets the FailureRedirectionURL field's value. +func (s *CreateCustomVerificationEmailTemplateInput) SetFailureRedirectionURL(v string) *CreateCustomVerificationEmailTemplateInput { + s.FailureRedirectionURL = &v + return s +} + +// SetFromEmailAddress sets the FromEmailAddress field's value. +func (s *CreateCustomVerificationEmailTemplateInput) SetFromEmailAddress(v string) *CreateCustomVerificationEmailTemplateInput { + s.FromEmailAddress = &v + return s +} + +// SetSuccessRedirectionURL sets the SuccessRedirectionURL field's value. +func (s *CreateCustomVerificationEmailTemplateInput) SetSuccessRedirectionURL(v string) *CreateCustomVerificationEmailTemplateInput { + s.SuccessRedirectionURL = &v + return s +} + +// SetTemplateContent sets the TemplateContent field's value. +func (s *CreateCustomVerificationEmailTemplateInput) SetTemplateContent(v string) *CreateCustomVerificationEmailTemplateInput { + s.TemplateContent = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. 
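A minimal usage sketch (illustrative only) of CreateCustomVerificationEmailTemplate; the region, addresses, URLs, template name, and body below are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// All six fields of the input are required.
	_, err := svc.CreateCustomVerificationEmailTemplate(
		&ses.CreateCustomVerificationEmailTemplateInput{
			TemplateName:          aws.String("signup-verification"),
			TemplateSubject:       aws.String("Please confirm your email address"),
			TemplateContent:       aws.String("<html><body>Click the link to verify.</body></html>"),
			FromEmailAddress:      aws.String("no-reply@example.com"),
			SuccessRedirectionURL: aws.String("https://example.com/verified"),
			FailureRedirectionURL: aws.String("https://example.com/not-verified"),
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("custom verification email template created")
}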
+func (s *CreateCustomVerificationEmailTemplateInput) SetTemplateName(v string) *CreateCustomVerificationEmailTemplateInput { + s.TemplateName = &v + return s +} + +// SetTemplateSubject sets the TemplateSubject field's value. +func (s *CreateCustomVerificationEmailTemplateInput) SetTemplateSubject(v string) *CreateCustomVerificationEmailTemplateInput { + s.TemplateSubject = &v + return s +} + +type CreateCustomVerificationEmailTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateCustomVerificationEmailTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomVerificationEmailTemplateOutput) GoString() string { + return s.String() +} + +// Represents a request to create a new IP address filter. You use IP address +// filters when you receive email with Amazon SES. For more information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type CreateReceiptFilterInput struct { + _ struct{} `type:"structure"` + + // A data structure that describes the IP address filter to create, which consists + // of a name, an IP address range, and whether to allow or block mail from it. + // + // Filter is a required field + Filter *ReceiptFilter `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReceiptFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptFilterInput"} + if s.Filter == nil { + invalidParams.Add(request.NewErrParamRequired("Filter")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *CreateReceiptFilterInput) SetFilter(v *ReceiptFilter) *CreateReceiptFilterInput { + s.Filter = v + return s +} + +// An empty element returned on a successful request. +type CreateReceiptFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptFilterOutput) GoString() string { + return s.String() +} + +// Represents a request to create a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type CreateReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of an existing rule after which the new rule will be placed. If + // this parameter is null, the new rule will be inserted at the beginning of + // the rule list. + After *string `type:"string"` + + // A data structure that contains the specified rule's name, actions, recipients, + // domains, enabled status, scan status, and TLS policy. 
+ // + // Rule is a required field + Rule *ReceiptRule `type:"structure" required:"true"` + + // The name of the rule set that the receipt rule will be added to. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptRuleInput"} + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + if s.Rule != nil { + if err := s.Rule.Validate(); err != nil { + invalidParams.AddNested("Rule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAfter sets the After field's value. +func (s *CreateReceiptRuleInput) SetAfter(v string) *CreateReceiptRuleInput { + s.After = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *CreateReceiptRuleInput) SetRule(v *ReceiptRule) *CreateReceiptRuleInput { + s.Rule = v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *CreateReceiptRuleInput) SetRuleSetName(v string) *CreateReceiptRuleInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. +type CreateReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleOutput) GoString() string { + return s.String() +} + +// Represents a request to create an empty receipt rule set. You use receipt +// rule sets to receive email with Amazon SES. For more information, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type CreateReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the rule set to create. The name must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Start and end with a letter or number. + // + // * Contain less than 64 characters. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleSetName sets the RuleSetName field's value. 
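A minimal usage sketch (illustrative only) of the receipt rule set calls: creating an empty rule set, adding a rule, and cloning the set. ReceiptRule is defined elsewhere in this file; the rule-set, rule, and recipient names are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	// Create an empty rule set.
	if _, err := svc.CreateReceiptRuleSet(&ses.CreateReceiptRuleSetInput{
		RuleSetName: aws.String("inbound-rules"),
	}); err != nil {
		log.Fatal(err)
	}

	// Add a rule; Name is the rule's required field, the rest are optional.
	if _, err := svc.CreateReceiptRule(&ses.CreateReceiptRuleInput{
		RuleSetName: aws.String("inbound-rules"),
		Rule: &ses.ReceiptRule{
			Name:       aws.String("catch-support"),
			Enabled:    aws.Bool(true),
			Recipients: []*string{aws.String("support@example.com")},
		},
	}); err != nil {
		log.Fatal(err)
	}

	// Copy the existing rule set under a new name.
	if _, err := svc.CloneReceiptRuleSet(&ses.CloneReceiptRuleSetInput{
		OriginalRuleSetName: aws.String("inbound-rules"),
		RuleSetName:         aws.String("inbound-rules-copy"),
	}); err != nil {
		log.Fatal(err)
	}
}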
+func (s *CreateReceiptRuleSetInput) SetRuleSetName(v string) *CreateReceiptRuleSetInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. +type CreateReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request to create an email template. For more information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +type CreateTemplateInput struct { + _ struct{} `type:"structure"` + + // The content of the email, composed of a subject line, an HTML part, and a + // text-only part. + // + // Template is a required field + Template *Template `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTemplateInput"} + if s.Template == nil { + invalidParams.Add(request.NewErrParamRequired("Template")) + } + if s.Template != nil { + if err := s.Template.Validate(); err != nil { + invalidParams.AddNested("Template", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplate sets the Template field's value. +func (s *CreateTemplateInput) SetTemplate(v *Template) *CreateTemplateInput { + s.Template = v + return s +} + +type CreateTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTemplateOutput) GoString() string { + return s.String() +} + +// Contains information about a custom verification email template. +type CustomVerificationEmailTemplate struct { + _ struct{} `type:"structure"` + + // The URL that the recipient of the verification email is sent to if his or + // her address is not successfully verified. + FailureRedirectionURL *string `type:"string"` + + // The email address that the custom verification email is sent from. + FromEmailAddress *string `type:"string"` + + // The URL that the recipient of the verification email is sent to if his or + // her address is successfully verified. + SuccessRedirectionURL *string `type:"string"` + + // The name of the custom verification email template. + TemplateName *string `type:"string"` + + // The subject line of the custom verification email. + TemplateSubject *string `type:"string"` +} + +// String returns the string representation +func (s CustomVerificationEmailTemplate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomVerificationEmailTemplate) GoString() string { + return s.String() +} + +// SetFailureRedirectionURL sets the FailureRedirectionURL field's value. 
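A minimal usage sketch (illustrative only) of CreateTemplate. Template is defined elsewhere in this file and carries the subject, HTML, and text parts; the template name and contents are placeholders, and {{name}} is a substitution variable.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	_, err := svc.CreateTemplate(&ses.CreateTemplateInput{
		Template: &ses.Template{
			TemplateName: aws.String("welcome"),
			SubjectPart:  aws.String("Welcome, {{name}}"),
			HtmlPart:     aws.String("<p>Hello {{name}}, thanks for signing up.</p>"),
			TextPart:     aws.String("Hello {{name}}, thanks for signing up."),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}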
+func (s *CustomVerificationEmailTemplate) SetFailureRedirectionURL(v string) *CustomVerificationEmailTemplate { + s.FailureRedirectionURL = &v + return s +} + +// SetFromEmailAddress sets the FromEmailAddress field's value. +func (s *CustomVerificationEmailTemplate) SetFromEmailAddress(v string) *CustomVerificationEmailTemplate { + s.FromEmailAddress = &v + return s +} + +// SetSuccessRedirectionURL sets the SuccessRedirectionURL field's value. +func (s *CustomVerificationEmailTemplate) SetSuccessRedirectionURL(v string) *CustomVerificationEmailTemplate { + s.SuccessRedirectionURL = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *CustomVerificationEmailTemplate) SetTemplateName(v string) *CustomVerificationEmailTemplate { + s.TemplateName = &v + return s +} + +// SetTemplateSubject sets the TemplateSubject field's value. +func (s *CustomVerificationEmailTemplate) SetTemplateSubject(v string) *CustomVerificationEmailTemplate { + s.TemplateSubject = &v + return s +} + +// Represents a request to delete a configuration set event destination. Configuration +// set event destinations are associated with configuration sets, which enable +// you to publish email sending events. For information about using configuration +// sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type DeleteConfigurationSetEventDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set from which to delete the event destination. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // The name of the event destination to delete. + // + // EventDestinationName is a required field + EventDestinationName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationSetEventDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationSetEventDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationSetEventDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationSetEventDestinationInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + if s.EventDestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("EventDestinationName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *DeleteConfigurationSetEventDestinationInput) SetConfigurationSetName(v string) *DeleteConfigurationSetEventDestinationInput { + s.ConfigurationSetName = &v + return s +} + +// SetEventDestinationName sets the EventDestinationName field's value. +func (s *DeleteConfigurationSetEventDestinationInput) SetEventDestinationName(v string) *DeleteConfigurationSetEventDestinationInput { + s.EventDestinationName = &v + return s +} + +// An empty element returned on a successful request. 
+type DeleteConfigurationSetEventDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigurationSetEventDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationSetEventDestinationOutput) GoString() string { + return s.String() +} + +// Represents a request to delete a configuration set. Configuration sets enable +// you to publish email sending events. For information about using configuration +// sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type DeleteConfigurationSetInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set to delete. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationSetInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *DeleteConfigurationSetInput) SetConfigurationSetName(v string) *DeleteConfigurationSetInput { + s.ConfigurationSetName = &v + return s +} + +// An empty element returned on a successful request. +type DeleteConfigurationSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigurationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationSetOutput) GoString() string { + return s.String() +} + +// Represents a request to delete open and click tracking options in a configuration +// set. +type DeleteConfigurationSetTrackingOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set from which you want to delete the tracking + // options. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationSetTrackingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationSetTrackingOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationSetTrackingOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationSetTrackingOptionsInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. 
+func (s *DeleteConfigurationSetTrackingOptionsInput) SetConfigurationSetName(v string) *DeleteConfigurationSetTrackingOptionsInput { + s.ConfigurationSetName = &v + return s +} + +// An empty element returned on a successful request. +type DeleteConfigurationSetTrackingOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigurationSetTrackingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationSetTrackingOptionsOutput) GoString() string { + return s.String() +} + +// Represents a request to delete an existing custom verification email template. +type DeleteCustomVerificationEmailTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the custom verification email template that you want to delete. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCustomVerificationEmailTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomVerificationEmailTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCustomVerificationEmailTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCustomVerificationEmailTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateName sets the TemplateName field's value. +func (s *DeleteCustomVerificationEmailTemplateInput) SetTemplateName(v string) *DeleteCustomVerificationEmailTemplateInput { + s.TemplateName = &v + return s +} + +type DeleteCustomVerificationEmailTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomVerificationEmailTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomVerificationEmailTemplateOutput) GoString() string { + return s.String() +} + +// Represents a request to delete one of your Amazon SES identities (an email +// address or domain). +type DeleteIdentityInput struct { + _ struct{} `type:"structure"` + + // The identity to be removed from the list of identities for the AWS Account. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentityInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentity sets the Identity field's value. +func (s *DeleteIdentityInput) SetIdentity(v string) *DeleteIdentityInput { + s.Identity = &v + return s +} + +// An empty element returned on a successful request. 
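A minimal usage sketch (illustrative only) of DeleteIdentity, with the service error unwrapped through awserr; the identity is a placeholder and can be a verified email address or domain.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	_, err := svc.DeleteIdentity(&ses.DeleteIdentityInput{
		Identity: aws.String("example.com"),
	})
	if aerr, ok := err.(awserr.Error); ok {
		// Service errors expose a code and a message.
		log.Fatalf("SES error %s: %s", aerr.Code(), aerr.Message())
	} else if err != nil {
		log.Fatal(err)
	}
}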
+type DeleteIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityOutput) GoString() string { + return s.String() +} + +// Represents a request to delete a sending authorization policy for an identity. +// Sending authorization is an Amazon SES feature that enables you to authorize +// other senders to use your identities. For information, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +type DeleteIdentityPolicyInput struct { + _ struct{} `type:"structure"` + + // The identity that is associated with the policy that you want to delete. + // You can specify the identity by using its name or by using its Amazon Resource + // Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` + + // The name of the policy to be deleted. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIdentityPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentityPolicyInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentity sets the Identity field's value. +func (s *DeleteIdentityPolicyInput) SetIdentity(v string) *DeleteIdentityPolicyInput { + s.Identity = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *DeleteIdentityPolicyInput) SetPolicyName(v string) *DeleteIdentityPolicyInput { + s.PolicyName = &v + return s +} + +// An empty element returned on a successful request. +type DeleteIdentityPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPolicyOutput) GoString() string { + return s.String() +} + +// Represents a request to delete an IP address filter. You use IP address filters +// when you receive email with Amazon SES. For more information, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DeleteReceiptFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the IP address filter to delete. 
+ // + // FilterName is a required field + FilterName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReceiptFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterName sets the FilterName field's value. +func (s *DeleteReceiptFilterInput) SetFilterName(v string) *DeleteReceiptFilterInput { + s.FilterName = &v + return s +} + +// An empty element returned on a successful request. +type DeleteReceiptFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptFilterOutput) GoString() string { + return s.String() +} + +// Represents a request to delete a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DeleteReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule to delete. + // + // RuleName is a required field + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that contains the receipt rule to delete. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptRuleInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleName sets the RuleName field's value. +func (s *DeleteReceiptRuleInput) SetRuleName(v string) *DeleteReceiptRuleInput { + s.RuleName = &v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *DeleteReceiptRuleInput) SetRuleSetName(v string) *DeleteReceiptRuleInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. +type DeleteReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleOutput) GoString() string { + return s.String() +} + +// Represents a request to delete a receipt rule set and all of the receipt +// rules it contains. 
You use receipt rule sets to receive email with Amazon +// SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DeleteReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to delete. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *DeleteReceiptRuleSetInput) SetRuleSetName(v string) *DeleteReceiptRuleSetInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. +type DeleteReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request to delete an email template. For more information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +type DeleteTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the template to be deleted. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateName sets the TemplateName field's value. +func (s *DeleteTemplateInput) SetTemplateName(v string) *DeleteTemplateInput { + s.TemplateName = &v + return s +} + +type DeleteTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTemplateOutput) GoString() string { + return s.String() +} + +// Represents a request to delete an email address from the list of email addresses +// you have attempted to verify under your AWS account. +type DeleteVerifiedEmailAddressInput struct { + _ struct{} `type:"structure"` + + // An email address to be removed from the list of verified addresses. 
+ // + // EmailAddress is a required field + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVerifiedEmailAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVerifiedEmailAddressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVerifiedEmailAddressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVerifiedEmailAddressInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *DeleteVerifiedEmailAddressInput) SetEmailAddress(v string) *DeleteVerifiedEmailAddressInput { + s.EmailAddress = &v + return s +} + +type DeleteVerifiedEmailAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVerifiedEmailAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVerifiedEmailAddressOutput) GoString() string { + return s.String() +} + +// Represents a request to return the metadata and receipt rules for the receipt +// rule set that is currently active. You use receipt rule sets to receive email +// with Amazon SES. For more information, see the Amazon SES Developer Guide +// (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DescribeActiveReceiptRuleSetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeActiveReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActiveReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Represents the metadata and receipt rules for the receipt rule set that is +// currently active. +type DescribeActiveReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the currently active receipt rule set. The metadata consists + // of the rule set name and a timestamp of when the rule set was created. + Metadata *ReceiptRuleSetMetadata `type:"structure"` + + // The receipt rules that belong to the active rule set. + Rules []*ReceiptRule `type:"list"` +} + +// String returns the string representation +func (s DescribeActiveReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActiveReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// SetMetadata sets the Metadata field's value. +func (s *DescribeActiveReceiptRuleSetOutput) SetMetadata(v *ReceiptRuleSetMetadata) *DescribeActiveReceiptRuleSetOutput { + s.Metadata = v + return s +} + +// SetRules sets the Rules field's value. +func (s *DescribeActiveReceiptRuleSetOutput) SetRules(v []*ReceiptRule) *DescribeActiveReceiptRuleSetOutput { + s.Rules = v + return s +} + +// Represents a request to return the details of a configuration set. Configuration +// sets enable you to publish email sending events. For information about using +// configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). 
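A minimal usage sketch (illustrative only) of DescribeActiveReceiptRuleSet: the input has no fields, and the output carries the active rule set's metadata and rules.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-west-2")})))

	out, err := svc.DescribeActiveReceiptRuleSet(&ses.DescribeActiveReceiptRuleSetInput{})
	if err != nil {
		log.Fatal(err)
	}
	if out.Metadata != nil {
		fmt.Println("active rule set:", aws.StringValue(out.Metadata.Name))
	}
	for _, rule := range out.Rules {
		fmt.Println("rule:", aws.StringValue(rule.Name))
	}
}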
+type DescribeConfigurationSetInput struct { + _ struct{} `type:"structure"` + + // A list of configuration set attributes to return. + ConfigurationSetAttributeNames []*string `type:"list"` + + // The name of the configuration set to describe. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeConfigurationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConfigurationSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConfigurationSetInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetAttributeNames sets the ConfigurationSetAttributeNames field's value. +func (s *DescribeConfigurationSetInput) SetConfigurationSetAttributeNames(v []*string) *DescribeConfigurationSetInput { + s.ConfigurationSetAttributeNames = v + return s +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *DescribeConfigurationSetInput) SetConfigurationSetName(v string) *DescribeConfigurationSetInput { + s.ConfigurationSetName = &v + return s +} + +// Represents the details of a configuration set. Configuration sets enable +// you to publish email sending events. For information about using configuration +// sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type DescribeConfigurationSetOutput struct { + _ struct{} `type:"structure"` + + // The configuration set object associated with the specified configuration + // set. + ConfigurationSet *ConfigurationSet `type:"structure"` + + // A list of event destinations associated with the configuration set. + EventDestinations []*EventDestination `type:"list"` + + // An object that represents the reputation settings for the configuration set. + ReputationOptions *ReputationOptions `type:"structure"` + + // The name of the custom open and click tracking domain associated with the + // configuration set. + TrackingOptions *TrackingOptions `type:"structure"` +} + +// String returns the string representation +func (s DescribeConfigurationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationSetOutput) GoString() string { + return s.String() +} + +// SetConfigurationSet sets the ConfigurationSet field's value. +func (s *DescribeConfigurationSetOutput) SetConfigurationSet(v *ConfigurationSet) *DescribeConfigurationSetOutput { + s.ConfigurationSet = v + return s +} + +// SetEventDestinations sets the EventDestinations field's value. +func (s *DescribeConfigurationSetOutput) SetEventDestinations(v []*EventDestination) *DescribeConfigurationSetOutput { + s.EventDestinations = v + return s +} + +// SetReputationOptions sets the ReputationOptions field's value. +func (s *DescribeConfigurationSetOutput) SetReputationOptions(v *ReputationOptions) *DescribeConfigurationSetOutput { + s.ReputationOptions = v + return s +} + +// SetTrackingOptions sets the TrackingOptions field's value. 
+func (s *DescribeConfigurationSetOutput) SetTrackingOptions(v *TrackingOptions) *DescribeConfigurationSetOutput { + s.TrackingOptions = v + return s +} + +// Represents a request to return the details of a receipt rule. You use receipt +// rules to receive email with Amazon SES. For more information, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DescribeReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule. + // + // RuleName is a required field + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that the receipt rule belongs to. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReceiptRuleInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleName sets the RuleName field's value. +func (s *DescribeReceiptRuleInput) SetRuleName(v string) *DescribeReceiptRuleInput { + s.RuleName = &v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *DescribeReceiptRuleInput) SetRuleSetName(v string) *DescribeReceiptRuleInput { + s.RuleSetName = &v + return s +} + +// Represents the details of a receipt rule. +type DescribeReceiptRuleOutput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the specified receipt rule's name, actions, + // recipients, domains, enabled status, scan status, and Transport Layer Security + // (TLS) policy. + Rule *ReceiptRule `type:"structure"` +} + +// String returns the string representation +func (s DescribeReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleOutput) GoString() string { + return s.String() +} + +// SetRule sets the Rule field's value. +func (s *DescribeReceiptRuleOutput) SetRule(v *ReceiptRule) *DescribeReceiptRuleOutput { + s.Rule = v + return s +} + +// Represents a request to return the details of a receipt rule set. You use +// receipt rule sets to receive email with Amazon SES. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type DescribeReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to describe. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *DescribeReceiptRuleSetInput) SetRuleSetName(v string) *DescribeReceiptRuleSetInput { + s.RuleSetName = &v + return s +} + +// Represents the details of the specified receipt rule set. +type DescribeReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the receipt rule set, which consists of the rule set name + // and the timestamp of when the rule set was created. + Metadata *ReceiptRuleSetMetadata `type:"structure"` + + // A list of the receipt rules that belong to the specified receipt rule set. + Rules []*ReceiptRule `type:"list"` +} + +// String returns the string representation +func (s DescribeReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// SetMetadata sets the Metadata field's value. +func (s *DescribeReceiptRuleSetOutput) SetMetadata(v *ReceiptRuleSetMetadata) *DescribeReceiptRuleSetOutput { + s.Metadata = v + return s +} + +// SetRules sets the Rules field's value. +func (s *DescribeReceiptRuleSetOutput) SetRules(v []*ReceiptRule) *DescribeReceiptRuleSetOutput { + s.Rules = v + return s +} + +// Represents the destination of the message, consisting of To:, CC:, and BCC: +// fields. +// +// Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 +// (https://tools.ietf.org/html/rfc6531). For this reason, the local part of +// a destination email address (the part of the email address that precedes +// the @ sign) may only contain 7-bit ASCII characters (https://en.wikipedia.org/wiki/Email_address#Local-part). +// If the domain part of an address (the part after the @ sign) contains non-ASCII +// characters, they must be encoded using Punycode, as described in RFC3492 +// (https://tools.ietf.org/html/rfc3492.html). +type Destination struct { + _ struct{} `type:"structure"` + + // The BCC: field(s) of the message. + BccAddresses []*string `type:"list"` + + // The CC: field(s) of the message. + CcAddresses []*string `type:"list"` + + // The To: field(s) of the message. + ToAddresses []*string `type:"list"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// SetBccAddresses sets the BccAddresses field's value. +func (s *Destination) SetBccAddresses(v []*string) *Destination { + s.BccAddresses = v + return s +} + +// SetCcAddresses sets the CcAddresses field's value. +func (s *Destination) SetCcAddresses(v []*string) *Destination { + s.CcAddresses = v + return s +} + +// SetToAddresses sets the ToAddresses field's value. +func (s *Destination) SetToAddresses(v []*string) *Destination { + s.ToAddresses = v + return s +} + +// Contains information about the event destination that the specified email +// sending events will be published to. +// +// When you create or update an event destination, you must provide one, and +// only one, destination. 
The destination can be Amazon CloudWatch, Amazon Kinesis +// Firehose or Amazon Simple Notification Service (Amazon SNS). +// +// Event destinations are associated with configuration sets, which enable you +// to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, +// or Amazon Simple Notification Service (Amazon SNS). For information about +// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type EventDestination struct { + _ struct{} `type:"structure"` + + // An object that contains the names, default values, and sources of the dimensions + // associated with an Amazon CloudWatch event destination. + CloudWatchDestination *CloudWatchDestination `type:"structure"` + + // Sets whether Amazon SES publishes events to this destination when you send + // an email with the associated configuration set. Set to true to enable publishing + // to this destination; set to false to prevent publishing to this destination. + // The default value is false. + Enabled *bool `type:"boolean"` + + // An object that contains the delivery stream ARN and the IAM role ARN associated + // with an Amazon Kinesis Firehose event destination. + KinesisFirehoseDestination *KinesisFirehoseDestination `type:"structure"` + + // The type of email sending events to publish to the event destination. + // + // MatchingEventTypes is a required field + MatchingEventTypes []*string `type:"list" required:"true"` + + // The name of the event destination. The name must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Contain less than 64 characters. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // An object that contains the topic ARN associated with an Amazon Simple Notification + // Service (Amazon SNS) event destination. + SNSDestination *SNSDestination `type:"structure"` +} + +// String returns the string representation +func (s EventDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EventDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventDestination"} + if s.MatchingEventTypes == nil { + invalidParams.Add(request.NewErrParamRequired("MatchingEventTypes")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.CloudWatchDestination != nil { + if err := s.CloudWatchDestination.Validate(); err != nil { + invalidParams.AddNested("CloudWatchDestination", err.(request.ErrInvalidParams)) + } + } + if s.KinesisFirehoseDestination != nil { + if err := s.KinesisFirehoseDestination.Validate(); err != nil { + invalidParams.AddNested("KinesisFirehoseDestination", err.(request.ErrInvalidParams)) + } + } + if s.SNSDestination != nil { + if err := s.SNSDestination.Validate(); err != nil { + invalidParams.AddNested("SNSDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCloudWatchDestination sets the CloudWatchDestination field's value. +func (s *EventDestination) SetCloudWatchDestination(v *CloudWatchDestination) *EventDestination { + s.CloudWatchDestination = v + return s +} + +// SetEnabled sets the Enabled field's value. 
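Stepping back to the Destination shape documented above, which carries the To:, CC:, and BCC: lists for SendEmail: a small sketch of building one from plain Go string slices, under the same client and import assumptions (aws.StringSlice converts []string to []*string).

// newDestination is a hypothetical helper that builds a Destination.
// Per the Destination documentation above, local parts must be 7-bit ASCII.
func newDestination(to, cc, bcc []string) *ses.Destination {
	return &ses.Destination{
		ToAddresses:  aws.StringSlice(to),
		CcAddresses:  aws.StringSlice(cc),
		BccAddresses: aws.StringSlice(bcc),
	}
}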
+func (s *EventDestination) SetEnabled(v bool) *EventDestination { + s.Enabled = &v + return s +} + +// SetKinesisFirehoseDestination sets the KinesisFirehoseDestination field's value. +func (s *EventDestination) SetKinesisFirehoseDestination(v *KinesisFirehoseDestination) *EventDestination { + s.KinesisFirehoseDestination = v + return s +} + +// SetMatchingEventTypes sets the MatchingEventTypes field's value. +func (s *EventDestination) SetMatchingEventTypes(v []*string) *EventDestination { + s.MatchingEventTypes = v + return s +} + +// SetName sets the Name field's value. +func (s *EventDestination) SetName(v string) *EventDestination { + s.Name = &v + return s +} + +// SetSNSDestination sets the SNSDestination field's value. +func (s *EventDestination) SetSNSDestination(v *SNSDestination) *EventDestination { + s.SNSDestination = v + return s +} + +// Additional X-headers to include in the Delivery Status Notification (DSN) +// when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type ExtensionField struct { + _ struct{} `type:"structure"` + + // The name of the header to add. Must be between 1 and 50 characters, inclusive, + // and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The value of the header to add. Must be less than 2048 characters, and must + // not contain newline characters ("\r" or "\n"). + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExtensionField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtensionField) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExtensionField) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExtensionField"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *ExtensionField) SetName(v string) *ExtensionField { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ExtensionField) SetValue(v string) *ExtensionField { + s.Value = &v + return s +} + +type GetAccountSendingEnabledInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountSendingEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSendingEnabledInput) GoString() string { + return s.String() +} + +// Represents a request to return the email sending status for your Amazon SES +// account in the current AWS Region. +type GetAccountSendingEnabledOutput struct { + _ struct{} `type:"structure"` + + // Describes whether email sending is enabled or disabled for your Amazon SES + // account in the current AWS Region. 
+ Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetAccountSendingEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSendingEnabledOutput) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *GetAccountSendingEnabledOutput) SetEnabled(v bool) *GetAccountSendingEnabledOutput { + s.Enabled = &v + return s +} + +// Represents a request to retrieve an existing custom verification email template. +type GetCustomVerificationEmailTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the custom verification email template that you want to retrieve. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCustomVerificationEmailTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCustomVerificationEmailTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCustomVerificationEmailTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCustomVerificationEmailTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateName sets the TemplateName field's value. +func (s *GetCustomVerificationEmailTemplateInput) SetTemplateName(v string) *GetCustomVerificationEmailTemplateInput { + s.TemplateName = &v + return s +} + +// The content of the custom verification email template. +type GetCustomVerificationEmailTemplateOutput struct { + _ struct{} `type:"structure"` + + // The URL that the recipient of the verification email is sent to if his or + // her address is not successfully verified. + FailureRedirectionURL *string `type:"string"` + + // The email address that the custom verification email is sent from. + FromEmailAddress *string `type:"string"` + + // The URL that the recipient of the verification email is sent to if his or + // her address is successfully verified. + SuccessRedirectionURL *string `type:"string"` + + // The content of the custom verification email. + TemplateContent *string `type:"string"` + + // The name of the custom verification email template. + TemplateName *string `type:"string"` + + // The subject line of the custom verification email. + TemplateSubject *string `type:"string"` +} + +// String returns the string representation +func (s GetCustomVerificationEmailTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCustomVerificationEmailTemplateOutput) GoString() string { + return s.String() +} + +// SetFailureRedirectionURL sets the FailureRedirectionURL field's value. +func (s *GetCustomVerificationEmailTemplateOutput) SetFailureRedirectionURL(v string) *GetCustomVerificationEmailTemplateOutput { + s.FailureRedirectionURL = &v + return s +} + +// SetFromEmailAddress sets the FromEmailAddress field's value. +func (s *GetCustomVerificationEmailTemplateOutput) SetFromEmailAddress(v string) *GetCustomVerificationEmailTemplateOutput { + s.FromEmailAddress = &v + return s +} + +// SetSuccessRedirectionURL sets the SuccessRedirectionURL field's value. 
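GetAccountSendingEnabled, described above, takes an empty input and returns a single Enabled flag. A sketch of checking the account-level sending switch, under the same assumptions; the helper name is made up for illustration.

// accountSendingEnabled reports whether email sending is enabled for the
// account in the client's region.
func accountSendingEnabled(svc *ses.SES) (bool, error) {
	out, err := svc.GetAccountSendingEnabled(&ses.GetAccountSendingEnabledInput{})
	if err != nil {
		return false, err
	}
	return aws.BoolValue(out.Enabled), nil
}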
+func (s *GetCustomVerificationEmailTemplateOutput) SetSuccessRedirectionURL(v string) *GetCustomVerificationEmailTemplateOutput { + s.SuccessRedirectionURL = &v + return s +} + +// SetTemplateContent sets the TemplateContent field's value. +func (s *GetCustomVerificationEmailTemplateOutput) SetTemplateContent(v string) *GetCustomVerificationEmailTemplateOutput { + s.TemplateContent = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *GetCustomVerificationEmailTemplateOutput) SetTemplateName(v string) *GetCustomVerificationEmailTemplateOutput { + s.TemplateName = &v + return s +} + +// SetTemplateSubject sets the TemplateSubject field's value. +func (s *GetCustomVerificationEmailTemplateOutput) SetTemplateSubject(v string) *GetCustomVerificationEmailTemplateOutput { + s.TemplateSubject = &v + return s +} + +// Represents a request for the status of Amazon SES Easy DKIM signing for an +// identity. For domain identities, this request also returns the DKIM tokens +// that are required for Easy DKIM signing, and whether Amazon SES successfully +// verified that these tokens were published. For more information about Easy +// DKIM, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +type GetIdentityDkimAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more verified identities - email addresses, domains, or + // both. + // + // Identities is a required field + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityDkimAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityDkimAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityDkimAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityDkimAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentities sets the Identities field's value. +func (s *GetIdentityDkimAttributesInput) SetIdentities(v []*string) *GetIdentityDkimAttributesInput { + s.Identities = v + return s +} + +// Represents the status of Amazon SES Easy DKIM signing for an identity. For +// domain identities, this response also contains the DKIM tokens that are required +// for Easy DKIM signing, and whether Amazon SES successfully verified that +// these tokens were published. +type GetIdentityDkimAttributesOutput struct { + _ struct{} `type:"structure"` + + // The DKIM attributes for an email address or a domain. + // + // DkimAttributes is a required field + DkimAttributes map[string]*IdentityDkimAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityDkimAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityDkimAttributesOutput) GoString() string { + return s.String() +} + +// SetDkimAttributes sets the DkimAttributes field's value. 
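For the Easy DKIM shapes above, a sketch that fetches the DKIM tokens for a single domain identity; the domain is a caller-supplied placeholder and the helper name is an assumption.

// dkimTokens returns the DKIM tokens SES expects to find as CNAME records
// in the domain's DNS. It returns nil if SES reports no attributes for the domain.
func dkimTokens(svc *ses.SES, domain string) ([]string, error) {
	out, err := svc.GetIdentityDkimAttributes(&ses.GetIdentityDkimAttributesInput{
		Identities: []*string{aws.String(domain)},
	})
	if err != nil {
		return nil, err
	}
	attrs, ok := out.DkimAttributes[domain]
	if !ok {
		return nil, nil
	}
	return aws.StringValueSlice(attrs.DkimTokens), nil
}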
+func (s *GetIdentityDkimAttributesOutput) SetDkimAttributes(v map[string]*IdentityDkimAttributes) *GetIdentityDkimAttributesOutput { + s.DkimAttributes = v + return s +} + +// Represents a request to return the Amazon SES custom MAIL FROM attributes +// for a list of identities. For information about using a custom MAIL FROM +// domain, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). +type GetIdentityMailFromDomainAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more identities. + // + // Identities is a required field + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityMailFromDomainAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityMailFromDomainAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityMailFromDomainAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityMailFromDomainAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentities sets the Identities field's value. +func (s *GetIdentityMailFromDomainAttributesInput) SetIdentities(v []*string) *GetIdentityMailFromDomainAttributesInput { + s.Identities = v + return s +} + +// Represents the custom MAIL FROM attributes for a list of identities. +type GetIdentityMailFromDomainAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of identities to custom MAIL FROM attributes. + // + // MailFromDomainAttributes is a required field + MailFromDomainAttributes map[string]*IdentityMailFromDomainAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityMailFromDomainAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityMailFromDomainAttributesOutput) GoString() string { + return s.String() +} + +// SetMailFromDomainAttributes sets the MailFromDomainAttributes field's value. +func (s *GetIdentityMailFromDomainAttributesOutput) SetMailFromDomainAttributes(v map[string]*IdentityMailFromDomainAttributes) *GetIdentityMailFromDomainAttributesOutput { + s.MailFromDomainAttributes = v + return s +} + +// Represents a request to return the notification attributes for a list of +// identities you verified with Amazon SES. For information about Amazon SES +// notifications, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +type GetIdentityNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more identities. You can specify an identity by using its + // name or by using its Amazon Resource Name (ARN). Examples: user@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. 
+ // + // Identities is a required field + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityNotificationAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityNotificationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityNotificationAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentities sets the Identities field's value. +func (s *GetIdentityNotificationAttributesInput) SetIdentities(v []*string) *GetIdentityNotificationAttributesInput { + s.Identities = v + return s +} + +// Represents the notification attributes for a list of identities. +type GetIdentityNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of Identity to IdentityNotificationAttributes. + // + // NotificationAttributes is a required field + NotificationAttributes map[string]*IdentityNotificationAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityNotificationAttributesOutput) GoString() string { + return s.String() +} + +// SetNotificationAttributes sets the NotificationAttributes field's value. +func (s *GetIdentityNotificationAttributesOutput) SetNotificationAttributes(v map[string]*IdentityNotificationAttributes) *GetIdentityNotificationAttributesOutput { + s.NotificationAttributes = v + return s +} + +// Represents a request to return the requested sending authorization policies +// for an identity. Sending authorization is an Amazon SES feature that enables +// you to authorize other senders to use your identities. For information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +type GetIdentityPoliciesInput struct { + _ struct{} `type:"structure"` + + // The identity for which the policies will be retrieved. You can specify an + // identity by using its name or by using its Amazon Resource Name (ARN). Examples: + // user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` + + // A list of the names of policies to be retrieved. You can retrieve a maximum + // of 20 policies at a time. If you do not know the names of the policies that + // are attached to the identity, you can use ListIdentityPolicies. + // + // PolicyNames is a required field + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
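A sketch of reading the notification attributes for one verified identity, using the request and response shapes above; it looks ahead to the ForwardingEnabled field of IdentityNotificationAttributes, which is documented further down. Same client and import assumptions as before.

// forwardingEnabled reports whether SES forwards bounce and complaint
// notifications for the identity as email.
func forwardingEnabled(svc *ses.SES, identity string) (bool, error) {
	out, err := svc.GetIdentityNotificationAttributes(&ses.GetIdentityNotificationAttributesInput{
		Identities: []*string{aws.String(identity)},
	})
	if err != nil {
		return false, err
	}
	attrs, ok := out.NotificationAttributes[identity]
	if !ok {
		return false, nil
	}
	return aws.BoolValue(attrs.ForwardingEnabled), nil
}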
+func (s *GetIdentityPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityPoliciesInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.PolicyNames == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentity sets the Identity field's value. +func (s *GetIdentityPoliciesInput) SetIdentity(v string) *GetIdentityPoliciesInput { + s.Identity = &v + return s +} + +// SetPolicyNames sets the PolicyNames field's value. +func (s *GetIdentityPoliciesInput) SetPolicyNames(v []*string) *GetIdentityPoliciesInput { + s.PolicyNames = v + return s +} + +// Represents the requested sending authorization policies. +type GetIdentityPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A map of policy names to policies. + // + // Policies is a required field + Policies map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoliciesOutput) GoString() string { + return s.String() +} + +// SetPolicies sets the Policies field's value. +func (s *GetIdentityPoliciesOutput) SetPolicies(v map[string]*string) *GetIdentityPoliciesOutput { + s.Policies = v + return s +} + +// Represents a request to return the Amazon SES verification status of a list +// of identities. For domain identities, this request also returns the verification +// token. For information about verifying identities with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +type GetIdentityVerificationAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of identities. + // + // Identities is a required field + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityVerificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityVerificationAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityVerificationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityVerificationAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentities sets the Identities field's value. +func (s *GetIdentityVerificationAttributesInput) SetIdentities(v []*string) *GetIdentityVerificationAttributesInput { + s.Identities = v + return s +} + +// The Amazon SES verification status of a list of identities. For domain identities, +// this response also contains the verification token. +type GetIdentityVerificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of Identities to IdentityVerificationAttributes objects. 
+ // + // VerificationAttributes is a required field + VerificationAttributes map[string]*IdentityVerificationAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityVerificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityVerificationAttributesOutput) GoString() string { + return s.String() +} + +// SetVerificationAttributes sets the VerificationAttributes field's value. +func (s *GetIdentityVerificationAttributesOutput) SetVerificationAttributes(v map[string]*IdentityVerificationAttributes) *GetIdentityVerificationAttributesOutput { + s.VerificationAttributes = v + return s +} + +type GetSendQuotaInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetSendQuotaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendQuotaInput) GoString() string { + return s.String() +} + +// Represents your Amazon SES daily sending quota, maximum send rate, and the +// number of emails you have sent in the last 24 hours. +type GetSendQuotaOutput struct { + _ struct{} `type:"structure"` + + // The maximum number of emails the user is allowed to send in a 24-hour interval. + // A value of -1 signifies an unlimited quota. + Max24HourSend *float64 `type:"double"` + + // The maximum number of emails that Amazon SES can accept from the user's account + // per second. + // + // The rate at which Amazon SES accepts the user's messages might be less than + // the maximum send rate. + MaxSendRate *float64 `type:"double"` + + // The number of emails sent during the previous 24 hours. + SentLast24Hours *float64 `type:"double"` +} + +// String returns the string representation +func (s GetSendQuotaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendQuotaOutput) GoString() string { + return s.String() +} + +// SetMax24HourSend sets the Max24HourSend field's value. +func (s *GetSendQuotaOutput) SetMax24HourSend(v float64) *GetSendQuotaOutput { + s.Max24HourSend = &v + return s +} + +// SetMaxSendRate sets the MaxSendRate field's value. +func (s *GetSendQuotaOutput) SetMaxSendRate(v float64) *GetSendQuotaOutput { + s.MaxSendRate = &v + return s +} + +// SetSentLast24Hours sets the SentLast24Hours field's value. +func (s *GetSendQuotaOutput) SetSentLast24Hours(v float64) *GetSendQuotaOutput { + s.SentLast24Hours = &v + return s +} + +type GetSendStatisticsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetSendStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendStatisticsInput) GoString() string { + return s.String() +} + +// Represents a list of data points. This list contains aggregated data from +// the previous two weeks of your sending activity with Amazon SES. +type GetSendStatisticsOutput struct { + _ struct{} `type:"structure"` + + // A list of data points, each of which represents 15 minutes of activity. 
+ SendDataPoints []*SendDataPoint `type:"list"` +} + +// String returns the string representation +func (s GetSendStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendStatisticsOutput) GoString() string { + return s.String() +} + +// SetSendDataPoints sets the SendDataPoints field's value. +func (s *GetSendStatisticsOutput) SetSendDataPoints(v []*SendDataPoint) *GetSendStatisticsOutput { + s.SendDataPoints = v + return s +} + +type GetTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the template you want to retrieve. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateName sets the TemplateName field's value. +func (s *GetTemplateInput) SetTemplateName(v string) *GetTemplateInput { + s.TemplateName = &v + return s +} + +type GetTemplateOutput struct { + _ struct{} `type:"structure"` + + // The content of the email, composed of a subject line, an HTML part, and a + // text-only part. + Template *Template `type:"structure"` +} + +// String returns the string representation +func (s GetTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTemplateOutput) GoString() string { + return s.String() +} + +// SetTemplate sets the Template field's value. +func (s *GetTemplateOutput) SetTemplate(v *Template) *GetTemplateOutput { + s.Template = v + return s +} + +// Represents the DKIM attributes of a verified email address or a domain. +type IdentityDkimAttributes struct { + _ struct{} `type:"structure"` + + // True if DKIM signing is enabled for email sent from the identity; false otherwise. + // The default value is true. + // + // DkimEnabled is a required field + DkimEnabled *bool `type:"boolean" required:"true"` + + // A set of character strings that represent the domain's identity. Using these + // tokens, you will need to create DNS CNAME records that point to DKIM public + // keys hosted by Amazon SES. Amazon Web Services will eventually detect that + // you have updated your DNS records; this detection process may take up to + // 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign + // email originating from that domain. (This only applies to domain identities, + // not email address identities.) + // + // For more information about creating DNS records using DKIM tokens, go to + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + DkimTokens []*string `type:"list"` + + // Describes whether Amazon SES has successfully verified the DKIM DNS records + // (tokens) published in the domain name's DNS. (This only applies to domain + // identities, not email address identities.) 
+ // + // DkimVerificationStatus is a required field + DkimVerificationStatus *string `type:"string" required:"true" enum:"VerificationStatus"` +} + +// String returns the string representation +func (s IdentityDkimAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityDkimAttributes) GoString() string { + return s.String() +} + +// SetDkimEnabled sets the DkimEnabled field's value. +func (s *IdentityDkimAttributes) SetDkimEnabled(v bool) *IdentityDkimAttributes { + s.DkimEnabled = &v + return s +} + +// SetDkimTokens sets the DkimTokens field's value. +func (s *IdentityDkimAttributes) SetDkimTokens(v []*string) *IdentityDkimAttributes { + s.DkimTokens = v + return s +} + +// SetDkimVerificationStatus sets the DkimVerificationStatus field's value. +func (s *IdentityDkimAttributes) SetDkimVerificationStatus(v string) *IdentityDkimAttributes { + s.DkimVerificationStatus = &v + return s +} + +// Represents the custom MAIL FROM domain attributes of a verified identity +// (email address or domain). +type IdentityMailFromDomainAttributes struct { + _ struct{} `type:"structure"` + + // The action that Amazon SES takes if it cannot successfully read the required + // MX record when you send an email. A value of UseDefaultValue indicates that + // if Amazon SES cannot read the required MX record, it uses amazonses.com (or + // a subdomain of that) as the MAIL FROM domain. A value of RejectMessage indicates + // that if Amazon SES cannot read the required MX record, Amazon SES returns + // a MailFromDomainNotVerified error and does not send the email. + // + // The custom MAIL FROM setup states that result in this behavior are Pending, + // Failed, and TemporaryFailure. + // + // BehaviorOnMXFailure is a required field + BehaviorOnMXFailure *string `type:"string" required:"true" enum:"BehaviorOnMXFailure"` + + // The custom MAIL FROM domain that the identity is configured to use. + // + // MailFromDomain is a required field + MailFromDomain *string `type:"string" required:"true"` + + // The state that indicates whether Amazon SES has successfully read the MX + // record required for custom MAIL FROM domain setup. If the state is Success, + // Amazon SES uses the specified custom MAIL FROM domain when the verified identity + // sends an email. All other states indicate that Amazon SES takes the action + // described by BehaviorOnMXFailure. + // + // MailFromDomainStatus is a required field + MailFromDomainStatus *string `type:"string" required:"true" enum:"CustomMailFromStatus"` +} + +// String returns the string representation +func (s IdentityMailFromDomainAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityMailFromDomainAttributes) GoString() string { + return s.String() +} + +// SetBehaviorOnMXFailure sets the BehaviorOnMXFailure field's value. +func (s *IdentityMailFromDomainAttributes) SetBehaviorOnMXFailure(v string) *IdentityMailFromDomainAttributes { + s.BehaviorOnMXFailure = &v + return s +} + +// SetMailFromDomain sets the MailFromDomain field's value. +func (s *IdentityMailFromDomainAttributes) SetMailFromDomain(v string) *IdentityMailFromDomainAttributes { + s.MailFromDomain = &v + return s +} + +// SetMailFromDomainStatus sets the MailFromDomainStatus field's value. 
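GetSendQuotaOutput, documented above, reports the 24-hour quota and recent usage. A sketch that derives the remaining headroom from those fields, under the same assumptions; the helper name is illustrative only.

// remainingQuota returns how many more messages can be sent in the current
// 24-hour window, or -1 if the account's quota is unlimited
// (Max24HourSend of -1 signifies an unlimited quota).
func remainingQuota(svc *ses.SES) (float64, error) {
	out, err := svc.GetSendQuota(&ses.GetSendQuotaInput{})
	if err != nil {
		return 0, err
	}
	max := aws.Float64Value(out.Max24HourSend)
	if max < 0 {
		return -1, nil
	}
	return max - aws.Float64Value(out.SentLast24Hours), nil
}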
+func (s *IdentityMailFromDomainAttributes) SetMailFromDomainStatus(v string) *IdentityMailFromDomainAttributes { + s.MailFromDomainStatus = &v + return s +} + +// Represents the notification attributes of an identity, including whether +// an identity has Amazon Simple Notification Service (Amazon SNS) topics set +// for bounce, complaint, and/or delivery notifications, and whether feedback +// forwarding is enabled for bounce and complaint notifications. +type IdentityNotificationAttributes struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish bounce notifications. + // + // BounceTopic is a required field + BounceTopic *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish complaint notifications. + // + // ComplaintTopic is a required field + ComplaintTopic *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish delivery notifications. + // + // DeliveryTopic is a required field + DeliveryTopic *string `type:"string" required:"true"` + + // Describes whether Amazon SES will forward bounce and complaint notifications + // as email. true indicates that Amazon SES will forward bounce and complaint + // notifications as email, while false indicates that bounce and complaint notifications + // will be published only to the specified bounce and complaint Amazon SNS topics. + // + // ForwardingEnabled is a required field + ForwardingEnabled *bool `type:"boolean" required:"true"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Bounce. A value of true specifies that Amazon SES + // will include headers in bounce notifications, and a value of false specifies + // that Amazon SES will not include headers in bounce notifications. + HeadersInBounceNotificationsEnabled *bool `type:"boolean"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Complaint. A value of true specifies that Amazon + // SES will include headers in complaint notifications, and a value of false + // specifies that Amazon SES will not include headers in complaint notifications. + HeadersInComplaintNotificationsEnabled *bool `type:"boolean"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Delivery. A value of true specifies that Amazon + // SES will include headers in delivery notifications, and a value of false + // specifies that Amazon SES will not include headers in delivery notifications. + HeadersInDeliveryNotificationsEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s IdentityNotificationAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityNotificationAttributes) GoString() string { + return s.String() +} + +// SetBounceTopic sets the BounceTopic field's value. +func (s *IdentityNotificationAttributes) SetBounceTopic(v string) *IdentityNotificationAttributes { + s.BounceTopic = &v + return s +} + +// SetComplaintTopic sets the ComplaintTopic field's value. +func (s *IdentityNotificationAttributes) SetComplaintTopic(v string) *IdentityNotificationAttributes { + s.ComplaintTopic = &v + return s +} + +// SetDeliveryTopic sets the DeliveryTopic field's value. 
+func (s *IdentityNotificationAttributes) SetDeliveryTopic(v string) *IdentityNotificationAttributes { + s.DeliveryTopic = &v + return s +} + +// SetForwardingEnabled sets the ForwardingEnabled field's value. +func (s *IdentityNotificationAttributes) SetForwardingEnabled(v bool) *IdentityNotificationAttributes { + s.ForwardingEnabled = &v + return s +} + +// SetHeadersInBounceNotificationsEnabled sets the HeadersInBounceNotificationsEnabled field's value. +func (s *IdentityNotificationAttributes) SetHeadersInBounceNotificationsEnabled(v bool) *IdentityNotificationAttributes { + s.HeadersInBounceNotificationsEnabled = &v + return s +} + +// SetHeadersInComplaintNotificationsEnabled sets the HeadersInComplaintNotificationsEnabled field's value. +func (s *IdentityNotificationAttributes) SetHeadersInComplaintNotificationsEnabled(v bool) *IdentityNotificationAttributes { + s.HeadersInComplaintNotificationsEnabled = &v + return s +} + +// SetHeadersInDeliveryNotificationsEnabled sets the HeadersInDeliveryNotificationsEnabled field's value. +func (s *IdentityNotificationAttributes) SetHeadersInDeliveryNotificationsEnabled(v bool) *IdentityNotificationAttributes { + s.HeadersInDeliveryNotificationsEnabled = &v + return s +} + +// Represents the verification attributes of a single identity. +type IdentityVerificationAttributes struct { + _ struct{} `type:"structure"` + + // The verification status of the identity: "Pending", "Success", "Failed", + // or "TemporaryFailure". + // + // VerificationStatus is a required field + VerificationStatus *string `type:"string" required:"true" enum:"VerificationStatus"` + + // The verification token for a domain identity. Null for email address identities. + VerificationToken *string `type:"string"` +} + +// String returns the string representation +func (s IdentityVerificationAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityVerificationAttributes) GoString() string { + return s.String() +} + +// SetVerificationStatus sets the VerificationStatus field's value. +func (s *IdentityVerificationAttributes) SetVerificationStatus(v string) *IdentityVerificationAttributes { + s.VerificationStatus = &v + return s +} + +// SetVerificationToken sets the VerificationToken field's value. +func (s *IdentityVerificationAttributes) SetVerificationToken(v string) *IdentityVerificationAttributes { + s.VerificationToken = &v + return s +} + +// Contains the delivery stream ARN and the IAM role ARN associated with an +// Amazon Kinesis Firehose event destination. +// +// Event destinations, such as Amazon Kinesis Firehose, are associated with +// configuration sets, which enable you to publish email sending events. For +// information about using configuration sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type KinesisFirehoseDestination struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon Kinesis Firehose stream that email sending events should + // be published to. + // + // DeliveryStreamARN is a required field + DeliveryStreamARN *string `type:"string" required:"true"` + + // The ARN of the IAM role under which Amazon SES publishes email sending events + // to the Amazon Kinesis Firehose stream. 
+ // + // IAMRoleARN is a required field + IAMRoleARN *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s KinesisFirehoseDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KinesisFirehoseDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KinesisFirehoseDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KinesisFirehoseDestination"} + if s.DeliveryStreamARN == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamARN")) + } + if s.IAMRoleARN == nil { + invalidParams.Add(request.NewErrParamRequired("IAMRoleARN")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryStreamARN sets the DeliveryStreamARN field's value. +func (s *KinesisFirehoseDestination) SetDeliveryStreamARN(v string) *KinesisFirehoseDestination { + s.DeliveryStreamARN = &v + return s +} + +// SetIAMRoleARN sets the IAMRoleARN field's value. +func (s *KinesisFirehoseDestination) SetIAMRoleARN(v string) *KinesisFirehoseDestination { + s.IAMRoleARN = &v + return s +} + +// When included in a receipt rule, this action calls an AWS Lambda function +// and, optionally, publishes a notification to Amazon Simple Notification Service +// (Amazon SNS). +// +// To enable Amazon SES to call your AWS Lambda function or to publish to an +// Amazon SNS topic of another account, Amazon SES must have permission to access +// those resources. For information about giving permissions, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// For information about using AWS Lambda actions in receipt rules, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-lambda.html). +type LambdaAction struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function. An example of + // an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. + // For more information about AWS Lambda, see the AWS Lambda Developer Guide + // (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). + // + // FunctionArn is a required field + FunctionArn *string `type:"string" required:"true"` + + // The invocation type of the AWS Lambda function. An invocation type of RequestResponse + // means that the execution of the function will immediately result in a response, + // and a value of Event means that the function will be invoked asynchronously. + // The default value is Event. For information about AWS Lambda invocation types, + // see the AWS Lambda Developer Guide (http://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). + // + // There is a 30-second timeout on RequestResponse invocations. You should use + // Event invocation in most cases. Use RequestResponse only when you want to + // make a mail flow decision, such as whether to stop the receipt rule or the + // receipt rule set. + InvocationType *string `type:"string" enum:"InvocationType"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. 
+ // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s LambdaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaAction"} + if s.FunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFunctionArn sets the FunctionArn field's value. +func (s *LambdaAction) SetFunctionArn(v string) *LambdaAction { + s.FunctionArn = &v + return s +} + +// SetInvocationType sets the InvocationType field's value. +func (s *LambdaAction) SetInvocationType(v string) *LambdaAction { + s.InvocationType = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *LambdaAction) SetTopicArn(v string) *LambdaAction { + s.TopicArn = &v + return s +} + +// Represents a request to list the configuration sets associated with your +// AWS account. Configuration sets enable you to publish email sending events. +// For information about using configuration sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type ListConfigurationSetsInput struct { + _ struct{} `type:"structure"` + + // The number of configuration sets to return. + MaxItems *int64 `type:"integer"` + + // A token returned from a previous call to ListConfigurationSets to indicate + // the position of the configuration set in the configuration set list. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListConfigurationSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListConfigurationSetsInput) GoString() string { + return s.String() +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListConfigurationSetsInput) SetMaxItems(v int64) *ListConfigurationSetsInput { + s.MaxItems = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListConfigurationSetsInput) SetNextToken(v string) *ListConfigurationSetsInput { + s.NextToken = &v + return s +} + +// A list of configuration sets associated with your AWS account. Configuration +// sets enable you to publish email sending events. For information about using +// configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type ListConfigurationSetsOutput struct { + _ struct{} `type:"structure"` + + // A list of configuration sets. + ConfigurationSets []*ConfigurationSet `type:"list"` + + // A token indicating that there are additional configuration sets available + // to be listed. Pass this token to successive calls of ListConfigurationSets. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListConfigurationSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListConfigurationSetsOutput) GoString() string { + return s.String() +} + +// SetConfigurationSets sets the ConfigurationSets field's value. +func (s *ListConfigurationSetsOutput) SetConfigurationSets(v []*ConfigurationSet) *ListConfigurationSetsOutput { + s.ConfigurationSets = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListConfigurationSetsOutput) SetNextToken(v string) *ListConfigurationSetsOutput { + s.NextToken = &v + return s +} + +// Represents a request to list the existing custom verification email templates +// for your account. +// +// For more information about custom verification email templates, see Using +// Custom Verification Email Templates (ses/latest/DeveloperGuide/custom-verification-emails.html) +// in the Amazon SES Developer Guide. +type ListCustomVerificationEmailTemplatesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of custom verification email templates to return. This + // value must be at least 1 and less than or equal to 50. If you do not specify + // a value, or if you specify a value less than 1 or greater than 50, the operation + // will return up to 50 results. + MaxResults *int64 `min:"1" type:"integer"` + + // An array the contains the name and creation time stamp for each template + // in your Amazon SES account. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCustomVerificationEmailTemplatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCustomVerificationEmailTemplatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCustomVerificationEmailTemplatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCustomVerificationEmailTemplatesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListCustomVerificationEmailTemplatesInput) SetMaxResults(v int64) *ListCustomVerificationEmailTemplatesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCustomVerificationEmailTemplatesInput) SetNextToken(v string) *ListCustomVerificationEmailTemplatesInput { + s.NextToken = &v + return s +} + +// A paginated list of custom verification email templates. +type ListCustomVerificationEmailTemplatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the custom verification email templates that exist in your account. + CustomVerificationEmailTemplates []*CustomVerificationEmailTemplate `type:"list"` + + // A token indicating that there are additional custom verification email templates + // available to be listed. Pass this token to a subsequent call to ListTemplates + // to retrieve the next 50 custom verification email templates. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCustomVerificationEmailTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCustomVerificationEmailTemplatesOutput) GoString() string { + return s.String() +} + +// SetCustomVerificationEmailTemplates sets the CustomVerificationEmailTemplates field's value. +func (s *ListCustomVerificationEmailTemplatesOutput) SetCustomVerificationEmailTemplates(v []*CustomVerificationEmailTemplate) *ListCustomVerificationEmailTemplatesOutput { + s.CustomVerificationEmailTemplates = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCustomVerificationEmailTemplatesOutput) SetNextToken(v string) *ListCustomVerificationEmailTemplatesOutput { + s.NextToken = &v + return s +} + +// Represents a request to return a list of all identities (email addresses +// and domains) that you have attempted to verify under your AWS account, regardless +// of verification status. +type ListIdentitiesInput struct { + _ struct{} `type:"structure"` + + // The type of the identities to list. Possible values are "EmailAddress" and + // "Domain". If this parameter is omitted, then all identities will be listed. + IdentityType *string `type:"string" enum:"IdentityType"` + + // The maximum number of identities per page. Possible values are 1-1000 inclusive. + MaxItems *int64 `type:"integer"` + + // The token to use for pagination. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesInput) GoString() string { + return s.String() +} + +// SetIdentityType sets the IdentityType field's value. +func (s *ListIdentitiesInput) SetIdentityType(v string) *ListIdentitiesInput { + s.IdentityType = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListIdentitiesInput) SetMaxItems(v int64) *ListIdentitiesInput { + s.MaxItems = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListIdentitiesInput) SetNextToken(v string) *ListIdentitiesInput { + s.NextToken = &v + return s +} + +// A list of all identities that you have attempted to verify under your AWS +// account, regardless of verification status. +type ListIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // A list of identities. + // + // Identities is a required field + Identities []*string `type:"list" required:"true"` + + // The token used for pagination. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesOutput) GoString() string { + return s.String() +} + +// SetIdentities sets the Identities field's value. +func (s *ListIdentitiesOutput) SetIdentities(v []*string) *ListIdentitiesOutput { + s.Identities = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListIdentitiesOutput) SetNextToken(v string) *ListIdentitiesOutput { + s.NextToken = &v + return s +} + +// Represents a request to return a list of sending authorization policies that +// are attached to an identity. Sending authorization is an Amazon SES feature +// that enables you to authorize other senders to use your identities. 
For information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +type ListIdentityPoliciesInput struct { + _ struct{} `type:"structure"` + + // The identity that is associated with the policy for which the policies will + // be listed. You can specify an identity by using its name or by using its + // Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListIdentityPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListIdentityPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIdentityPoliciesInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentity sets the Identity field's value. +func (s *ListIdentityPoliciesInput) SetIdentity(v string) *ListIdentityPoliciesInput { + s.Identity = &v + return s +} + +// A list of names of sending authorization policies that apply to an identity. +type ListIdentityPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of names of policies that apply to the specified identity. + // + // PolicyNames is a required field + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListIdentityPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoliciesOutput) GoString() string { + return s.String() +} + +// SetPolicyNames sets the PolicyNames field's value. +func (s *ListIdentityPoliciesOutput) SetPolicyNames(v []*string) *ListIdentityPoliciesOutput { + s.PolicyNames = v + return s +} + +// Represents a request to list the IP address filters that exist under your +// AWS account. You use IP address filters when you receive email with Amazon +// SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type ListReceiptFiltersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListReceiptFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptFiltersInput) GoString() string { + return s.String() +} + +// A list of IP address filters that exist under your AWS account. +type ListReceiptFiltersOutput struct { + _ struct{} `type:"structure"` + + // A list of IP address filter data structures, which each consist of a name, + // an IP address range, and whether to allow or block mail from it. + Filters []*ReceiptFilter `type:"list"` +} + +// String returns the string representation +func (s ListReceiptFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptFiltersOutput) GoString() string { + return s.String() +} + +// SetFilters sets the Filters field's value. 
+func (s *ListReceiptFiltersOutput) SetFilters(v []*ReceiptFilter) *ListReceiptFiltersOutput { + s.Filters = v + return s +} + +// Represents a request to list the receipt rule sets that exist under your +// AWS account. You use receipt rule sets to receive email with Amazon SES. +// For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type ListReceiptRuleSetsInput struct { + _ struct{} `type:"structure"` + + // A token returned from a previous call to ListReceiptRuleSets to indicate + // the position in the receipt rule set list. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListReceiptRuleSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptRuleSetsInput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListReceiptRuleSetsInput) SetNextToken(v string) *ListReceiptRuleSetsInput { + s.NextToken = &v + return s +} + +// A list of receipt rule sets that exist under your AWS account. +type ListReceiptRuleSetsOutput struct { + _ struct{} `type:"structure"` + + // A token indicating that there are additional receipt rule sets available + // to be listed. Pass this token to successive calls of ListReceiptRuleSets + // to retrieve up to 100 receipt rule sets at a time. + NextToken *string `type:"string"` + + // The metadata for the currently active receipt rule set. The metadata consists + // of the rule set name and the timestamp of when the rule set was created. + RuleSets []*ReceiptRuleSetMetadata `type:"list"` +} + +// String returns the string representation +func (s ListReceiptRuleSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptRuleSetsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListReceiptRuleSetsOutput) SetNextToken(v string) *ListReceiptRuleSetsOutput { + s.NextToken = &v + return s +} + +// SetRuleSets sets the RuleSets field's value. +func (s *ListReceiptRuleSetsOutput) SetRuleSets(v []*ReceiptRuleSetMetadata) *ListReceiptRuleSetsOutput { + s.RuleSets = v + return s +} + +type ListTemplatesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of templates to return. This value must be at least 1 + // and less than or equal to 10. If you do not specify a value, or if you specify + // a value less than 1 or greater than 10, the operation will return up to 10 + // results. + MaxItems *int64 `type:"integer"` + + // A token returned from a previous call to ListTemplates to indicate the position + // in the list of email templates. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListTemplatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTemplatesInput) GoString() string { + return s.String() +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListTemplatesInput) SetMaxItems(v int64) *ListTemplatesInput { + s.MaxItems = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListTemplatesInput) SetNextToken(v string) *ListTemplatesInput { + s.NextToken = &v + return s +} + +type ListTemplatesOutput struct { + _ struct{} `type:"structure"` + + // A token indicating that there are additional email templates available to + // be listed. Pass this token to a subsequent call to ListTemplates to retrieve + // the next 50 email templates. + NextToken *string `type:"string"` + + // An array the contains the name and creation time stamp for each template + // in your Amazon SES account. + TemplatesMetadata []*TemplateMetadata `type:"list"` +} + +// String returns the string representation +func (s ListTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTemplatesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTemplatesOutput) SetNextToken(v string) *ListTemplatesOutput { + s.NextToken = &v + return s +} + +// SetTemplatesMetadata sets the TemplatesMetadata field's value. +func (s *ListTemplatesOutput) SetTemplatesMetadata(v []*TemplateMetadata) *ListTemplatesOutput { + s.TemplatesMetadata = v + return s +} + +type ListVerifiedEmailAddressesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListVerifiedEmailAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVerifiedEmailAddressesInput) GoString() string { + return s.String() +} + +// A list of email addresses that you have verified with Amazon SES under your +// AWS account. +type ListVerifiedEmailAddressesOutput struct { + _ struct{} `type:"structure"` + + // A list of email addresses that have been verified. + VerifiedEmailAddresses []*string `type:"list"` +} + +// String returns the string representation +func (s ListVerifiedEmailAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVerifiedEmailAddressesOutput) GoString() string { + return s.String() +} + +// SetVerifiedEmailAddresses sets the VerifiedEmailAddresses field's value. +func (s *ListVerifiedEmailAddressesOutput) SetVerifiedEmailAddresses(v []*string) *ListVerifiedEmailAddressesOutput { + s.VerifiedEmailAddresses = v + return s +} + +// Represents the message to be sent, composed of a subject and a body. +type Message struct { + _ struct{} `type:"structure"` + + // The message body. + // + // Body is a required field + Body *Body `type:"structure" required:"true"` + + // The subject of the message: A short summary of the content, which will appear + // in the recipient's inbox. + // + // Subject is a required field + Subject *Content `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Message) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Message) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Message"} + if s.Body == nil { + invalidParams.Add(request.NewErrParamRequired("Body")) + } + if s.Subject == nil { + invalidParams.Add(request.NewErrParamRequired("Subject")) + } + if s.Body != nil { + if err := s.Body.Validate(); err != nil { + invalidParams.AddNested("Body", err.(request.ErrInvalidParams)) + } + } + if s.Subject != nil { + if err := s.Subject.Validate(); err != nil { + invalidParams.AddNested("Subject", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *Message) SetBody(v *Body) *Message { + s.Body = v + return s +} + +// SetSubject sets the Subject field's value. +func (s *Message) SetSubject(v *Content) *Message { + s.Subject = v + return s +} + +// Message-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type MessageDsn struct { + _ struct{} `type:"structure"` + + // When the message was received by the reporting mail transfer agent (MTA), + // in RFC 822 (https://www.ietf.org/rfc/rfc0822.txt) date-time format. + ArrivalDate *time.Time `type:"timestamp"` + + // Additional X-headers to include in the DSN. + ExtensionFields []*ExtensionField `type:"list"` + + // The reporting MTA that attempted to deliver the message, formatted as specified + // in RFC 3464 (https://tools.ietf.org/html/rfc3464) (mta-name-type; mta-name). + // The default value is dns; inbound-smtp.[region].amazonaws.com. + // + // ReportingMta is a required field + ReportingMta *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MessageDsn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageDsn) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MessageDsn) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageDsn"} + if s.ReportingMta == nil { + invalidParams.Add(request.NewErrParamRequired("ReportingMta")) + } + if s.ExtensionFields != nil { + for i, v := range s.ExtensionFields { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtensionFields", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArrivalDate sets the ArrivalDate field's value. +func (s *MessageDsn) SetArrivalDate(v time.Time) *MessageDsn { + s.ArrivalDate = &v + return s +} + +// SetExtensionFields sets the ExtensionFields field's value. +func (s *MessageDsn) SetExtensionFields(v []*ExtensionField) *MessageDsn { + s.ExtensionFields = v + return s +} + +// SetReportingMta sets the ReportingMta field's value. +func (s *MessageDsn) SetReportingMta(v string) *MessageDsn { + s.ReportingMta = &v + return s +} + +// Contains the name and value of a tag that you can provide to SendEmail or +// SendRawEmail to apply to an email. +// +// Message tags, which you use with configuration sets, enable you to publish +// email sending events. 
For information about using configuration sets, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type MessageTag struct { + _ struct{} `type:"structure"` + + // The name of the tag. The name must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Contain less than 256 characters. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The value of the tag. The value must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Contain less than 256 characters. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MessageTag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageTag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MessageTag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageTag"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *MessageTag) SetName(v string) *MessageTag { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MessageTag) SetValue(v string) *MessageTag { + s.Value = &v + return s +} + +// Represents a request to add or update a sending authorization policy for +// an identity. Sending authorization is an Amazon SES feature that enables +// you to authorize other senders to use your identities. For information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +type PutIdentityPolicyInput struct { + _ struct{} `type:"structure"` + + // The identity that the policy will apply to. You can specify an identity by + // using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` + + // The text of the policy in JSON format. The policy cannot exceed 4 KB. + // + // For information about the syntax of sending authorization policies, see the + // Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html). + // + // Policy is a required field + Policy *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // The policy name cannot exceed 64 characters and can only include alphanumeric + // characters, dashes, and underscores. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutIdentityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIdentityPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutIdentityPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutIdentityPolicyInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentity sets the Identity field's value. +func (s *PutIdentityPolicyInput) SetIdentity(v string) *PutIdentityPolicyInput { + s.Identity = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutIdentityPolicyInput) SetPolicy(v string) *PutIdentityPolicyInput { + s.Policy = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *PutIdentityPolicyInput) SetPolicyName(v string) *PutIdentityPolicyInput { + s.PolicyName = &v + return s +} + +// An empty element returned on a successful request. +type PutIdentityPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutIdentityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIdentityPolicyOutput) GoString() string { + return s.String() +} + +// Represents the raw data of the message. +type RawMessage struct { + _ struct{} `type:"structure"` + + // The raw data of the message. This data needs to base64-encoded if you are + // accessing Amazon SES directly through the HTTPS interface. If you are accessing + // Amazon SES using an AWS SDK, the SDK takes care of the base 64-encoding for + // you. In all cases, the client must ensure that the message format complies + // with Internet email standards regarding email header fields, MIME types, + // and MIME encoding. + // + // The To:, CC:, and BCC: headers in the raw message can contain a group list. + // + // If you are using SendRawEmail with sending authorization, you can include + // X-headers in the raw message to specify the "Source," "From," and "Return-Path" + // addresses. For more information, see the documentation for SendRawEmail. + // + // Do not include these X-headers in the DKIM signature, because they are removed + // by Amazon SES before sending the email. + // + // For more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). + // + // Data is automatically base64 encoded/decoded by the SDK. + // + // Data is a required field + Data []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s RawMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RawMessage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RawMessage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RawMessage"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetData sets the Data field's value. 
+func (s *RawMessage) SetData(v []byte) *RawMessage {
+	s.Data = v
+	return s
+}
+
+// An action that Amazon SES can take when it receives an email on behalf of
+// one or more email addresses or domains that you own. An instance of this
+// data type can represent only one action.
+//
+// For information about setting up receipt rules, see the Amazon SES Developer
+// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html).
+type ReceiptAction struct {
+	_ struct{} `type:"structure"`
+
+	// Adds a header to the received email.
+	AddHeaderAction *AddHeaderAction `type:"structure"`
+
+	// Rejects the received email by returning a bounce response to the sender and,
+	// optionally, publishes a notification to Amazon Simple Notification Service
+	// (Amazon SNS).
+	BounceAction *BounceAction `type:"structure"`
+
+	// Calls an AWS Lambda function, and optionally, publishes a notification to
+	// Amazon SNS.
+	LambdaAction *LambdaAction `type:"structure"`
+
+	// Saves the received message to an Amazon Simple Storage Service (Amazon S3)
+	// bucket and, optionally, publishes a notification to Amazon SNS.
+	S3Action *S3Action `type:"structure"`
+
+	// Publishes the email content within a notification to Amazon SNS.
+	SNSAction *SNSAction `type:"structure"`
+
+	// Terminates the evaluation of the receipt rule set and optionally publishes
+	// a notification to Amazon SNS.
+	StopAction *StopAction `type:"structure"`
+
+	// Calls Amazon WorkMail and, optionally, publishes a notification to Amazon
+	// SNS.
+	WorkmailAction *WorkmailAction `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReceiptAction) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReceiptAction) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReceiptAction) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReceiptAction"}
+	if s.AddHeaderAction != nil {
+		if err := s.AddHeaderAction.Validate(); err != nil {
+			invalidParams.AddNested("AddHeaderAction", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.BounceAction != nil {
+		if err := s.BounceAction.Validate(); err != nil {
+			invalidParams.AddNested("BounceAction", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.LambdaAction != nil {
+		if err := s.LambdaAction.Validate(); err != nil {
+			invalidParams.AddNested("LambdaAction", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.S3Action != nil {
+		if err := s.S3Action.Validate(); err != nil {
+			invalidParams.AddNested("S3Action", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.SNSAction != nil {
+		if err := s.SNSAction.Validate(); err != nil {
+			invalidParams.AddNested("SNSAction", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.StopAction != nil {
+		if err := s.StopAction.Validate(); err != nil {
+			invalidParams.AddNested("StopAction", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.WorkmailAction != nil {
+		if err := s.WorkmailAction.Validate(); err != nil {
+			invalidParams.AddNested("WorkmailAction", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAddHeaderAction sets the AddHeaderAction field's value.
+func (s *ReceiptAction) SetAddHeaderAction(v *AddHeaderAction) *ReceiptAction {
+	s.AddHeaderAction = v
+	return s
+}
+
+// SetBounceAction sets the BounceAction field's value.
+func (s *ReceiptAction) SetBounceAction(v *BounceAction) *ReceiptAction { + s.BounceAction = v + return s +} + +// SetLambdaAction sets the LambdaAction field's value. +func (s *ReceiptAction) SetLambdaAction(v *LambdaAction) *ReceiptAction { + s.LambdaAction = v + return s +} + +// SetS3Action sets the S3Action field's value. +func (s *ReceiptAction) SetS3Action(v *S3Action) *ReceiptAction { + s.S3Action = v + return s +} + +// SetSNSAction sets the SNSAction field's value. +func (s *ReceiptAction) SetSNSAction(v *SNSAction) *ReceiptAction { + s.SNSAction = v + return s +} + +// SetStopAction sets the StopAction field's value. +func (s *ReceiptAction) SetStopAction(v *StopAction) *ReceiptAction { + s.StopAction = v + return s +} + +// SetWorkmailAction sets the WorkmailAction field's value. +func (s *ReceiptAction) SetWorkmailAction(v *WorkmailAction) *ReceiptAction { + s.WorkmailAction = v + return s +} + +// A receipt IP address filter enables you to specify whether to accept or reject +// mail originating from an IP address or range of IP addresses. +// +// For information about setting up IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +type ReceiptFilter struct { + _ struct{} `type:"structure"` + + // A structure that provides the IP addresses to block or allow, and whether + // to block or allow incoming mail from them. + // + // IpFilter is a required field + IpFilter *ReceiptIpFilter `type:"structure" required:"true"` + + // The name of the IP address filter. The name must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Start and end with a letter or number. + // + // * Contain less than 64 characters. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReceiptFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReceiptFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptFilter"} + if s.IpFilter == nil { + invalidParams.Add(request.NewErrParamRequired("IpFilter")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.IpFilter != nil { + if err := s.IpFilter.Validate(); err != nil { + invalidParams.AddNested("IpFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIpFilter sets the IpFilter field's value. +func (s *ReceiptFilter) SetIpFilter(v *ReceiptIpFilter) *ReceiptFilter { + s.IpFilter = v + return s +} + +// SetName sets the Name field's value. +func (s *ReceiptFilter) SetName(v string) *ReceiptFilter { + s.Name = &v + return s +} + +// A receipt IP address filter enables you to specify whether to accept or reject +// mail originating from an IP address or range of IP addresses. +// +// For information about setting up IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). 
+type ReceiptIpFilter struct {
+	_ struct{} `type:"structure"`
+
+	// A single IP address or a range of IP addresses that you want to block or
+	// allow, specified in Classless Inter-Domain Routing (CIDR) notation. An example
+	// of a single IP address is 10.0.0.1. An example of a range of IP addresses
+	// is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317 (https://tools.ietf.org/html/rfc2317).
+	//
+	// Cidr is a required field
+	Cidr *string `type:"string" required:"true"`
+
+	// Indicates whether to block or allow incoming mail from the specified IP addresses.
+	//
+	// Policy is a required field
+	Policy *string `type:"string" required:"true" enum:"ReceiptFilterPolicy"`
+}
+
+// String returns the string representation
+func (s ReceiptIpFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReceiptIpFilter) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReceiptIpFilter) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReceiptIpFilter"}
+	if s.Cidr == nil {
+		invalidParams.Add(request.NewErrParamRequired("Cidr"))
+	}
+	if s.Policy == nil {
+		invalidParams.Add(request.NewErrParamRequired("Policy"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCidr sets the Cidr field's value.
+func (s *ReceiptIpFilter) SetCidr(v string) *ReceiptIpFilter {
+	s.Cidr = &v
+	return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *ReceiptIpFilter) SetPolicy(v string) *ReceiptIpFilter {
+	s.Policy = &v
+	return s
+}
+
+// Receipt rules enable you to specify which actions Amazon SES should take
+// when it receives mail on behalf of one or more email addresses or domains
+// that you own.
+//
+// Each receipt rule defines a set of email addresses or domains that it applies
+// to. If the email addresses or domains match at least one recipient address
+// of the message, Amazon SES executes all of the receipt rule's actions on
+// the message.
+//
+// For information about setting up receipt rules, see the Amazon SES Developer
+// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html).
+type ReceiptRule struct {
+	_ struct{} `type:"structure"`
+
+	// An ordered list of actions to perform on messages that match at least one
+	// of the recipient email addresses or domains specified in the receipt rule.
+	Actions []*ReceiptAction `type:"list"`
+
+	// If true, the receipt rule is active. The default value is false.
+	Enabled *bool `type:"boolean"`
+
+	// The name of the receipt rule. The name must:
+	//
+	//    * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9),
+	//    underscores (_), or dashes (-).
+	//
+	//    * Start and end with a letter or number.
+	//
+	//    * Contain less than 64 characters.
+	//
+	// Name is a required field
+	Name *string `type:"string" required:"true"`
+
+	// The recipient domains and email addresses that the receipt rule applies to.
+	// If this field is not specified, this rule will match all recipients under
+	// all verified domains.
+	Recipients []*string `type:"list"`
+
+	// If true, then messages that this receipt rule applies to are scanned for
+	// spam and viruses. The default value is false.
+	ScanEnabled *bool `type:"boolean"`
+
+	// Specifies whether Amazon SES should require that incoming email is delivered
+	// over a connection encrypted with Transport Layer Security (TLS).
If this + // parameter is set to Require, Amazon SES will bounce emails that are not received + // over TLS. The default is Optional. + TlsPolicy *string `type:"string" enum:"TlsPolicy"` +} + +// String returns the string representation +func (s ReceiptRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReceiptRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptRule"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActions sets the Actions field's value. +func (s *ReceiptRule) SetActions(v []*ReceiptAction) *ReceiptRule { + s.Actions = v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *ReceiptRule) SetEnabled(v bool) *ReceiptRule { + s.Enabled = &v + return s +} + +// SetName sets the Name field's value. +func (s *ReceiptRule) SetName(v string) *ReceiptRule { + s.Name = &v + return s +} + +// SetRecipients sets the Recipients field's value. +func (s *ReceiptRule) SetRecipients(v []*string) *ReceiptRule { + s.Recipients = v + return s +} + +// SetScanEnabled sets the ScanEnabled field's value. +func (s *ReceiptRule) SetScanEnabled(v bool) *ReceiptRule { + s.ScanEnabled = &v + return s +} + +// SetTlsPolicy sets the TlsPolicy field's value. +func (s *ReceiptRule) SetTlsPolicy(v string) *ReceiptRule { + s.TlsPolicy = &v + return s +} + +// Information about a receipt rule set. +// +// A receipt rule set is a collection of rules that specify what Amazon SES +// should do with mail it receives on behalf of your account's verified domains. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +type ReceiptRuleSetMetadata struct { + _ struct{} `type:"structure"` + + // The date and time the receipt rule set was created. + CreatedTimestamp *time.Time `type:"timestamp"` + + // The name of the receipt rule set. The name must: + // + // * This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), + // underscores (_), or dashes (-). + // + // * Start and end with a letter or number. + // + // * Contain less than 64 characters. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ReceiptRuleSetMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptRuleSetMetadata) GoString() string { + return s.String() +} + +// SetCreatedTimestamp sets the CreatedTimestamp field's value. +func (s *ReceiptRuleSetMetadata) SetCreatedTimestamp(v time.Time) *ReceiptRuleSetMetadata { + s.CreatedTimestamp = &v + return s +} + +// SetName sets the Name field's value. +func (s *ReceiptRuleSetMetadata) SetName(v string) *ReceiptRuleSetMetadata { + s.Name = &v + return s +} + +// Recipient-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. 
+// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type RecipientDsnFields struct { + _ struct{} `type:"structure"` + + // The action performed by the reporting mail transfer agent (MTA) as a result + // of its attempt to deliver the message to the recipient address. This is required + // by RFC 3464 (https://tools.ietf.org/html/rfc3464). + // + // Action is a required field + Action *string `type:"string" required:"true" enum:"DsnAction"` + + // An extended explanation of what went wrong; this is usually an SMTP response. + // See RFC 3463 (https://tools.ietf.org/html/rfc3463) for the correct formatting + // of this parameter. + DiagnosticCode *string `type:"string"` + + // Additional X-headers to include in the DSN. + ExtensionFields []*ExtensionField `type:"list"` + + // The email address that the message was ultimately delivered to. This corresponds + // to the Final-Recipient in the DSN. If not specified, FinalRecipient will + // be set to the Recipient specified in the BouncedRecipientInfo structure. + // Either FinalRecipient or the recipient in BouncedRecipientInfo must be a + // recipient of the original bounced message. + // + // Do not prepend the FinalRecipient email address with rfc 822;, as described + // in RFC 3798 (https://tools.ietf.org/html/rfc3798). + FinalRecipient *string `type:"string"` + + // The time the final delivery attempt was made, in RFC 822 (https://www.ietf.org/rfc/rfc0822.txt) + // date-time format. + LastAttemptDate *time.Time `type:"timestamp"` + + // The MTA to which the remote MTA attempted to deliver the message, formatted + // as specified in RFC 3464 (https://tools.ietf.org/html/rfc3464) (mta-name-type; + // mta-name). This parameter typically applies only to propagating synchronous + // bounces. + RemoteMta *string `type:"string"` + + // The status code that indicates what went wrong. This is required by RFC 3464 + // (https://tools.ietf.org/html/rfc3464). + // + // Status is a required field + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RecipientDsnFields) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecipientDsnFields) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RecipientDsnFields) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RecipientDsnFields"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.ExtensionFields != nil { + for i, v := range s.ExtensionFields { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtensionFields", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *RecipientDsnFields) SetAction(v string) *RecipientDsnFields { + s.Action = &v + return s +} + +// SetDiagnosticCode sets the DiagnosticCode field's value. +func (s *RecipientDsnFields) SetDiagnosticCode(v string) *RecipientDsnFields { + s.DiagnosticCode = &v + return s +} + +// SetExtensionFields sets the ExtensionFields field's value. 
+func (s *RecipientDsnFields) SetExtensionFields(v []*ExtensionField) *RecipientDsnFields { + s.ExtensionFields = v + return s +} + +// SetFinalRecipient sets the FinalRecipient field's value. +func (s *RecipientDsnFields) SetFinalRecipient(v string) *RecipientDsnFields { + s.FinalRecipient = &v + return s +} + +// SetLastAttemptDate sets the LastAttemptDate field's value. +func (s *RecipientDsnFields) SetLastAttemptDate(v time.Time) *RecipientDsnFields { + s.LastAttemptDate = &v + return s +} + +// SetRemoteMta sets the RemoteMta field's value. +func (s *RecipientDsnFields) SetRemoteMta(v string) *RecipientDsnFields { + s.RemoteMta = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *RecipientDsnFields) SetStatus(v string) *RecipientDsnFields { + s.Status = &v + return s +} + +// Represents a request to reorder the receipt rules within a receipt rule set. +// You use receipt rule sets to receive email with Amazon SES. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type ReorderReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // A list of the specified receipt rule set's receipt rules in the order that + // you want to put them. + // + // RuleNames is a required field + RuleNames []*string `type:"list" required:"true"` + + // The name of the receipt rule set to reorder. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReorderReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReorderReceiptRuleSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReorderReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReorderReceiptRuleSetInput"} + if s.RuleNames == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNames")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleNames sets the RuleNames field's value. +func (s *ReorderReceiptRuleSetInput) SetRuleNames(v []*string) *ReorderReceiptRuleSetInput { + s.RuleNames = v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *ReorderReceiptRuleSetInput) SetRuleSetName(v string) *ReorderReceiptRuleSetInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. +type ReorderReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReorderReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReorderReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Contains information about the reputation settings for a configuration set. +type ReputationOptions struct { + _ struct{} `type:"structure"` + + // The date and time at which the reputation metrics for the configuration set + // were last reset. Resetting these metrics is known as a fresh start. 
+ // + // When you disable email sending for a configuration set using UpdateConfigurationSetSendingEnabled + // and later re-enable it, the reputation metrics for the configuration set + // (but not for the entire Amazon SES account) are reset. + // + // If email sending for the configuration set has never been disabled and later + // re-enabled, the value of this attribute is null. + LastFreshStart *time.Time `type:"timestamp"` + + // Describes whether or not Amazon SES publishes reputation metrics for the + // configuration set, such as bounce and complaint rates, to Amazon CloudWatch. + // + // If the value is true, reputation metrics are published. If the value is false, + // reputation metrics are not published. The default value is false. + ReputationMetricsEnabled *bool `type:"boolean"` + + // Describes whether email sending is enabled or disabled for the configuration + // set. If the value is true, then Amazon SES will send emails that use the + // configuration set. If the value is false, Amazon SES will not send emails + // that use the configuration set. The default value is true. You can change + // this setting using UpdateConfigurationSetSendingEnabled. + SendingEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ReputationOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReputationOptions) GoString() string { + return s.String() +} + +// SetLastFreshStart sets the LastFreshStart field's value. +func (s *ReputationOptions) SetLastFreshStart(v time.Time) *ReputationOptions { + s.LastFreshStart = &v + return s +} + +// SetReputationMetricsEnabled sets the ReputationMetricsEnabled field's value. +func (s *ReputationOptions) SetReputationMetricsEnabled(v bool) *ReputationOptions { + s.ReputationMetricsEnabled = &v + return s +} + +// SetSendingEnabled sets the SendingEnabled field's value. +func (s *ReputationOptions) SetSendingEnabled(v bool) *ReputationOptions { + s.SendingEnabled = &v + return s +} + +// When included in a receipt rule, this action saves the received message to +// an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes +// a notification to Amazon Simple Notification Service (Amazon SNS). +// +// To enable Amazon SES to write emails to your Amazon S3 bucket, use an AWS +// KMS key to encrypt your emails, or publish to an Amazon SNS topic of another +// account, Amazon SES must have permission to access those resources. For information +// about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// When you save your emails to an Amazon S3 bucket, the maximum email size +// (including headers) is 30 MB. Emails larger than that will bounce. +// +// For information about specifying Amazon S3 actions in receipt rules, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html). +type S3Action struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket that incoming email will be saved to. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The customer master key that Amazon SES should use to encrypt your emails + // before saving them to the Amazon S3 bucket. 
You can use the default master + // key or a custom master key you created in AWS KMS as follows: + // + // * To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. + // For example, if your AWS account ID is 123456789012 and you want to use + // the default master key in the US West (Oregon) region, the ARN of the + // default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. + // If you use the default master key, you don't need to perform any extra + // steps to give Amazon SES permission to use the key. + // + // * To use a custom master key you created in AWS KMS, provide the ARN of + // the master key and ensure that you add a statement to your key's policy + // to give Amazon SES permission to use it. For more information about giving + // permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + // + // For more information about key policies, see the AWS KMS Developer Guide + // (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html). If + // you do not specify a master key, Amazon SES will not encrypt your emails. + // + // Your mail is encrypted by Amazon SES using the Amazon S3 encryption client + // before the mail is submitted to Amazon S3 for storage. It is not encrypted + // using Amazon S3 server-side encryption. This means that you must use the + // Amazon S3 encryption client to decrypt the email after retrieving it from + // Amazon S3, as the service has no access to use your AWS KMS keys for decryption. + // This encryption client is currently available with the AWS SDK for Java (http://aws.amazon.com/sdk-for-java/) + // and AWS SDK for Ruby (http://aws.amazon.com/sdk-for-ruby/) only. For more + // information about client-side encryption using AWS KMS master keys, see the + // Amazon S3 Developer Guide (http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). + KmsKeyArn *string `type:"string"` + + // The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory + // name that enables you to store similar data under the same directory in a + // bucket. + ObjectKeyPrefix *string `type:"string"` + + // The ARN of the Amazon SNS topic to notify when the message is saved to the + // Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s S3Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Action) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Action) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Action"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *S3Action) SetBucketName(v string) *S3Action { + s.BucketName = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. 
+func (s *S3Action) SetKmsKeyArn(v string) *S3Action { + s.KmsKeyArn = &v + return s +} + +// SetObjectKeyPrefix sets the ObjectKeyPrefix field's value. +func (s *S3Action) SetObjectKeyPrefix(v string) *S3Action { + s.ObjectKeyPrefix = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *S3Action) SetTopicArn(v string) *S3Action { + s.TopicArn = &v + return s +} + +// When included in a receipt rule, this action publishes a notification to +// Amazon Simple Notification Service (Amazon SNS). This action includes a complete +// copy of the email content in the Amazon SNS notifications. Amazon SNS notifications +// for all other actions simply provide information about the email. They do +// not include the email content itself. +// +// If you own the Amazon SNS topic, you don't need to do anything to give Amazon +// SES permission to publish emails to it. However, if you don't own the Amazon +// SNS topic, you need to attach a policy to the topic to give Amazon SES permissions +// to access it. For information about giving permissions, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// You can only publish emails that are 150 KB or less (including the header) +// to Amazon SNS. Larger emails will bounce. If you anticipate emails larger +// than 150 KB, use the S3 action instead. +// +// For information about using a receipt rule to publish an Amazon SNS notification, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html). +type SNSAction struct { + _ struct{} `type:"structure"` + + // The encoding to use for the email within the Amazon SNS notification. UTF-8 + // is easier to use, but may not preserve all special characters when a message + // was encoded with a different encoding format. Base64 preserves all special + // characters. The default value is UTF-8. + Encoding *string `type:"string" enum:"SNSActionEncoding"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example + // of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // + // TopicArn is a required field + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SNSAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SNSAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SNSAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SNSAction"} + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncoding sets the Encoding field's value. +func (s *SNSAction) SetEncoding(v string) *SNSAction { + s.Encoding = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *SNSAction) SetTopicArn(v string) *SNSAction { + s.TopicArn = &v + return s +} + +// Contains the topic ARN associated with an Amazon Simple Notification Service +// (Amazon SNS) event destination. 
+// +// Event destinations, such as Amazon SNS, are associated with configuration +// sets, which enable you to publish email sending events. For information about +// using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type SNSDestination struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon SNS topic that email sending events will be published + // to. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // + // TopicARN is a required field + TopicARN *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SNSDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SNSDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SNSDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SNSDestination"} + if s.TopicARN == nil { + invalidParams.Add(request.NewErrParamRequired("TopicARN")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTopicARN sets the TopicARN field's value. +func (s *SNSDestination) SetTopicARN(v string) *SNSDestination { + s.TopicARN = &v + return s +} + +// Represents a request to send a bounce message to the sender of an email you +// received through Amazon SES. +type SendBounceInput struct { + _ struct{} `type:"structure"` + + // The address to use in the "From" header of the bounce message. This must + // be an identity that you have verified with Amazon SES. + // + // BounceSender is a required field + BounceSender *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the address in the "From" header of the bounce. For more information + // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + BounceSenderArn *string `type:"string"` + + // A list of recipients of the bounced message, including the information required + // to create the Delivery Status Notifications (DSNs) for the recipients. You + // must specify at least one BouncedRecipientInfo in the list. + // + // BouncedRecipientInfoList is a required field + BouncedRecipientInfoList []*BouncedRecipientInfo `type:"list" required:"true"` + + // Human-readable text for the bounce message to explain the failure. If not + // specified, the text will be auto-generated based on the bounced recipient + // information. + Explanation *string `type:"string"` + + // Message-related DSN fields. If not specified, Amazon SES will choose the + // values. + MessageDsn *MessageDsn `type:"structure"` + + // The message ID of the message to be bounced. 
+ // + // OriginalMessageId is a required field + OriginalMessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendBounceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBounceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendBounceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendBounceInput"} + if s.BounceSender == nil { + invalidParams.Add(request.NewErrParamRequired("BounceSender")) + } + if s.BouncedRecipientInfoList == nil { + invalidParams.Add(request.NewErrParamRequired("BouncedRecipientInfoList")) + } + if s.OriginalMessageId == nil { + invalidParams.Add(request.NewErrParamRequired("OriginalMessageId")) + } + if s.BouncedRecipientInfoList != nil { + for i, v := range s.BouncedRecipientInfoList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "BouncedRecipientInfoList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.MessageDsn != nil { + if err := s.MessageDsn.Validate(); err != nil { + invalidParams.AddNested("MessageDsn", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBounceSender sets the BounceSender field's value. +func (s *SendBounceInput) SetBounceSender(v string) *SendBounceInput { + s.BounceSender = &v + return s +} + +// SetBounceSenderArn sets the BounceSenderArn field's value. +func (s *SendBounceInput) SetBounceSenderArn(v string) *SendBounceInput { + s.BounceSenderArn = &v + return s +} + +// SetBouncedRecipientInfoList sets the BouncedRecipientInfoList field's value. +func (s *SendBounceInput) SetBouncedRecipientInfoList(v []*BouncedRecipientInfo) *SendBounceInput { + s.BouncedRecipientInfoList = v + return s +} + +// SetExplanation sets the Explanation field's value. +func (s *SendBounceInput) SetExplanation(v string) *SendBounceInput { + s.Explanation = &v + return s +} + +// SetMessageDsn sets the MessageDsn field's value. +func (s *SendBounceInput) SetMessageDsn(v *MessageDsn) *SendBounceInput { + s.MessageDsn = v + return s +} + +// SetOriginalMessageId sets the OriginalMessageId field's value. +func (s *SendBounceInput) SetOriginalMessageId(v string) *SendBounceInput { + s.OriginalMessageId = &v + return s +} + +// Represents a unique message ID. +type SendBounceOutput struct { + _ struct{} `type:"structure"` + + // The message ID of the bounce message. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendBounceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBounceOutput) GoString() string { + return s.String() +} + +// SetMessageId sets the MessageId field's value. +func (s *SendBounceOutput) SetMessageId(v string) *SendBounceOutput { + s.MessageId = &v + return s +} + +// Represents a request to send a templated email to multiple destinations using +// Amazon SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +type SendBulkTemplatedEmailInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set to use when you send an email using SendBulkTemplatedEmail. 
+ ConfigurationSetName *string `type:"string"` + + // A list of tags, in the form of name/value pairs, to apply to an email that + // you send to a destination using SendBulkTemplatedEmail. + DefaultTags []*MessageTag `type:"list"` + + // A list of replacement values to apply to the template when replacement data + // is not specified in a Destination object. These values act as a default or + // fallback option when no other data is available. + // + // The template data is a JSON object, typically consisting of key-value pairs + // in which the keys correspond to replacement tags in the email template. + DefaultTemplateData *string `type:"string"` + + // One or more Destination objects. All of the recipients in a Destination will + // receive the same version of the email. You can specify up to 50 Destination + // objects within a Destinations array. + // + // Destinations is a required field + Destinations []*BulkEmailDestination `type:"list" required:"true"` + + // The reply-to email address(es) for the message. If the recipient replies + // to the message, each reply-to address will receive the reply. + ReplyToAddresses []*string `type:"list"` + + // The email address that bounces and complaints will be forwarded to when feedback + // forwarding is enabled. If the message cannot be delivered to the recipient, + // then an error message will be returned from the recipient's ISP; this message + // will then be forwarded to the email address specified by the ReturnPath parameter. + // The ReturnPath parameter is never overwritten. This email address must be + // either individually verified with Amazon SES, or from a domain that has been + // verified with Amazon SES. + ReturnPath *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + ReturnPathArn *string `type:"string"` + + // The email address that is sending the email. This email address must be either + // individually verified with Amazon SES, or from a domain that has been verified + // with Amazon SES. For information about verifying identities, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). + // + // If you are sending on behalf of another user and have been permitted to do + // so by a sending authorization policy, then you must also specify the SourceArn + // parameter. For more information about sending authorization, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // + // Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 + // (https://tools.ietf.org/html/rfc6531). 
For this reason, the local part of + // a source email address (the part of the email address that precedes the @ + // sign) may only contain 7-bit ASCII characters (https://en.wikipedia.org/wiki/Email_address#Local-part). + // If the domain part of an address (the part after the @ sign) contains non-ASCII + // characters, they must be encoded using Punycode, as described in RFC3492 + // (https://tools.ietf.org/html/rfc3492.html). The sender name (also known as + // the friendly name) may contain non-ASCII characters. These characters must + // be encoded using MIME encoded-word syntax, as described in RFC 2047 (https://tools.ietf.org/html/rfc2047). + // MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. + // + // Source is a required field + Source *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + SourceArn *string `type:"string"` + + // The template to use when sending this email. + // + // Template is a required field + Template *string `type:"string" required:"true"` + + // The ARN of the template to use when sending this email. + TemplateArn *string `type:"string"` +} + +// String returns the string representation +func (s SendBulkTemplatedEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBulkTemplatedEmailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendBulkTemplatedEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendBulkTemplatedEmailInput"} + if s.Destinations == nil { + invalidParams.Add(request.NewErrParamRequired("Destinations")) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Template == nil { + invalidParams.Add(request.NewErrParamRequired("Template")) + } + if s.DefaultTags != nil { + for i, v := range s.DefaultTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DefaultTags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Destinations != nil { + for i, v := range s.Destinations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Destinations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *SendBulkTemplatedEmailInput) SetConfigurationSetName(v string) *SendBulkTemplatedEmailInput { + s.ConfigurationSetName = &v + return s +} + +// SetDefaultTags sets the DefaultTags field's value. 
+func (s *SendBulkTemplatedEmailInput) SetDefaultTags(v []*MessageTag) *SendBulkTemplatedEmailInput { + s.DefaultTags = v + return s +} + +// SetDefaultTemplateData sets the DefaultTemplateData field's value. +func (s *SendBulkTemplatedEmailInput) SetDefaultTemplateData(v string) *SendBulkTemplatedEmailInput { + s.DefaultTemplateData = &v + return s +} + +// SetDestinations sets the Destinations field's value. +func (s *SendBulkTemplatedEmailInput) SetDestinations(v []*BulkEmailDestination) *SendBulkTemplatedEmailInput { + s.Destinations = v + return s +} + +// SetReplyToAddresses sets the ReplyToAddresses field's value. +func (s *SendBulkTemplatedEmailInput) SetReplyToAddresses(v []*string) *SendBulkTemplatedEmailInput { + s.ReplyToAddresses = v + return s +} + +// SetReturnPath sets the ReturnPath field's value. +func (s *SendBulkTemplatedEmailInput) SetReturnPath(v string) *SendBulkTemplatedEmailInput { + s.ReturnPath = &v + return s +} + +// SetReturnPathArn sets the ReturnPathArn field's value. +func (s *SendBulkTemplatedEmailInput) SetReturnPathArn(v string) *SendBulkTemplatedEmailInput { + s.ReturnPathArn = &v + return s +} + +// SetSource sets the Source field's value. +func (s *SendBulkTemplatedEmailInput) SetSource(v string) *SendBulkTemplatedEmailInput { + s.Source = &v + return s +} + +// SetSourceArn sets the SourceArn field's value. +func (s *SendBulkTemplatedEmailInput) SetSourceArn(v string) *SendBulkTemplatedEmailInput { + s.SourceArn = &v + return s +} + +// SetTemplate sets the Template field's value. +func (s *SendBulkTemplatedEmailInput) SetTemplate(v string) *SendBulkTemplatedEmailInput { + s.Template = &v + return s +} + +// SetTemplateArn sets the TemplateArn field's value. +func (s *SendBulkTemplatedEmailInput) SetTemplateArn(v string) *SendBulkTemplatedEmailInput { + s.TemplateArn = &v + return s +} + +type SendBulkTemplatedEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendBulkTemplatedEmail action. + // + // Status is a required field + Status []*BulkEmailDestinationStatus `type:"list" required:"true"` +} + +// String returns the string representation +func (s SendBulkTemplatedEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBulkTemplatedEmailOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *SendBulkTemplatedEmailOutput) SetStatus(v []*BulkEmailDestinationStatus) *SendBulkTemplatedEmailOutput { + s.Status = v + return s +} + +// Represents a request to send a custom verification email to a specified recipient. +type SendCustomVerificationEmailInput struct { + _ struct{} `type:"structure"` + + // Name of a configuration set to use when sending the verification email. + ConfigurationSetName *string `type:"string"` + + // The email address to verify. + // + // EmailAddress is a required field + EmailAddress *string `type:"string" required:"true"` + + // The name of the custom verification email template to use when sending the + // verification email. 
+ // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendCustomVerificationEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendCustomVerificationEmailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendCustomVerificationEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendCustomVerificationEmailInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *SendCustomVerificationEmailInput) SetConfigurationSetName(v string) *SendCustomVerificationEmailInput { + s.ConfigurationSetName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *SendCustomVerificationEmailInput) SetEmailAddress(v string) *SendCustomVerificationEmailInput { + s.EmailAddress = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *SendCustomVerificationEmailInput) SetTemplateName(v string) *SendCustomVerificationEmailInput { + s.TemplateName = &v + return s +} + +// The response received when attempting to send the custom verification email. +type SendCustomVerificationEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendCustomVerificationEmail + // operation. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendCustomVerificationEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendCustomVerificationEmailOutput) GoString() string { + return s.String() +} + +// SetMessageId sets the MessageId field's value. +func (s *SendCustomVerificationEmailOutput) SetMessageId(v string) *SendCustomVerificationEmailOutput { + s.MessageId = &v + return s +} + +// Represents sending statistics data. Each SendDataPoint contains statistics +// for a 15-minute period of sending activity. +type SendDataPoint struct { + _ struct{} `type:"structure"` + + // Number of emails that have bounced. + Bounces *int64 `type:"long"` + + // Number of unwanted emails that were rejected by recipients. + Complaints *int64 `type:"long"` + + // Number of emails that have been sent. + DeliveryAttempts *int64 `type:"long"` + + // Number of emails rejected by Amazon SES. + Rejects *int64 `type:"long"` + + // Time of the data point. + Timestamp *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s SendDataPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDataPoint) GoString() string { + return s.String() +} + +// SetBounces sets the Bounces field's value. +func (s *SendDataPoint) SetBounces(v int64) *SendDataPoint { + s.Bounces = &v + return s +} + +// SetComplaints sets the Complaints field's value. +func (s *SendDataPoint) SetComplaints(v int64) *SendDataPoint { + s.Complaints = &v + return s +} + +// SetDeliveryAttempts sets the DeliveryAttempts field's value. 
+func (s *SendDataPoint) SetDeliveryAttempts(v int64) *SendDataPoint { + s.DeliveryAttempts = &v + return s +} + +// SetRejects sets the Rejects field's value. +func (s *SendDataPoint) SetRejects(v int64) *SendDataPoint { + s.Rejects = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *SendDataPoint) SetTimestamp(v time.Time) *SendDataPoint { + s.Timestamp = &v + return s +} + +// Represents a request to send a single formatted email using Amazon SES. For +// more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-formatted.html). +type SendEmailInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set to use when you send an email using SendEmail. + ConfigurationSetName *string `type:"string"` + + // The destination for this email, composed of To:, CC:, and BCC: fields. + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // The message to be sent. + // + // Message is a required field + Message *Message `type:"structure" required:"true"` + + // The reply-to email address(es) for the message. If the recipient replies + // to the message, each reply-to address will receive the reply. + ReplyToAddresses []*string `type:"list"` + + // The email address that bounces and complaints will be forwarded to when feedback + // forwarding is enabled. If the message cannot be delivered to the recipient, + // then an error message will be returned from the recipient's ISP; this message + // will then be forwarded to the email address specified by the ReturnPath parameter. + // The ReturnPath parameter is never overwritten. This email address must be + // either individually verified with Amazon SES, or from a domain that has been + // verified with Amazon SES. + ReturnPath *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + ReturnPathArn *string `type:"string"` + + // The email address that is sending the email. This email address must be either + // individually verified with Amazon SES, or from a domain that has been verified + // with Amazon SES. For information about verifying identities, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). + // + // If you are sending on behalf of another user and have been permitted to do + // so by a sending authorization policy, then you must also specify the SourceArn + // parameter. For more information about sending authorization, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). 
+ // + // Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 + // (https://tools.ietf.org/html/rfc6531). For this reason, the local part of + // a source email address (the part of the email address that precedes the @ + // sign) may only contain 7-bit ASCII characters (https://en.wikipedia.org/wiki/Email_address#Local-part). + // If the domain part of an address (the part after the @ sign) contains non-ASCII + // characters, they must be encoded using Punycode, as described in RFC3492 + // (https://tools.ietf.org/html/rfc3492.html). The sender name (also known as + // the friendly name) may contain non-ASCII characters. These characters must + // be encoded using MIME encoded-word syntax, as described in RFC 2047 (https://tools.ietf.org/html/rfc2047). + // MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. + // + // Source is a required field + Source *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + SourceArn *string `type:"string"` + + // A list of tags, in the form of name/value pairs, to apply to an email that + // you send using SendEmail. Tags correspond to characteristics of the email + // that you define, so that you can publish email sending events. + Tags []*MessageTag `type:"list"` +} + +// String returns the string representation +func (s SendEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendEmailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendEmailInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Message != nil { + if err := s.Message.Validate(); err != nil { + invalidParams.AddNested("Message", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *SendEmailInput) SetConfigurationSetName(v string) *SendEmailInput { + s.ConfigurationSetName = &v + return s +} + +// SetDestination sets the Destination field's value. 
+func (s *SendEmailInput) SetDestination(v *Destination) *SendEmailInput { + s.Destination = v + return s +} + +// SetMessage sets the Message field's value. +func (s *SendEmailInput) SetMessage(v *Message) *SendEmailInput { + s.Message = v + return s +} + +// SetReplyToAddresses sets the ReplyToAddresses field's value. +func (s *SendEmailInput) SetReplyToAddresses(v []*string) *SendEmailInput { + s.ReplyToAddresses = v + return s +} + +// SetReturnPath sets the ReturnPath field's value. +func (s *SendEmailInput) SetReturnPath(v string) *SendEmailInput { + s.ReturnPath = &v + return s +} + +// SetReturnPathArn sets the ReturnPathArn field's value. +func (s *SendEmailInput) SetReturnPathArn(v string) *SendEmailInput { + s.ReturnPathArn = &v + return s +} + +// SetSource sets the Source field's value. +func (s *SendEmailInput) SetSource(v string) *SendEmailInput { + s.Source = &v + return s +} + +// SetSourceArn sets the SourceArn field's value. +func (s *SendEmailInput) SetSourceArn(v string) *SendEmailInput { + s.SourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SendEmailInput) SetTags(v []*MessageTag) *SendEmailInput { + s.Tags = v + return s +} + +// Represents a unique message ID. +type SendEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendEmail action. + // + // MessageId is a required field + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendEmailOutput) GoString() string { + return s.String() +} + +// SetMessageId sets the MessageId field's value. +func (s *SendEmailOutput) SetMessageId(v string) *SendEmailOutput { + s.MessageId = &v + return s +} + +// Represents a request to send a single raw email using Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). +type SendRawEmailInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set to use when you send an email using SendRawEmail. + ConfigurationSetName *string `type:"string"` + + // A list of destinations for the message, consisting of To:, CC:, and BCC: + // addresses. + Destinations []*string `type:"list"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to specify a particular "From" address in the header of the raw email. + // + // Instead of using this parameter, you can use the X-header X-SES-FROM-ARN + // in the raw message of the email. If you use both the FromArn parameter and + // the corresponding X-header, Amazon SES uses the value of the FromArn parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + FromArn *string `type:"string"` + + // The raw email message itself. The message has to meet the following criteria: + // + // * The message has to contain a header and a body, separated by a blank + // line. + // + // * All of the required header fields must be present in the message. + // + // * Each part of a multipart MIME message must be formatted properly. 
+	//
+	//    * Attachments must be of a content type that Amazon SES supports. For
+	//    a list of unsupported content types, see Unsupported Attachment Types
+	//    (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html)
+	//    in the Amazon SES Developer Guide.
+	//
+	//    * The entire message must be base64-encoded.
+	//
+	//    * If any of the MIME parts in your message contain content that is outside
+	//    of the 7-bit ASCII character range, we highly recommend that you encode
+	//    that content. For more information, see Sending Raw Email (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html)
+	//    in the Amazon SES Developer Guide.
+	//
+	//    * Per RFC 5321 (https://tools.ietf.org/html/rfc5321#section-4.5.3.1.6),
+	//    the maximum length of each line of text, including the <CRLF>, must not
+	//    exceed 1,000 characters.
+	//
+	// RawMessage is a required field
+	RawMessage *RawMessage `type:"structure" required:"true"`
+
+	// This parameter is used only for sending authorization. It is the ARN of the
+	// identity that is associated with the sending authorization policy that permits
+	// you to use the email address specified in the ReturnPath parameter.
+	//
+	// For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com)
+	// attaches a policy to it that authorizes you to use feedback@example.com,
+	// then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com,
+	// and the ReturnPath to be feedback@example.com.
+	//
+	// Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN
+	// in the raw message of the email. If you use both the ReturnPathArn parameter
+	// and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn
+	// parameter.
+	//
+	// For information about when to use this parameter, see the description of
+	// SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html).
+	ReturnPathArn *string `type:"string"`
+
+	// The identity's email address. If you do not provide a value for this parameter,
+	// you must specify a "From" address in the raw text of the message. (You can
+	// also specify both.)
+	//
+	// Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531
+	// (https://tools.ietf.org/html/rfc6531). For this reason, the local part of
+	// a source email address (the part of the email address that precedes the @
+	// sign) may only contain 7-bit ASCII characters (https://en.wikipedia.org/wiki/Email_address#Local-part).
+	// If the domain part of an address (the part after the @ sign) contains non-ASCII
+	// characters, they must be encoded using Punycode, as described in RFC3492
+	// (https://tools.ietf.org/html/rfc3492.html). The sender name (also known as
+	// the friendly name) may contain non-ASCII characters. These characters must
+	// be encoded using MIME encoded-word syntax, as described in RFC 2047 (https://tools.ietf.org/html/rfc2047).
+	// MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=.
+	//
+	// If you specify the Source parameter and have feedback forwarding enabled,
+	// then bounces and complaints will be sent to this email address. This takes
+	// precedence over any Return-Path header that you might include in the raw
+	// text of the message.
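+	//
+	// A minimal raw send, as an illustrative sketch only: it assumes an
+	// initialized *ses.SES client named svc, the addresses are placeholders,
+	// and error handling is omitted. The SDK performs the base64 encoding of
+	// the Data blob when it serializes the request.
+	//
+	//    raw := "From: sender@example.com\r\n" +
+	//        "To: recipient@example.com\r\n" +
+	//        "Subject: Hello\r\n" +
+	//        "MIME-Version: 1.0\r\n" +
+	//        "Content-Type: text/plain; charset=UTF-8\r\n" +
+	//        "\r\n" +
+	//        "Hi there."
+	//    out, err := svc.SendRawEmail(&ses.SendRawEmailInput{
+	//        RawMessage: &ses.RawMessage{Data: []byte(raw)},
+	//    })
+	//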
+ Source *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. + // + // Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN + // in the raw message of the email. If you use both the SourceArn parameter + // and the corresponding X-header, Amazon SES uses the value of the SourceArn + // parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + SourceArn *string `type:"string"` + + // A list of tags, in the form of name/value pairs, to apply to an email that + // you send using SendRawEmail. Tags correspond to characteristics of the email + // that you define, so that you can publish email sending events. + Tags []*MessageTag `type:"list"` +} + +// String returns the string representation +func (s SendRawEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendRawEmailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendRawEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendRawEmailInput"} + if s.RawMessage == nil { + invalidParams.Add(request.NewErrParamRequired("RawMessage")) + } + if s.RawMessage != nil { + if err := s.RawMessage.Validate(); err != nil { + invalidParams.AddNested("RawMessage", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *SendRawEmailInput) SetConfigurationSetName(v string) *SendRawEmailInput { + s.ConfigurationSetName = &v + return s +} + +// SetDestinations sets the Destinations field's value. +func (s *SendRawEmailInput) SetDestinations(v []*string) *SendRawEmailInput { + s.Destinations = v + return s +} + +// SetFromArn sets the FromArn field's value. +func (s *SendRawEmailInput) SetFromArn(v string) *SendRawEmailInput { + s.FromArn = &v + return s +} + +// SetRawMessage sets the RawMessage field's value. +func (s *SendRawEmailInput) SetRawMessage(v *RawMessage) *SendRawEmailInput { + s.RawMessage = v + return s +} + +// SetReturnPathArn sets the ReturnPathArn field's value. +func (s *SendRawEmailInput) SetReturnPathArn(v string) *SendRawEmailInput { + s.ReturnPathArn = &v + return s +} + +// SetSource sets the Source field's value. +func (s *SendRawEmailInput) SetSource(v string) *SendRawEmailInput { + s.Source = &v + return s +} + +// SetSourceArn sets the SourceArn field's value. 
+func (s *SendRawEmailInput) SetSourceArn(v string) *SendRawEmailInput { + s.SourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SendRawEmailInput) SetTags(v []*MessageTag) *SendRawEmailInput { + s.Tags = v + return s +} + +// Represents a unique message ID. +type SendRawEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendRawEmail action. + // + // MessageId is a required field + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendRawEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendRawEmailOutput) GoString() string { + return s.String() +} + +// SetMessageId sets the MessageId field's value. +func (s *SendRawEmailOutput) SetMessageId(v string) *SendRawEmailOutput { + s.MessageId = &v + return s +} + +// Represents a request to send a templated email using Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +type SendTemplatedEmailInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set to use when you send an email using SendTemplatedEmail. + ConfigurationSetName *string `type:"string"` + + // The destination for this email, composed of To:, CC:, and BCC: fields. A + // Destination can include up to 50 recipients across these three fields. + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // The reply-to email address(es) for the message. If the recipient replies + // to the message, each reply-to address will receive the reply. + ReplyToAddresses []*string `type:"list"` + + // The email address that bounces and complaints will be forwarded to when feedback + // forwarding is enabled. If the message cannot be delivered to the recipient, + // then an error message will be returned from the recipient's ISP; this message + // will then be forwarded to the email address specified by the ReturnPath parameter. + // The ReturnPath parameter is never overwritten. This email address must be + // either individually verified with Amazon SES, or from a domain that has been + // verified with Amazon SES. + ReturnPath *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + ReturnPathArn *string `type:"string"` + + // The email address that is sending the email. This email address must be either + // individually verified with Amazon SES, or from a domain that has been verified + // with Amazon SES. 
For information about verifying identities, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). + // + // If you are sending on behalf of another user and have been permitted to do + // so by a sending authorization policy, then you must also specify the SourceArn + // parameter. For more information about sending authorization, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // + // Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531 + // (https://tools.ietf.org/html/rfc6531). For this reason, the local part of + // a source email address (the part of the email address that precedes the @ + // sign) may only contain 7-bit ASCII characters (https://en.wikipedia.org/wiki/Email_address#Local-part). + // If the domain part of an address (the part after the @ sign) contains non-ASCII + // characters, they must be encoded using Punycode, as described in RFC3492 + // (https://tools.ietf.org/html/rfc3492.html). The sender name (also known as + // the friendly name) may contain non-ASCII characters. These characters must + // be encoded using MIME encoded-word syntax, as described inRFC 2047 (https://tools.ietf.org/html/rfc2047). + // MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. + // + // Source is a required field + Source *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + SourceArn *string `type:"string"` + + // A list of tags, in the form of name/value pairs, to apply to an email that + // you send using SendTemplatedEmail. Tags correspond to characteristics of + // the email that you define, so that you can publish email sending events. + Tags []*MessageTag `type:"list"` + + // The template to use when sending this email. + // + // Template is a required field + Template *string `type:"string" required:"true"` + + // The ARN of the template to use when sending this email. + TemplateArn *string `type:"string"` + + // A list of replacement values to apply to the template. This parameter is + // a JSON object, typically consisting of key-value pairs in which the keys + // correspond to replacement tags in the email template. + // + // TemplateData is a required field + TemplateData *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendTemplatedEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendTemplatedEmailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SendTemplatedEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendTemplatedEmailInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Template == nil { + invalidParams.Add(request.NewErrParamRequired("Template")) + } + if s.TemplateData == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateData")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *SendTemplatedEmailInput) SetConfigurationSetName(v string) *SendTemplatedEmailInput { + s.ConfigurationSetName = &v + return s +} + +// SetDestination sets the Destination field's value. +func (s *SendTemplatedEmailInput) SetDestination(v *Destination) *SendTemplatedEmailInput { + s.Destination = v + return s +} + +// SetReplyToAddresses sets the ReplyToAddresses field's value. +func (s *SendTemplatedEmailInput) SetReplyToAddresses(v []*string) *SendTemplatedEmailInput { + s.ReplyToAddresses = v + return s +} + +// SetReturnPath sets the ReturnPath field's value. +func (s *SendTemplatedEmailInput) SetReturnPath(v string) *SendTemplatedEmailInput { + s.ReturnPath = &v + return s +} + +// SetReturnPathArn sets the ReturnPathArn field's value. +func (s *SendTemplatedEmailInput) SetReturnPathArn(v string) *SendTemplatedEmailInput { + s.ReturnPathArn = &v + return s +} + +// SetSource sets the Source field's value. +func (s *SendTemplatedEmailInput) SetSource(v string) *SendTemplatedEmailInput { + s.Source = &v + return s +} + +// SetSourceArn sets the SourceArn field's value. +func (s *SendTemplatedEmailInput) SetSourceArn(v string) *SendTemplatedEmailInput { + s.SourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SendTemplatedEmailInput) SetTags(v []*MessageTag) *SendTemplatedEmailInput { + s.Tags = v + return s +} + +// SetTemplate sets the Template field's value. +func (s *SendTemplatedEmailInput) SetTemplate(v string) *SendTemplatedEmailInput { + s.Template = &v + return s +} + +// SetTemplateArn sets the TemplateArn field's value. +func (s *SendTemplatedEmailInput) SetTemplateArn(v string) *SendTemplatedEmailInput { + s.TemplateArn = &v + return s +} + +// SetTemplateData sets the TemplateData field's value. +func (s *SendTemplatedEmailInput) SetTemplateData(v string) *SendTemplatedEmailInput { + s.TemplateData = &v + return s +} + +type SendTemplatedEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendTemplatedEmail action. + // + // MessageId is a required field + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendTemplatedEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendTemplatedEmailOutput) GoString() string { + return s.String() +} + +// SetMessageId sets the MessageId field's value. +func (s *SendTemplatedEmailOutput) SetMessageId(v string) *SendTemplatedEmailOutput { + s.MessageId = &v + return s +} + +// Represents a request to set a receipt rule set as the active receipt rule +// set. 
You use receipt rule sets to receive email with Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type SetActiveReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to make active. Setting this value to null + // disables all email receiving. + RuleSetName *string `type:"string"` +} + +// String returns the string representation +func (s SetActiveReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetActiveReceiptRuleSetInput) GoString() string { + return s.String() +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *SetActiveReceiptRuleSetInput) SetRuleSetName(v string) *SetActiveReceiptRuleSetInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. +type SetActiveReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetActiveReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetActiveReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request to enable or disable Amazon SES Easy DKIM signing for +// an identity. For more information about setting up Easy DKIM, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +type SetIdentityDkimEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether DKIM signing is enabled for an identity. Set to true to enable + // DKIM signing for this identity; false to disable it. + // + // DkimEnabled is a required field + DkimEnabled *bool `type:"boolean" required:"true"` + + // The identity for which DKIM signing should be enabled or disabled. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIdentityDkimEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityDkimEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityDkimEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityDkimEnabledInput"} + if s.DkimEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("DkimEnabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDkimEnabled sets the DkimEnabled field's value. +func (s *SetIdentityDkimEnabledInput) SetDkimEnabled(v bool) *SetIdentityDkimEnabledInput { + s.DkimEnabled = &v + return s +} + +// SetIdentity sets the Identity field's value. +func (s *SetIdentityDkimEnabledInput) SetIdentity(v string) *SetIdentityDkimEnabledInput { + s.Identity = &v + return s +} + +// An empty element returned on a successful request. 
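+//
+// For illustration only, a sketch of the call that returns this element,
+// enabling Easy DKIM for a verified domain. It assumes an initialized *ses.SES
+// client named svc; "example.com" is a placeholder identity and error handling
+// is omitted.
+//
+//    _, err := svc.SetIdentityDkimEnabled(&ses.SetIdentityDkimEnabledInput{
+//        Identity:    aws.String("example.com"),
+//        DkimEnabled: aws.Bool(true),
+//    })
+//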
+type SetIdentityDkimEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityDkimEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityDkimEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to enable or disable whether Amazon SES forwards you +// bounce and complaint notifications through email. For information about email +// feedback forwarding, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-email.html). +type SetIdentityFeedbackForwardingEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether Amazon SES will forward bounce and complaint notifications as + // email. true specifies that Amazon SES will forward bounce and complaint notifications + // as email, in addition to any Amazon SNS topic publishing otherwise specified. + // false specifies that Amazon SES will publish bounce and complaint notifications + // only through Amazon SNS. This value can only be set to false when Amazon + // SNS topics are set for both Bounce and Complaint notification types. + // + // ForwardingEnabled is a required field + ForwardingEnabled *bool `type:"boolean" required:"true"` + + // The identity for which to set bounce and complaint notification forwarding. + // Examples: user@example.com, example.com. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIdentityFeedbackForwardingEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityFeedbackForwardingEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityFeedbackForwardingEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityFeedbackForwardingEnabledInput"} + if s.ForwardingEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("ForwardingEnabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForwardingEnabled sets the ForwardingEnabled field's value. +func (s *SetIdentityFeedbackForwardingEnabledInput) SetForwardingEnabled(v bool) *SetIdentityFeedbackForwardingEnabledInput { + s.ForwardingEnabled = &v + return s +} + +// SetIdentity sets the Identity field's value. +func (s *SetIdentityFeedbackForwardingEnabledInput) SetIdentity(v string) *SetIdentityFeedbackForwardingEnabledInput { + s.Identity = &v + return s +} + +// An empty element returned on a successful request. +type SetIdentityFeedbackForwardingEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityFeedbackForwardingEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityFeedbackForwardingEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to set whether Amazon SES includes the original email +// headers in the Amazon SNS notifications of a specified type. 
For information +// about notifications, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). +type SetIdentityHeadersInNotificationsEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether Amazon SES includes the original email headers in Amazon SNS + // notifications of the specified notification type. A value of true specifies + // that Amazon SES will include headers in notifications, and a value of false + // specifies that Amazon SES will not include headers in notifications. + // + // This value can only be set when NotificationType is already set to use a + // particular Amazon SNS topic. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` + + // The identity for which to enable or disable headers in notifications. Examples: + // user@example.com, example.com. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` + + // The notification type for which to enable or disable headers in notifications. + // + // NotificationType is a required field + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` +} + +// String returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityHeadersInNotificationsEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityHeadersInNotificationsEnabledInput"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.NotificationType == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *SetIdentityHeadersInNotificationsEnabledInput) SetEnabled(v bool) *SetIdentityHeadersInNotificationsEnabledInput { + s.Enabled = &v + return s +} + +// SetIdentity sets the Identity field's value. +func (s *SetIdentityHeadersInNotificationsEnabledInput) SetIdentity(v string) *SetIdentityHeadersInNotificationsEnabledInput { + s.Identity = &v + return s +} + +// SetNotificationType sets the NotificationType field's value. +func (s *SetIdentityHeadersInNotificationsEnabledInput) SetNotificationType(v string) *SetIdentityHeadersInNotificationsEnabledInput { + s.NotificationType = &v + return s +} + +// An empty element returned on a successful request. +type SetIdentityHeadersInNotificationsEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to enable or disable the Amazon SES custom MAIL FROM +// domain setup for a verified identity. For information about using a custom +// MAIL FROM domain, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). 
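+//
+// For illustration only, such a request might be sent like this, assuming an
+// initialized *ses.SES client named svc (the identity and MAIL FROM domain are
+// placeholders; error handling omitted):
+//
+//    _, err := svc.SetIdentityMailFromDomain(&ses.SetIdentityMailFromDomainInput{
+//        Identity:            aws.String("example.com"),
+//        MailFromDomain:      aws.String("bounce.example.com"),
+//        BehaviorOnMXFailure: aws.String("UseDefaultValue"),
+//    })
+//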
+type SetIdentityMailFromDomainInput struct { + _ struct{} `type:"structure"` + + // The action that you want Amazon SES to take if it cannot successfully read + // the required MX record when you send an email. If you choose UseDefaultValue, + // Amazon SES will use amazonses.com (or a subdomain of that) as the MAIL FROM + // domain. If you choose RejectMessage, Amazon SES will return a MailFromDomainNotVerified + // error and not send the email. + // + // The action specified in BehaviorOnMXFailure is taken when the custom MAIL + // FROM domain setup is in the Pending, Failed, and TemporaryFailure states. + BehaviorOnMXFailure *string `type:"string" enum:"BehaviorOnMXFailure"` + + // The verified identity for which you want to enable or disable the specified + // custom MAIL FROM domain. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` + + // The custom MAIL FROM domain that you want the verified identity to use. The + // MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not + // be used in a "From" address if the MAIL FROM domain is the destination of + // email feedback forwarding (for more information, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html)), + // and 3) not be used to receive emails. A value of null disables the custom + // MAIL FROM setting for the identity. + MailFromDomain *string `type:"string"` +} + +// String returns the string representation +func (s SetIdentityMailFromDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityMailFromDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityMailFromDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityMailFromDomainInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBehaviorOnMXFailure sets the BehaviorOnMXFailure field's value. +func (s *SetIdentityMailFromDomainInput) SetBehaviorOnMXFailure(v string) *SetIdentityMailFromDomainInput { + s.BehaviorOnMXFailure = &v + return s +} + +// SetIdentity sets the Identity field's value. +func (s *SetIdentityMailFromDomainInput) SetIdentity(v string) *SetIdentityMailFromDomainInput { + s.Identity = &v + return s +} + +// SetMailFromDomain sets the MailFromDomain field's value. +func (s *SetIdentityMailFromDomainInput) SetMailFromDomain(v string) *SetIdentityMailFromDomainInput { + s.MailFromDomain = &v + return s +} + +// An empty element returned on a successful request. +type SetIdentityMailFromDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityMailFromDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityMailFromDomainOutput) GoString() string { + return s.String() +} + +// Represents a request to specify the Amazon SNS topic to which Amazon SES +// will publish bounce, complaint, or delivery notifications for emails sent +// with that identity as the Source. For information about Amazon SES notifications, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). 
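+//
+// For illustration only, such a request might be sent like this, assuming an
+// initialized *ses.SES client named svc (the identity and topic ARN are
+// placeholders; error handling omitted):
+//
+//    _, err := svc.SetIdentityNotificationTopic(&ses.SetIdentityNotificationTopicInput{
+//        Identity:         aws.String("example.com"),
+//        NotificationType: aws.String("Bounce"),
+//        SnsTopic:         aws.String("arn:aws:sns:us-west-2:123456789012:MyTopic"),
+//    })
+//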
+type SetIdentityNotificationTopicInput struct { + _ struct{} `type:"structure"` + + // The identity (email address or domain) that you want to set the Amazon SNS + // topic for. + // + // You can only specify a verified identity for this parameter. + // + // You can specify an identity by using its name or by using its Amazon Resource + // Name (ARN). The following examples are all valid identities: sender@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // Identity is a required field + Identity *string `type:"string" required:"true"` + + // The type of notifications that will be published to the specified Amazon + // SNS topic. + // + // NotificationType is a required field + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic. If the parameter + // is omitted from the request or a null value is passed, SnsTopic is cleared + // and publishing is disabled. + SnsTopic *string `type:"string"` +} + +// String returns the string representation +func (s SetIdentityNotificationTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityNotificationTopicInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityNotificationTopicInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityNotificationTopicInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.NotificationType == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIdentity sets the Identity field's value. +func (s *SetIdentityNotificationTopicInput) SetIdentity(v string) *SetIdentityNotificationTopicInput { + s.Identity = &v + return s +} + +// SetNotificationType sets the NotificationType field's value. +func (s *SetIdentityNotificationTopicInput) SetNotificationType(v string) *SetIdentityNotificationTopicInput { + s.NotificationType = &v + return s +} + +// SetSnsTopic sets the SnsTopic field's value. +func (s *SetIdentityNotificationTopicInput) SetSnsTopic(v string) *SetIdentityNotificationTopicInput { + s.SnsTopic = &v + return s +} + +// An empty element returned on a successful request. +type SetIdentityNotificationTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityNotificationTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityNotificationTopicOutput) GoString() string { + return s.String() +} + +// Represents a request to set the position of a receipt rule in a receipt rule +// set. You use receipt rule sets to receive email with Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type SetReceiptRulePositionInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule after which to place the specified receipt rule. + After *string `type:"string"` + + // The name of the receipt rule to reposition. 
+ // + // RuleName is a required field + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that contains the receipt rule to reposition. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetReceiptRulePositionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetReceiptRulePositionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetReceiptRulePositionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetReceiptRulePositionInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAfter sets the After field's value. +func (s *SetReceiptRulePositionInput) SetAfter(v string) *SetReceiptRulePositionInput { + s.After = &v + return s +} + +// SetRuleName sets the RuleName field's value. +func (s *SetReceiptRulePositionInput) SetRuleName(v string) *SetReceiptRulePositionInput { + s.RuleName = &v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *SetReceiptRulePositionInput) SetRuleSetName(v string) *SetReceiptRulePositionInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. +type SetReceiptRulePositionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetReceiptRulePositionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetReceiptRulePositionOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action terminates the evaluation of +// the receipt rule set and, optionally, publishes a notification to Amazon +// Simple Notification Service (Amazon SNS). +// +// For information about setting a stop action in a receipt rule, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-stop.html). +type StopAction struct { + _ struct{} `type:"structure"` + + // The name of the RuleSet that is being stopped. + // + // Scope is a required field + Scope *string `type:"string" required:"true" enum:"StopScope"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s StopAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StopAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopAction"} + if s.Scope == nil { + invalidParams.Add(request.NewErrParamRequired("Scope")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetScope sets the Scope field's value. +func (s *StopAction) SetScope(v string) *StopAction { + s.Scope = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *StopAction) SetTopicArn(v string) *StopAction { + s.TopicArn = &v + return s +} + +// The content of the email, composed of a subject line, an HTML part, and a +// text-only part. +type Template struct { + _ struct{} `type:"structure"` + + // The HTML body of the email. + HtmlPart *string `type:"string"` + + // The subject line of the email. + SubjectPart *string `type:"string"` + + // The name of the template. You will refer to this name when you send email + // using the SendTemplatedEmail or SendBulkTemplatedEmail operations. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` + + // The email body that will be visible to recipients whose email clients do + // not display HTML. + TextPart *string `type:"string"` +} + +// String returns the string representation +func (s Template) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Template) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Template) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Template"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHtmlPart sets the HtmlPart field's value. +func (s *Template) SetHtmlPart(v string) *Template { + s.HtmlPart = &v + return s +} + +// SetSubjectPart sets the SubjectPart field's value. +func (s *Template) SetSubjectPart(v string) *Template { + s.SubjectPart = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *Template) SetTemplateName(v string) *Template { + s.TemplateName = &v + return s +} + +// SetTextPart sets the TextPart field's value. +func (s *Template) SetTextPart(v string) *Template { + s.TextPart = &v + return s +} + +// Contains information about an email template. +type TemplateMetadata struct { + _ struct{} `type:"structure"` + + // The time and date the template was created. + CreatedTimestamp *time.Time `type:"timestamp"` + + // The name of the template. + Name *string `type:"string"` +} + +// String returns the string representation +func (s TemplateMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TemplateMetadata) GoString() string { + return s.String() +} + +// SetCreatedTimestamp sets the CreatedTimestamp field's value. +func (s *TemplateMetadata) SetCreatedTimestamp(v time.Time) *TemplateMetadata { + s.CreatedTimestamp = &v + return s +} + +// SetName sets the Name field's value. +func (s *TemplateMetadata) SetName(v string) *TemplateMetadata { + s.Name = &v + return s +} + +type TestRenderTemplateInput struct { + _ struct{} `type:"structure"` + + // A list of replacement values to apply to the template. This parameter is + // a JSON object, typically consisting of key-value pairs in which the keys + // correspond to replacement tags in the email template. 
+ // + // TemplateData is a required field + TemplateData *string `type:"string" required:"true"` + + // The name of the template that you want to render. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TestRenderTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRenderTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TestRenderTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestRenderTemplateInput"} + if s.TemplateData == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateData")) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateData sets the TemplateData field's value. +func (s *TestRenderTemplateInput) SetTemplateData(v string) *TestRenderTemplateInput { + s.TemplateData = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *TestRenderTemplateInput) SetTemplateName(v string) *TestRenderTemplateInput { + s.TemplateName = &v + return s +} + +type TestRenderTemplateOutput struct { + _ struct{} `type:"structure"` + + // The complete MIME message rendered by applying the data in the TemplateData + // parameter to the template specified in the TemplateName parameter. + RenderedTemplate *string `type:"string"` +} + +// String returns the string representation +func (s TestRenderTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRenderTemplateOutput) GoString() string { + return s.String() +} + +// SetRenderedTemplate sets the RenderedTemplate field's value. +func (s *TestRenderTemplateOutput) SetRenderedTemplate(v string) *TestRenderTemplateOutput { + s.RenderedTemplate = &v + return s +} + +// A domain that is used to redirect email recipients to an Amazon SES-operated +// domain. This domain captures open and click events generated by Amazon SES +// emails. +// +// For more information, see Configuring Custom Domains to Handle Open and Click +// Tracking (ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) +// in the Amazon SES Developer Guide. +type TrackingOptions struct { + _ struct{} `type:"structure"` + + // The custom subdomain that will be used to redirect email recipients to the + // Amazon SES event tracking domain. + CustomRedirectDomain *string `type:"string"` +} + +// String returns the string representation +func (s TrackingOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrackingOptions) GoString() string { + return s.String() +} + +// SetCustomRedirectDomain sets the CustomRedirectDomain field's value. +func (s *TrackingOptions) SetCustomRedirectDomain(v string) *TrackingOptions { + s.CustomRedirectDomain = &v + return s +} + +// Represents a request to enable or disable the email sending capabilities +// for your entire Amazon SES account. +type UpdateAccountSendingEnabledInput struct { + _ struct{} `type:"structure"` + + // Describes whether email sending is enabled or disabled for your Amazon SES + // account in the current AWS Region. 
+ Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateAccountSendingEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountSendingEnabledInput) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *UpdateAccountSendingEnabledInput) SetEnabled(v bool) *UpdateAccountSendingEnabledInput { + s.Enabled = &v + return s +} + +type UpdateAccountSendingEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccountSendingEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountSendingEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to update the event destination of a configuration set. +// Configuration sets enable you to publish email sending events. For information +// about using configuration sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html). +type UpdateConfigurationSetEventDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that contains the event destination that + // you want to update. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // The event destination object that you want to apply to the specified configuration + // set. + // + // EventDestination is a required field + EventDestination *EventDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateConfigurationSetEventDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationSetEventDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateConfigurationSetEventDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateConfigurationSetEventDestinationInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + if s.EventDestination == nil { + invalidParams.Add(request.NewErrParamRequired("EventDestination")) + } + if s.EventDestination != nil { + if err := s.EventDestination.Validate(); err != nil { + invalidParams.AddNested("EventDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *UpdateConfigurationSetEventDestinationInput) SetConfigurationSetName(v string) *UpdateConfigurationSetEventDestinationInput { + s.ConfigurationSetName = &v + return s +} + +// SetEventDestination sets the EventDestination field's value. +func (s *UpdateConfigurationSetEventDestinationInput) SetEventDestination(v *EventDestination) *UpdateConfigurationSetEventDestinationInput { + s.EventDestination = v + return s +} + +// An empty element returned on a successful request. 
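A minimal sketch, not part of the generated file, showing how the UpdateAccountSendingEnabledInput defined above could be used to pause sending for the whole account; it assumes svc is an *ses.SES client created the same way as in the earlier example:

package sesexample // hypothetical package, for illustration only

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// pauseAccountSending disables email sending for the entire account in the
// client's current region; passing true instead would re-enable it.
func pauseAccountSending(svc *ses.SES) error {
	_, err := svc.UpdateAccountSendingEnabled(&ses.UpdateAccountSendingEnabledInput{
		Enabled: aws.Bool(false),
	})
	return err
}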
+type UpdateConfigurationSetEventDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateConfigurationSetEventDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationSetEventDestinationOutput) GoString() string { + return s.String() +} + +// Represents a request to modify the reputation metric publishing settings +// for a configuration set. +type UpdateConfigurationSetReputationMetricsEnabledInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to update. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // Describes whether or not Amazon SES will publish reputation metrics for the + // configuration set, such as bounce and complaint rates, to Amazon CloudWatch. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s UpdateConfigurationSetReputationMetricsEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationSetReputationMetricsEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateConfigurationSetReputationMetricsEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateConfigurationSetReputationMetricsEnabledInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *UpdateConfigurationSetReputationMetricsEnabledInput) SetConfigurationSetName(v string) *UpdateConfigurationSetReputationMetricsEnabledInput { + s.ConfigurationSetName = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *UpdateConfigurationSetReputationMetricsEnabledInput) SetEnabled(v bool) *UpdateConfigurationSetReputationMetricsEnabledInput { + s.Enabled = &v + return s +} + +type UpdateConfigurationSetReputationMetricsEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateConfigurationSetReputationMetricsEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationSetReputationMetricsEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to enable or disable the email sending capabilities +// for a specific configuration set. +type UpdateConfigurationSetSendingEnabledInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to update. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // Describes whether email sending is enabled or disabled for the configuration + // set. 
+ // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s UpdateConfigurationSetSendingEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationSetSendingEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateConfigurationSetSendingEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateConfigurationSetSendingEnabledInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *UpdateConfigurationSetSendingEnabledInput) SetConfigurationSetName(v string) *UpdateConfigurationSetSendingEnabledInput { + s.ConfigurationSetName = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *UpdateConfigurationSetSendingEnabledInput) SetEnabled(v bool) *UpdateConfigurationSetSendingEnabledInput { + s.Enabled = &v + return s +} + +type UpdateConfigurationSetSendingEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateConfigurationSetSendingEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationSetSendingEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to update the tracking options for a configuration set. +type UpdateConfigurationSetTrackingOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set for which you want to update the custom + // tracking domain. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // A domain that is used to redirect email recipients to an Amazon SES-operated + // domain. This domain captures open and click events generated by Amazon SES + // emails. + // + // For more information, see Configuring Custom Domains to Handle Open and Click + // Tracking (ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) + // in the Amazon SES Developer Guide. + // + // TrackingOptions is a required field + TrackingOptions *TrackingOptions `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateConfigurationSetTrackingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationSetTrackingOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateConfigurationSetTrackingOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateConfigurationSetTrackingOptionsInput"} + if s.ConfigurationSetName == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationSetName")) + } + if s.TrackingOptions == nil { + invalidParams.Add(request.NewErrParamRequired("TrackingOptions")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *UpdateConfigurationSetTrackingOptionsInput) SetConfigurationSetName(v string) *UpdateConfigurationSetTrackingOptionsInput { + s.ConfigurationSetName = &v + return s +} + +// SetTrackingOptions sets the TrackingOptions field's value. +func (s *UpdateConfigurationSetTrackingOptionsInput) SetTrackingOptions(v *TrackingOptions) *UpdateConfigurationSetTrackingOptionsInput { + s.TrackingOptions = v + return s +} + +// An empty element returned on a successful request. +type UpdateConfigurationSetTrackingOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateConfigurationSetTrackingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationSetTrackingOptionsOutput) GoString() string { + return s.String() +} + +// Represents a request to update an existing custom verification email template. +type UpdateCustomVerificationEmailTemplateInput struct { + _ struct{} `type:"structure"` + + // The URL that the recipient of the verification email is sent to if his or + // her address is not successfully verified. + FailureRedirectionURL *string `type:"string"` + + // The email address that the custom verification email is sent from. + FromEmailAddress *string `type:"string"` + + // The URL that the recipient of the verification email is sent to if his or + // her address is successfully verified. + SuccessRedirectionURL *string `type:"string"` + + // The content of the custom verification email. The total size of the email + // must be less than 10 MB. The message body may contain HTML, with some limitations. + // For more information, see Custom Verification Email Frequently Asked Questions + // (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/custom-verification-emails.html#custom-verification-emails-faq) + // in the Amazon SES Developer Guide. + TemplateContent *string `type:"string"` + + // The name of the custom verification email template that you want to update. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` + + // The subject line of the custom verification email. + TemplateSubject *string `type:"string"` +} + +// String returns the string representation +func (s UpdateCustomVerificationEmailTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCustomVerificationEmailTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateCustomVerificationEmailTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateCustomVerificationEmailTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFailureRedirectionURL sets the FailureRedirectionURL field's value. +func (s *UpdateCustomVerificationEmailTemplateInput) SetFailureRedirectionURL(v string) *UpdateCustomVerificationEmailTemplateInput { + s.FailureRedirectionURL = &v + return s +} + +// SetFromEmailAddress sets the FromEmailAddress field's value. +func (s *UpdateCustomVerificationEmailTemplateInput) SetFromEmailAddress(v string) *UpdateCustomVerificationEmailTemplateInput { + s.FromEmailAddress = &v + return s +} + +// SetSuccessRedirectionURL sets the SuccessRedirectionURL field's value. +func (s *UpdateCustomVerificationEmailTemplateInput) SetSuccessRedirectionURL(v string) *UpdateCustomVerificationEmailTemplateInput { + s.SuccessRedirectionURL = &v + return s +} + +// SetTemplateContent sets the TemplateContent field's value. +func (s *UpdateCustomVerificationEmailTemplateInput) SetTemplateContent(v string) *UpdateCustomVerificationEmailTemplateInput { + s.TemplateContent = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *UpdateCustomVerificationEmailTemplateInput) SetTemplateName(v string) *UpdateCustomVerificationEmailTemplateInput { + s.TemplateName = &v + return s +} + +// SetTemplateSubject sets the TemplateSubject field's value. +func (s *UpdateCustomVerificationEmailTemplateInput) SetTemplateSubject(v string) *UpdateCustomVerificationEmailTemplateInput { + s.TemplateSubject = &v + return s +} + +type UpdateCustomVerificationEmailTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateCustomVerificationEmailTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCustomVerificationEmailTemplateOutput) GoString() string { + return s.String() +} + +// Represents a request to update a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). +type UpdateReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the updated receipt rule information. + // + // Rule is a required field + Rule *ReceiptRule `type:"structure" required:"true"` + + // The name of the receipt rule set that the receipt rule belongs to. + // + // RuleSetName is a required field + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateReceiptRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateReceiptRuleInput"} + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + if s.Rule != nil { + if err := s.Rule.Validate(); err != nil { + invalidParams.AddNested("Rule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRule sets the Rule field's value. +func (s *UpdateReceiptRuleInput) SetRule(v *ReceiptRule) *UpdateReceiptRuleInput { + s.Rule = v + return s +} + +// SetRuleSetName sets the RuleSetName field's value. +func (s *UpdateReceiptRuleInput) SetRuleSetName(v string) *UpdateReceiptRuleInput { + s.RuleSetName = &v + return s +} + +// An empty element returned on a successful request. +type UpdateReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateReceiptRuleOutput) GoString() string { + return s.String() +} + +type UpdateTemplateInput struct { + _ struct{} `type:"structure"` + + // The content of the email, composed of a subject line, an HTML part, and a + // text-only part. + // + // Template is a required field + Template *Template `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTemplateInput"} + if s.Template == nil { + invalidParams.Add(request.NewErrParamRequired("Template")) + } + if s.Template != nil { + if err := s.Template.Validate(); err != nil { + invalidParams.AddNested("Template", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplate sets the Template field's value. +func (s *UpdateTemplateInput) SetTemplate(v *Template) *UpdateTemplateInput { + s.Template = v + return s +} + +type UpdateTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTemplateOutput) GoString() string { + return s.String() +} + +// Represents a request to generate the CNAME records needed to set up Easy +// DKIM with Amazon SES. For more information about setting up Easy DKIM, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +type VerifyDomainDkimInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to be verified for Easy DKIM signing. 
+ // + // Domain is a required field + Domain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainDkimInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainDkimInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyDomainDkimInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyDomainDkimInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomain sets the Domain field's value. +func (s *VerifyDomainDkimInput) SetDomain(v string) *VerifyDomainDkimInput { + s.Domain = &v + return s +} + +// Returns CNAME records that you must publish to the DNS server of your domain +// to set up Easy DKIM with Amazon SES. +type VerifyDomainDkimOutput struct { + _ struct{} `type:"structure"` + + // A set of character strings that represent the domain's identity. If the identity + // is an email address, the tokens represent the domain of that address. + // + // Using these tokens, you will need to create DNS CNAME records that point + // to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually + // detect that you have updated your DNS records; this detection process may + // take up to 72 hours. Upon successful detection, Amazon SES will be able to + // DKIM-sign emails originating from that domain. + // + // For more information about creating DNS records using DKIM tokens, go to + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + // + // DkimTokens is a required field + DkimTokens []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainDkimOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainDkimOutput) GoString() string { + return s.String() +} + +// SetDkimTokens sets the DkimTokens field's value. +func (s *VerifyDomainDkimOutput) SetDkimTokens(v []*string) *VerifyDomainDkimOutput { + s.DkimTokens = v + return s +} + +// Represents a request to begin Amazon SES domain verification and to generate +// the TXT records that you must publish to the DNS server of your domain to +// complete the verification. For information about domain verification, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-domains.html). +type VerifyDomainIdentityInput struct { + _ struct{} `type:"structure"` + + // The domain to be verified. + // + // Domain is a required field + Domain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
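A hedged sketch, illustration only rather than generated code, of how the VerifyDomainDkim input and output types shown above might be consumed; the domain is a placeholder, and the printed record shape follows the Easy DKIM guide linked in the comments:

package sesexample // hypothetical package, for illustration only

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// printDkimTokens requests Easy DKIM tokens for a placeholder domain and prints
// the CNAME records that would have to be published in DNS for that domain.
// svc is assumed to be an already-configured *ses.SES client.
func printDkimTokens(svc *ses.SES) error {
	out, err := svc.VerifyDomainDkim(&ses.VerifyDomainDkimInput{
		Domain: aws.String("example.com"), // placeholder domain
	})
	if err != nil {
		return err
	}
	for _, token := range out.DkimTokens {
		t := aws.StringValue(token)
		fmt.Printf("%s._domainkey.example.com CNAME %s.dkim.amazonses.com\n", t, t)
	}
	return nil
}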
+func (s *VerifyDomainIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyDomainIdentityInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomain sets the Domain field's value. +func (s *VerifyDomainIdentityInput) SetDomain(v string) *VerifyDomainIdentityInput { + s.Domain = &v + return s +} + +// Returns a TXT record that you must publish to the DNS server of your domain +// to complete domain verification with Amazon SES. +type VerifyDomainIdentityOutput struct { + _ struct{} `type:"structure"` + + // A TXT record that you must place in the DNS settings of the domain to complete + // domain verification with Amazon SES. + // + // As Amazon SES searches for the TXT record, the domain's verification status + // is "Pending". When Amazon SES detects the record, the domain's verification + // status changes to "Success". If Amazon SES is unable to detect the record + // within 72 hours, the domain's verification status changes to "Failed." In + // that case, if you still want to verify the domain, you must restart the verification + // process from the beginning. + // + // VerificationToken is a required field + VerificationToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainIdentityOutput) GoString() string { + return s.String() +} + +// SetVerificationToken sets the VerificationToken field's value. +func (s *VerifyDomainIdentityOutput) SetVerificationToken(v string) *VerifyDomainIdentityOutput { + s.VerificationToken = &v + return s +} + +// Represents a request to begin email address verification with Amazon SES. +// For information about email address verification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). +type VerifyEmailAddressInput struct { + _ struct{} `type:"structure"` + + // The email address to be verified. + // + // EmailAddress is a required field + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyEmailAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailAddressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyEmailAddressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyEmailAddressInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *VerifyEmailAddressInput) SetEmailAddress(v string) *VerifyEmailAddressInput { + s.EmailAddress = &v + return s +} + +type VerifyEmailAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s VerifyEmailAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailAddressOutput) GoString() string { + return s.String() +} + +// Represents a request to begin email address verification with Amazon SES. 
+// For information about email address verification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). +type VerifyEmailIdentityInput struct { + _ struct{} `type:"structure"` + + // The email address to be verified. + // + // EmailAddress is a required field + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyEmailIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyEmailIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyEmailIdentityInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *VerifyEmailIdentityInput) SetEmailAddress(v string) *VerifyEmailIdentityInput { + s.EmailAddress = &v + return s +} + +// An empty element returned on a successful request. +type VerifyEmailIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s VerifyEmailIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailIdentityOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action calls Amazon WorkMail and, optionally, +// publishes a notification to Amazon Simple Notification Service (Amazon SNS). +// You will typically not use this action directly because Amazon WorkMail adds +// the rule automatically during its setup procedure. +// +// For information using a receipt rule to call Amazon WorkMail, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-workmail.html). +type WorkmailAction struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail + // organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7. + // For information about Amazon WorkMail organizations, see the Amazon WorkMail + // Administrator Guide (http://docs.aws.amazon.com/workmail/latest/adminguide/organizations_overview.html). + // + // OrganizationArn is a required field + OrganizationArn *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s WorkmailAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkmailAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *WorkmailAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WorkmailAction"} + if s.OrganizationArn == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOrganizationArn sets the OrganizationArn field's value. +func (s *WorkmailAction) SetOrganizationArn(v string) *WorkmailAction { + s.OrganizationArn = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *WorkmailAction) SetTopicArn(v string) *WorkmailAction { + s.TopicArn = &v + return s +} + +const ( + // BehaviorOnMXFailureUseDefaultValue is a BehaviorOnMXFailure enum value + BehaviorOnMXFailureUseDefaultValue = "UseDefaultValue" + + // BehaviorOnMXFailureRejectMessage is a BehaviorOnMXFailure enum value + BehaviorOnMXFailureRejectMessage = "RejectMessage" +) + +const ( + // BounceTypeDoesNotExist is a BounceType enum value + BounceTypeDoesNotExist = "DoesNotExist" + + // BounceTypeMessageTooLarge is a BounceType enum value + BounceTypeMessageTooLarge = "MessageTooLarge" + + // BounceTypeExceededQuota is a BounceType enum value + BounceTypeExceededQuota = "ExceededQuota" + + // BounceTypeContentRejected is a BounceType enum value + BounceTypeContentRejected = "ContentRejected" + + // BounceTypeUndefined is a BounceType enum value + BounceTypeUndefined = "Undefined" + + // BounceTypeTemporaryFailure is a BounceType enum value + BounceTypeTemporaryFailure = "TemporaryFailure" +) + +const ( + // BulkEmailStatusSuccess is a BulkEmailStatus enum value + BulkEmailStatusSuccess = "Success" + + // BulkEmailStatusMessageRejected is a BulkEmailStatus enum value + BulkEmailStatusMessageRejected = "MessageRejected" + + // BulkEmailStatusMailFromDomainNotVerified is a BulkEmailStatus enum value + BulkEmailStatusMailFromDomainNotVerified = "MailFromDomainNotVerified" + + // BulkEmailStatusConfigurationSetDoesNotExist is a BulkEmailStatus enum value + BulkEmailStatusConfigurationSetDoesNotExist = "ConfigurationSetDoesNotExist" + + // BulkEmailStatusTemplateDoesNotExist is a BulkEmailStatus enum value + BulkEmailStatusTemplateDoesNotExist = "TemplateDoesNotExist" + + // BulkEmailStatusAccountSuspended is a BulkEmailStatus enum value + BulkEmailStatusAccountSuspended = "AccountSuspended" + + // BulkEmailStatusAccountThrottled is a BulkEmailStatus enum value + BulkEmailStatusAccountThrottled = "AccountThrottled" + + // BulkEmailStatusAccountDailyQuotaExceeded is a BulkEmailStatus enum value + BulkEmailStatusAccountDailyQuotaExceeded = "AccountDailyQuotaExceeded" + + // BulkEmailStatusInvalidSendingPoolName is a BulkEmailStatus enum value + BulkEmailStatusInvalidSendingPoolName = "InvalidSendingPoolName" + + // BulkEmailStatusAccountSendingPaused is a BulkEmailStatus enum value + BulkEmailStatusAccountSendingPaused = "AccountSendingPaused" + + // BulkEmailStatusConfigurationSetSendingPaused is a BulkEmailStatus enum value + BulkEmailStatusConfigurationSetSendingPaused = "ConfigurationSetSendingPaused" + + // BulkEmailStatusInvalidParameterValue is a BulkEmailStatus enum value + BulkEmailStatusInvalidParameterValue = "InvalidParameterValue" + + // BulkEmailStatusTransientFailure is a BulkEmailStatus enum value + BulkEmailStatusTransientFailure = "TransientFailure" + + // BulkEmailStatusFailed is a BulkEmailStatus enum value + BulkEmailStatusFailed = "Failed" +) + +const ( + // ConfigurationSetAttributeEventDestinations is a ConfigurationSetAttribute enum value + 
ConfigurationSetAttributeEventDestinations = "eventDestinations" + + // ConfigurationSetAttributeTrackingOptions is a ConfigurationSetAttribute enum value + ConfigurationSetAttributeTrackingOptions = "trackingOptions" + + // ConfigurationSetAttributeReputationOptions is a ConfigurationSetAttribute enum value + ConfigurationSetAttributeReputationOptions = "reputationOptions" +) + +const ( + // CustomMailFromStatusPending is a CustomMailFromStatus enum value + CustomMailFromStatusPending = "Pending" + + // CustomMailFromStatusSuccess is a CustomMailFromStatus enum value + CustomMailFromStatusSuccess = "Success" + + // CustomMailFromStatusFailed is a CustomMailFromStatus enum value + CustomMailFromStatusFailed = "Failed" + + // CustomMailFromStatusTemporaryFailure is a CustomMailFromStatus enum value + CustomMailFromStatusTemporaryFailure = "TemporaryFailure" +) + +const ( + // DimensionValueSourceMessageTag is a DimensionValueSource enum value + DimensionValueSourceMessageTag = "messageTag" + + // DimensionValueSourceEmailHeader is a DimensionValueSource enum value + DimensionValueSourceEmailHeader = "emailHeader" + + // DimensionValueSourceLinkTag is a DimensionValueSource enum value + DimensionValueSourceLinkTag = "linkTag" +) + +const ( + // DsnActionFailed is a DsnAction enum value + DsnActionFailed = "failed" + + // DsnActionDelayed is a DsnAction enum value + DsnActionDelayed = "delayed" + + // DsnActionDelivered is a DsnAction enum value + DsnActionDelivered = "delivered" + + // DsnActionRelayed is a DsnAction enum value + DsnActionRelayed = "relayed" + + // DsnActionExpanded is a DsnAction enum value + DsnActionExpanded = "expanded" +) + +const ( + // EventTypeSend is a EventType enum value + EventTypeSend = "send" + + // EventTypeReject is a EventType enum value + EventTypeReject = "reject" + + // EventTypeBounce is a EventType enum value + EventTypeBounce = "bounce" + + // EventTypeComplaint is a EventType enum value + EventTypeComplaint = "complaint" + + // EventTypeDelivery is a EventType enum value + EventTypeDelivery = "delivery" + + // EventTypeOpen is a EventType enum value + EventTypeOpen = "open" + + // EventTypeClick is a EventType enum value + EventTypeClick = "click" + + // EventTypeRenderingFailure is a EventType enum value + EventTypeRenderingFailure = "renderingFailure" +) + +const ( + // IdentityTypeEmailAddress is a IdentityType enum value + IdentityTypeEmailAddress = "EmailAddress" + + // IdentityTypeDomain is a IdentityType enum value + IdentityTypeDomain = "Domain" +) + +const ( + // InvocationTypeEvent is a InvocationType enum value + InvocationTypeEvent = "Event" + + // InvocationTypeRequestResponse is a InvocationType enum value + InvocationTypeRequestResponse = "RequestResponse" +) + +const ( + // NotificationTypeBounce is a NotificationType enum value + NotificationTypeBounce = "Bounce" + + // NotificationTypeComplaint is a NotificationType enum value + NotificationTypeComplaint = "Complaint" + + // NotificationTypeDelivery is a NotificationType enum value + NotificationTypeDelivery = "Delivery" +) + +const ( + // ReceiptFilterPolicyBlock is a ReceiptFilterPolicy enum value + ReceiptFilterPolicyBlock = "Block" + + // ReceiptFilterPolicyAllow is a ReceiptFilterPolicy enum value + ReceiptFilterPolicyAllow = "Allow" +) + +const ( + // SNSActionEncodingUtf8 is a SNSActionEncoding enum value + SNSActionEncodingUtf8 = "UTF-8" + + // SNSActionEncodingBase64 is a SNSActionEncoding enum value + SNSActionEncodingBase64 = "Base64" +) + +const ( + // StopScopeRuleSet is 
a StopScope enum value + StopScopeRuleSet = "RuleSet" +) + +const ( + // TlsPolicyRequire is a TlsPolicy enum value + TlsPolicyRequire = "Require" + + // TlsPolicyOptional is a TlsPolicy enum value + TlsPolicyOptional = "Optional" +) + +const ( + // VerificationStatusPending is a VerificationStatus enum value + VerificationStatusPending = "Pending" + + // VerificationStatusSuccess is a VerificationStatus enum value + VerificationStatusSuccess = "Success" + + // VerificationStatusFailed is a VerificationStatus enum value + VerificationStatusFailed = "Failed" + + // VerificationStatusTemporaryFailure is a VerificationStatus enum value + VerificationStatusTemporaryFailure = "TemporaryFailure" + + // VerificationStatusNotStarted is a VerificationStatus enum value + VerificationStatusNotStarted = "NotStarted" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ses/doc.go new file mode 100644 index 000000000..6ba270a75 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/doc.go @@ -0,0 +1,35 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ses provides the client and types for making API +// requests to Amazon Simple Email Service. +// +// This document contains reference information for the Amazon Simple Email +// Service (https://aws.amazon.com/ses/) (Amazon SES) API, version 2010-12-01. +// This document is best used in conjunction with the Amazon SES Developer Guide +// (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). +// +// For a list of Amazon SES endpoints to use in service requests, see Regions +// and Amazon SES (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/regions.html) +// in the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01 for more information on this service. +// +// See ses package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ses/ +// +// Using the Client +// +// To contact Amazon Simple Email Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Email Service client SES for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ses/#New +package ses diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go new file mode 100644 index 000000000..dd94d6351 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go @@ -0,0 +1,244 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ses + +const ( + + // ErrCodeAccountSendingPausedException for service response error code + // "AccountSendingPausedException". + // + // Indicates that email sending is disabled for your entire Amazon SES account. + // + // You can enable or disable email sending for your Amazon SES account using + // UpdateAccountSendingEnabled. 
+ ErrCodeAccountSendingPausedException = "AccountSendingPausedException" + + // ErrCodeAlreadyExistsException for service response error code + // "AlreadyExists". + // + // Indicates that a resource could not be created because of a naming conflict. + ErrCodeAlreadyExistsException = "AlreadyExists" + + // ErrCodeCannotDeleteException for service response error code + // "CannotDelete". + // + // Indicates that the delete operation could not be completed. + ErrCodeCannotDeleteException = "CannotDelete" + + // ErrCodeConfigurationSetAlreadyExistsException for service response error code + // "ConfigurationSetAlreadyExists". + // + // Indicates that the configuration set could not be created because of a naming + // conflict. + ErrCodeConfigurationSetAlreadyExistsException = "ConfigurationSetAlreadyExists" + + // ErrCodeConfigurationSetDoesNotExistException for service response error code + // "ConfigurationSetDoesNotExist". + // + // Indicates that the configuration set does not exist. + ErrCodeConfigurationSetDoesNotExistException = "ConfigurationSetDoesNotExist" + + // ErrCodeConfigurationSetSendingPausedException for service response error code + // "ConfigurationSetSendingPausedException". + // + // Indicates that email sending is disabled for the configuration set. + // + // You can enable or disable email sending for a configuration set using UpdateConfigurationSetSendingEnabled. + ErrCodeConfigurationSetSendingPausedException = "ConfigurationSetSendingPausedException" + + // ErrCodeCustomVerificationEmailInvalidContentException for service response error code + // "CustomVerificationEmailInvalidContent". + // + // Indicates that custom verification email template provided content is invalid. + ErrCodeCustomVerificationEmailInvalidContentException = "CustomVerificationEmailInvalidContent" + + // ErrCodeCustomVerificationEmailTemplateAlreadyExistsException for service response error code + // "CustomVerificationEmailTemplateAlreadyExists". + // + // Indicates that a custom verification email template with the name you specified + // already exists. + ErrCodeCustomVerificationEmailTemplateAlreadyExistsException = "CustomVerificationEmailTemplateAlreadyExists" + + // ErrCodeCustomVerificationEmailTemplateDoesNotExistException for service response error code + // "CustomVerificationEmailTemplateDoesNotExist". + // + // Indicates that a custom verification email template with the name you specified + // does not exist. + ErrCodeCustomVerificationEmailTemplateDoesNotExistException = "CustomVerificationEmailTemplateDoesNotExist" + + // ErrCodeEventDestinationAlreadyExistsException for service response error code + // "EventDestinationAlreadyExists". + // + // Indicates that the event destination could not be created because of a naming + // conflict. + ErrCodeEventDestinationAlreadyExistsException = "EventDestinationAlreadyExists" + + // ErrCodeEventDestinationDoesNotExistException for service response error code + // "EventDestinationDoesNotExist". + // + // Indicates that the event destination does not exist. + ErrCodeEventDestinationDoesNotExistException = "EventDestinationDoesNotExist" + + // ErrCodeFromEmailAddressNotVerifiedException for service response error code + // "FromEmailAddressNotVerified". + // + // Indicates that the sender address specified for a custom verification email + // is not verified, and is therefore not eligible to send the custom verification + // email. 
+ ErrCodeFromEmailAddressNotVerifiedException = "FromEmailAddressNotVerified" + + // ErrCodeInvalidCloudWatchDestinationException for service response error code + // "InvalidCloudWatchDestination". + // + // Indicates that the Amazon CloudWatch destination is invalid. See the error + // message for details. + ErrCodeInvalidCloudWatchDestinationException = "InvalidCloudWatchDestination" + + // ErrCodeInvalidConfigurationSetException for service response error code + // "InvalidConfigurationSet". + // + // Indicates that the configuration set is invalid. See the error message for + // details. + ErrCodeInvalidConfigurationSetException = "InvalidConfigurationSet" + + // ErrCodeInvalidFirehoseDestinationException for service response error code + // "InvalidFirehoseDestination". + // + // Indicates that the Amazon Kinesis Firehose destination is invalid. See the + // error message for details. + ErrCodeInvalidFirehoseDestinationException = "InvalidFirehoseDestination" + + // ErrCodeInvalidLambdaFunctionException for service response error code + // "InvalidLambdaFunction". + // + // Indicates that the provided AWS Lambda function is invalid, or that Amazon + // SES could not execute the provided function, possibly due to permissions + // issues. For information about giving permissions, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + ErrCodeInvalidLambdaFunctionException = "InvalidLambdaFunction" + + // ErrCodeInvalidPolicyException for service response error code + // "InvalidPolicy". + // + // Indicates that the provided policy is invalid. Check the error stack for + // more information about what caused the error. + ErrCodeInvalidPolicyException = "InvalidPolicy" + + // ErrCodeInvalidRenderingParameterException for service response error code + // "InvalidRenderingParameter". + // + // Indicates that one or more of the replacement values you provided is invalid. + // This error may occur when the TemplateData object contains invalid JSON. + ErrCodeInvalidRenderingParameterException = "InvalidRenderingParameter" + + // ErrCodeInvalidS3ConfigurationException for service response error code + // "InvalidS3Configuration". + // + // Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is + // invalid, or that Amazon SES could not publish to the bucket, possibly due + // to permissions issues. For information about giving permissions, see the + // Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + ErrCodeInvalidS3ConfigurationException = "InvalidS3Configuration" + + // ErrCodeInvalidSNSDestinationException for service response error code + // "InvalidSNSDestination". + // + // Indicates that the Amazon Simple Notification Service (Amazon SNS) destination + // is invalid. See the error message for details. + ErrCodeInvalidSNSDestinationException = "InvalidSNSDestination" + + // ErrCodeInvalidSnsTopicException for service response error code + // "InvalidSnsTopic". + // + // Indicates that the provided Amazon SNS topic is invalid, or that Amazon SES + // could not publish to the topic, possibly due to permissions issues. For information + // about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). 
+ ErrCodeInvalidSnsTopicException = "InvalidSnsTopic" + + // ErrCodeInvalidTemplateException for service response error code + // "InvalidTemplate". + // + // Indicates that the template that you specified could not be rendered. This + // issue may occur when a template refers to a partial that does not exist. + ErrCodeInvalidTemplateException = "InvalidTemplate" + + // ErrCodeInvalidTrackingOptionsException for service response error code + // "InvalidTrackingOptions". + // + // Indicates that the custom domain to be used for open and click tracking redirects + // is invalid. This error appears most often in the following situations: + // + // * When the tracking domain you specified is not verified in Amazon SES. + // + // * When the tracking domain you specified is not a valid domain or subdomain. + ErrCodeInvalidTrackingOptionsException = "InvalidTrackingOptions" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceeded". + // + // Indicates that a resource could not be created because of service limits. + // For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). + ErrCodeLimitExceededException = "LimitExceeded" + + // ErrCodeMailFromDomainNotVerifiedException for service response error code + // "MailFromDomainNotVerifiedException". + // + // Indicates that the message could not be sent because Amazon SES could not + // read the MX record required to use the specified MAIL FROM domain. For information + // about editing the custom MAIL FROM domain settings for an identity, see the + // Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). + ErrCodeMailFromDomainNotVerifiedException = "MailFromDomainNotVerifiedException" + + // ErrCodeMessageRejected for service response error code + // "MessageRejected". + // + // Indicates that the action failed, and the message could not be sent. Check + // the error stack for more information about what caused the error. + ErrCodeMessageRejected = "MessageRejected" + + // ErrCodeMissingRenderingAttributeException for service response error code + // "MissingRenderingAttribute". + // + // Indicates that one or more of the replacement values for the specified template + // was not specified. Ensure that the TemplateData object contains references + // to all of the replacement tags in the specified template. + ErrCodeMissingRenderingAttributeException = "MissingRenderingAttribute" + + // ErrCodeProductionAccessNotGrantedException for service response error code + // "ProductionAccessNotGranted". + // + // Indicates that the account has not been granted production access. + ErrCodeProductionAccessNotGrantedException = "ProductionAccessNotGranted" + + // ErrCodeRuleDoesNotExistException for service response error code + // "RuleDoesNotExist". + // + // Indicates that the provided receipt rule does not exist. + ErrCodeRuleDoesNotExistException = "RuleDoesNotExist" + + // ErrCodeRuleSetDoesNotExistException for service response error code + // "RuleSetDoesNotExist". + // + // Indicates that the provided receipt rule set does not exist. + ErrCodeRuleSetDoesNotExistException = "RuleSetDoesNotExist" + + // ErrCodeTemplateDoesNotExistException for service response error code + // "TemplateDoesNotExist". + // + // Indicates that the Template object you specified does not exist in your Amazon + // SES account. 
+ ErrCodeTemplateDoesNotExistException = "TemplateDoesNotExist" + + // ErrCodeTrackingOptionsAlreadyExistsException for service response error code + // "TrackingOptionsAlreadyExistsException". + // + // Indicates that the configuration set you specified already contains a TrackingOptions + // object. + ErrCodeTrackingOptionsAlreadyExistsException = "TrackingOptionsAlreadyExistsException" + + // ErrCodeTrackingOptionsDoesNotExistException for service response error code + // "TrackingOptionsDoesNotExistException". + // + // Indicates that the TrackingOptions object you specified does not exist. + ErrCodeTrackingOptionsDoesNotExistException = "TrackingOptionsDoesNotExistException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go new file mode 100644 index 000000000..0e33b771f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go @@ -0,0 +1,98 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ses + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// SES provides the API operation methods for making requests to +// Amazon Simple Email Service. See this package's package overview docs +// for details on the service. +// +// SES methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type SES struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "email" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "SES" // ServiceID is a unique identifer of a specific service. +) + +// New creates a new instance of the SES client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SES client from just a session. +// svc := ses.New(mySession) +// +// // Create a SES client with additional configuration +// svc := ses.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SES { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "ses" + } + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
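The constants in the block above are the string codes that surface through awserr.Error when an SES call fails, which is how a caller (such as a notifier built on this client) can distinguish, say, a rejected message from a missing configuration set. The following is a minimal illustrative sketch, not part of the vendored SDK or of this change; the package and function names are placeholders.

package example

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ses"
)

// classifySendError branches on the generated SES error codes after a failed
// SendEmail call. sesErr is whatever error the SES client returned.
func classifySendError(sesErr error) {
	aerr, ok := sesErr.(awserr.Error)
	if !ok {
		log.Printf("non-AWS error: %v", sesErr)
		return
	}
	switch aerr.Code() {
	case ses.ErrCodeMessageRejected:
		log.Printf("message rejected: %s", aerr.Message())
	case ses.ErrCodeMailFromDomainNotVerifiedException:
		log.Printf("MAIL FROM domain not verified: %s", aerr.Message())
	case ses.ErrCodeConfigurationSetDoesNotExistException:
		log.Printf("configuration set does not exist: %s", aerr.Message())
	default:
		log.Printf("SES error %s: %s", aerr.Code(), aerr.Message())
	}
}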
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SES { + svc := &SES{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-12-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SES operation and runs any +// custom request initialization. +func (c *SES) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go new file mode 100644 index 000000000..6a78874fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go @@ -0,0 +1,56 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ses + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilIdentityExists uses the Amazon SES API operation +// GetIdentityVerificationAttributes to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SES) WaitUntilIdentityExists(input *GetIdentityVerificationAttributesInput) error { + return c.WaitUntilIdentityExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilIdentityExistsWithContext is an extended version of WaitUntilIdentityExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) WaitUntilIdentityExistsWithContext(ctx aws.Context, input *GetIdentityVerificationAttributesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilIdentityExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(3 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "VerificationAttributes.*.VerificationStatus", + Expected: "Success", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetIdentityVerificationAttributesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetIdentityVerificationAttributesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 000000000..811308964 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,2401 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. 
For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: you cannot call +// the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role. 
+// +// The user who wants to access the role must also have permissions delegated +// from the role's administrator. If the user is in a different account than +// the role, then the user's administrator must attach a policy that allows +// the user to call AssumeRole on the ARN of the role in the other account. +// If the user is in the same account as the role, then you can either attach +// a policy to the user (identical to the previous different account user), +// or you can add the user as a principal directly in the role's trust policy. +// In this case, the trust policy acts as the only resource-based policy in +// IAM, and users in the same account as the role do not need explicit permission +// to assume the role. For more information about trust policies and resource-based +// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// Using MFA with AssumeRole +// +// You can optionally include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios in which +// you want to make sure that the user who is assuming the role has been authenticated +// using an AWS MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication; if the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication +// might look like the following example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA devices produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
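As a minimal sketch of the MFA-protected AssumeRole flow described above: the role ARN, session name, MFA device serial, and TOTP code are caller-supplied placeholders, and the STS client is assumed to come from an existing session. This is illustrative only and not part of the vendored SDK.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeRoleWithMFA requests temporary credentials for roleARN, passing the
// caller's MFA device serial and current one-time code as described above.
func assumeRoleWithMFA(svc *sts.STS, roleARN, mfaSerial, totp string) error {
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String(roleARN),
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(900), // 15 minutes, the documented minimum
		SerialNumber:    aws.String(mfaSerial),
		TokenCode:       aws.String(totp),
	})
	if err != nil {
		return err
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
	return nil
}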
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. 
+// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). 
For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider, such as Amazon +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible +// identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely +// identify a user and supply the user with a consistent identity throughout +// the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. 
Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application, and without deploying server-based +// proxy services that use long-term AWS credentials. Instead, the identity +// of the caller is validated by using a token from the web identity provider. +// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce +// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service APIs. +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. 
The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security +// credentials, and then using those credentials to make a request to AWS. +// +// +// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample +// apps that show how to invoke the identity providers, and then how to use +// the information from these providers to get and use temporary security +// credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. 
+// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. 
+// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// constitute privileged information that the user who requested the action +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. 
+// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCallerIdentity for more information on using the GetCallerIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + output = &GetCallerIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCallerIdentity API operation for AWS Security Token Service. +// +// Returns details about the IAM identity whose credentials are used to call +// the API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetCallerIdentity for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
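A minimal sketch of GetCallerIdentity usage: the call takes an empty input and is mostly useful for confirming which credentials the SDK resolved. The helper name is a placeholder and the client is assumed to already exist; this is illustrative only.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// whoAmI prints the account, ARN, and user ID behind the current credentials.
func whoAmI(svc *sts.STS) error {
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		return err
	}
	fmt.Printf("account=%s arn=%s userid=%s\n",
		aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))
	return nil
}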
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetFederationToken for more information on using the GetFederationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + output = &GetFederationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetFederationToken API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. +// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. Because +// you must call the GetFederationToken action using the long-term security +// credentials of an IAM user, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. +// For a comparison of GetFederationToken with the other APIs that produce temporary +// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// If you are creating a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider, we recommend that you +// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// The GetFederationToken action must be called by using the long-term AWS security +// credentials of an IAM user. 
You can also call GetFederationToken using the +// security credentials of an AWS root account, but we do not recommended it. +// Instead, we recommend that you create an IAM user for the purpose of the +// proxy application and then attach a policy to the IAM user that limits federated +// users to only the actions and resources that they need access to. For more +// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// The temporary security credentials that are obtained by using the long-term +// credentials of an IAM user are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximium of 129600 seconds (36 hours). The default +// is 43200 seconds (12 hours). Temporary credentials that are obtained by using +// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). +// +// The temporary security credentials created by GetFederationToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot use these credentials to call any IAM APIs. +// +// * You cannot call any STS APIs except GetCallerIdentity. +// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// * The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. +// +// * The policy that is passed as a parameter in the call. +// +// The passed policy is attached to the temporary security credentials that +// result from the GetFederationToken API call--that is, to the federated user. +// When the federated user makes an AWS request, AWS evaluates the policy attached +// to the federated user in combination with the policy or policies attached +// to the IAM user whose credentials were used to call GetFederationToken. AWS +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The +// passed policy cannot grant more permissions than those that are defined in +// the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). 
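A minimal sketch of the proxy-application pattern described above: a long-lived IAM user's credentials request scoped-down temporary credentials for a named federated user. The federated user name and the scoped-down policy document are placeholders; this is illustrative only, not part of the vendored SDK.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// federate returns temporary credentials for a federated user whose effective
// permissions are the intersection of the calling IAM user's policies and scopedPolicy.
func federate(svc *sts.STS, name, scopedPolicy string) (*sts.Credentials, error) {
	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String(name),
		Policy:          aws.String(scopedPolicy),
		DurationSeconds: aws.Int64(43200), // the documented 12-hour default, stated explicitly
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}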
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSessionTokenRequest method. 
+//    req, resp := client.GetSessionTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+//    * You cannot call any IAM APIs unless MFA authentication information is
+//    included in the request.
+//
+//    * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account
+// or IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
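+//
+// A minimal, illustrative sketch of an MFA-protected call to this operation;
+// the MFA device ARN and token code below are placeholders, not real values:
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+//        DurationSeconds: aws.Int64(900),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // placeholder MFA device
+//        TokenCode:       aws.String("123456"),                             // placeholder MFA code
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials) // temporary credentials for this session
+//    }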
+// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. 
+ DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that is used by third parties when assuming roles in + // their customers' accounts. For each role that the third party can assume, + // they should instruct their customers to ensure the role's trust policy checks + // for the external ID that the third party generated. Each time the third party + // assumes the role, they should pass the customer's external ID. The external + // ID is useful in order to help third parties bind a role to the customer who + // created it. For more information about the external ID, see How to Use an + // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validated this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format. + // + // This parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both (the intersection of) the access policy of the role that + // is being assumed, and the policy that you pass. This gives you a way to further + // restrict the permissions for the resulting temporary security credentials. + // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. 
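+	//
+	// As an illustration of how these fields fit together, a hypothetical request
+	// might look like the following (the role ARN and session name are placeholders):
+	//
+	//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{ // svc is an *sts.STS client
+	//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
+	//        RoleSessionName: aws.String("example-session"),
+	//        DurationSeconds: aws.Int64(3600),
+	//    })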
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. 
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. 
If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the Using IAM guide. 
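+	//
+	// As an illustration, a hypothetical request combining these fields might look
+	// like the following (the ARNs are placeholders and samlResponse is assumed to
+	// hold the base64-encoded assertion from the IdP):
+	//
+	//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{ // svc is an *sts.STS client
+	//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/example-role"),
+	//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
+	//        SAMLAssertion: aws.String(samlResponse),
+	//    })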
+ // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. 
+ Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. 
For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. 
This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. 
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. 
+func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. 
+func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. 
+ // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. 
+ // + // The regex used to validated this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, and the user does not provide a code when requesting a set of + // temporary security credentials, the user will receive an "access denied" + // response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. 
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 000000000..ef681ab0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,72 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. 
To learn more about CloudTrail, including how to turn it on and find +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 000000000..e24884ef3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. 
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the policy document was too large. The error + // message describes how big the policy document is, in packed form, as a percentage + // of what the API allows. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + ErrCodeRegionDisabledException = "RegionDisabledException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 000000000..185c914d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,95 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// STS provides the API operation methods for making requests to +// AWS Security Token Service. See this package's package overview docs +// for details on the service. +// +// STS methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type STS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "STS" // ServiceID is a unique identifer of a specific service. +) + +// New creates a new instance of the STS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. +func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml deleted file mode 100644 index d87d46576..000000000 --- a/vendor/github.com/gorilla/context/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb8728..000000000 --- a/vendor/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
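Taken together, the vendored api.go, errors.go, and service.go above describe the whole call path: build a client with `sts.New`, pass a typed input struct, and branch on the generated `ErrCode*` constants. The sketch below (not part of the vendored files) walks that path for `GetFederationToken`; the region, federated user name, and inline scope-down policy are illustrative assumptions, and credentials are expected to come from the environment via the SDK's default credential chain.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Region is an assumption; credentials are resolved from the environment.
	sess, err := session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
	})
	if err != nil {
		log.Fatal(err)
	}
	svc := sts.New(sess)

	// Scope-down policy: the federated user's effective permissions are the
	// intersection of this policy and the calling IAM user's own policy.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("demo-user"), // must satisfy the min:"2" constraint
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(900), // the documented minimum
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case sts.ErrCodeMalformedPolicyDocumentException,
				sts.ErrCodePackedPolicyTooLargeException,
				sts.ErrCodeRegionDisabledException:
				log.Fatalf("STS rejected the request: %v", aerr)
			}
		}
		log.Fatal(err)
	}

	// Credentials on the output holds the temporary access key, secret key,
	// and session token; FederatedUser and PackedPolicySize are also returned.
	fmt.Println(aws.StringValue(out.FederatedUser.Arn), aws.Int64Value(out.PackedPolicySize))
}
```

The same error-switch pattern applies to any of the constants defined in errors.go above.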
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b05..000000000 --- a/vendor/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b1..000000000 --- a/vendor/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. -func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. 
-// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/gorilla/context/context_test.go b/vendor/github.com/gorilla/context/context_test.go deleted file mode 100644 index 6ada8ec31..000000000 --- a/vendor/github.com/gorilla/context/context_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "testing" -) - -type keyType int - -const ( - key1 keyType = iota - key2 -) - -func TestContext(t *testing.T) { - assertEqual := func(val interface{}, exp interface{}) { - if val != exp { - t.Errorf("Expected %v, got %v.", exp, val) - } - } - - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - - // Get() - assertEqual(Get(r, key1), nil) - - // Set() - Set(r, key1, "1") - assertEqual(Get(r, key1), "1") - assertEqual(len(data[r]), 1) - - Set(r, key2, "2") - assertEqual(Get(r, key2), "2") - assertEqual(len(data[r]), 2) - - //GetOk - value, ok := GetOk(r, key1) - assertEqual(value, "1") - assertEqual(ok, true) - - value, ok = GetOk(r, "not exists") - assertEqual(value, nil) - assertEqual(ok, false) - - Set(r, "nil value", nil) - value, ok = GetOk(r, "nil value") - assertEqual(value, nil) - assertEqual(ok, true) - - // GetAll() - values := GetAll(r) - assertEqual(len(values), 3) - - // GetAll() for empty request - values = GetAll(emptyR) - if values != nil { - t.Error("GetAll didn't return nil value for invalid request") - } - - // GetAllOk() - values, ok = GetAllOk(r) - assertEqual(len(values), 3) - assertEqual(ok, true) - - // GetAllOk() for empty request - values, ok = GetAllOk(emptyR) - assertEqual(value, nil) - assertEqual(ok, false) - - // Delete() - Delete(r, key1) - assertEqual(Get(r, key1), nil) - assertEqual(len(data[r]), 2) - - Delete(r, key2) - assertEqual(Get(r, key2), nil) - assertEqual(len(data[r]), 1) - - // Clear() - Clear(r) - assertEqual(len(data), 0) -} - -func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { - - b.StopTimer() - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - done := make(chan struct{}) - b.StartTimer() - - for i := 
0; i < b.N; i++ { - wait := make(chan struct{}) - - for i := 0; i < numReaders; i++ { - go parallelReader(r, "test", iterations, wait, done) - } - - for i := 0; i < numWriters; i++ { - go parallelWriter(r, "test", "123", iterations, wait, done) - } - - close(wait) - - for i := 0; i < numReaders+numWriters; i++ { - <-done - } - - } - -} - -func BenchmarkMutexSameReadWrite1(b *testing.B) { - benchmarkMutex(b, 1, 1, 32) -} -func BenchmarkMutexSameReadWrite2(b *testing.B) { - benchmarkMutex(b, 2, 2, 32) -} -func BenchmarkMutexSameReadWrite4(b *testing.B) { - benchmarkMutex(b, 4, 4, 32) -} -func BenchmarkMutex1(b *testing.B) { - benchmarkMutex(b, 2, 8, 32) -} -func BenchmarkMutex2(b *testing.B) { - benchmarkMutex(b, 16, 4, 64) -} -func BenchmarkMutex3(b *testing.B) { - benchmarkMutex(b, 1, 2, 128) -} -func BenchmarkMutex4(b *testing.B) { - benchmarkMutex(b, 128, 32, 256) -} -func BenchmarkMutex5(b *testing.B) { - benchmarkMutex(b, 1024, 2048, 64) -} -func BenchmarkMutex6(b *testing.B) { - benchmarkMutex(b, 2048, 1024, 512) -} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c740031..000000000 --- a/vendor/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. 
Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml index d87d46576..0e58a7297 100644 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ b/vendor/github.com/gorilla/mux/.travis.yml @@ -1,7 +1,24 @@ language: go +sudo: false -go: - - 1.0 - - 1.1 - - 1.2 - - tip +matrix: + include: + - go: 1.7.x + - go: 1.8.x + - go: 1.9.x + - go: 1.10.x + - go: 1.11.x + - go: 1.x + env: LATEST=true + - go: tip + allow_failures: + - go: tip + +install: + - # Skip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - if [[ "$LATEST" = true ]]; then go tool vet .; fi + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS new file mode 100644 index 000000000..b722392ee --- /dev/null +++ b/vendor/github.com/gorilla/mux/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of gorilla/mux authors for copyright purposes. +# +# Please keep the list sorted. + +Google LLC (https://opensource.google.com/) +Kamil Kisielk +Matt Silverlock +Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..232be82e4 --- /dev/null +++ b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md @@ -0,0 +1,11 @@ +**What version of Go are you running?** (Paste the output of `go version`) + + +**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`) + + +**Describe your problem** (and what you have tried so far) + + +**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it) + diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE index 0e5fb8728..6903df638 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index e60301b03..c661599ab 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,7 +1,649 @@ -mux -=== -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) +# gorilla/mux -gorilla/mux is a powerful URL router and dispatcher. 
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) +[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux +![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) + +https://www.gorillatoolkit.org/pkg/mux + +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. + +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: + +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts, paths and query values can have variables with an optional regular expression. +* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. + +--- + +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Static Files](#static-files) +* [Registered URLs](#registered-urls) +* [Walking Routes](#walking-routes) +* [Graceful Shutdown](#graceful-shutdown) +* [Middleware](#middleware) +* [Testing Handlers](#testing-handlers) +* [Full Example](#full-example) + +--- + +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` + +## Examples + +Let's start registering a couple of URL paths and handlers: + +```go +func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: + +```go +func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Category: %v\n", vars["category"]) +} +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. 
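The snippets above register an `ArticleHandler` for `/articles/{category}/{id:[0-9]+}` but never show its body. One possible implementation that reads both route variables is sketched below; the port and the handler's output format are assumptions, not part of the upstream README.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"strconv"

	"github.com/gorilla/mux"
)

// ArticleHandler reads both variables from the route registered in main.
func ArticleHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// The {id:[0-9]+} pattern guarantees a numeric id for matched requests.
	id, _ := strconv.Atoi(vars["id"])
	fmt.Fprintf(w, "Category: %v, ID: %d\n", vars["category"], id)
}

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
	// GET /articles/technology/42  ->  "Category: technology, ID: 42"
	log.Fatal(http.ListenAndServe(":8000", r))
}
```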
+ +### Matching Routes + +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.example.com") +``` + +There are several other matchers that can be added. To match path prefixes: + +```go +r.PathPrefix("/products/") +``` + +...or HTTP methods: + +```go +r.Methods("GET", "POST") +``` + +...or URL schemes: + +```go +r.Schemes("https") +``` + +...or header values: + +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` + +...or query values: + +```go +r.Queries("key", "value") +``` + +...or to use a custom matcher function: + +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` + +...and finally, it is possible to combine several matchers in a single route: + +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") +``` + +Routes are tested in the order they were added to the router. If two routes match, the first one wins: + +```go +r := mux.NewRouter() +r.HandleFunc("/specific", specificHandler) +r.PathPrefix("/").Handler(catchAllHandler) +``` + +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". + +For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it: + +```go +r := mux.NewRouter() +s := r.Host("www.example.com").Subrouter() +``` + +Then register routes in the subrouter: + +```go +s.HandleFunc("/products/", ProductsHandler) +s.HandleFunc("/products/{key}", ProductHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: + +```go +r := mux.NewRouter() +s := r.PathPrefix("/products").Subrouter() +// "/products/" +s.HandleFunc("/", ProductsHandler) +// "/products/{key}/" +s.HandleFunc("/{key}/", ProductHandler) +// "/products/{key}/details" +s.HandleFunc("/{key}/details", ProductDetailsHandler) +``` + + +### Static Files + +Note that the path provided to `PathPrefix()` represents a "wildcard": calling +`PathPrefix("/static/").Handler(...)` means that the handler will be passed any +request that matches "/static/\*". This makes it easy to serve static files with mux: + +```go +func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. 
Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Registered URLs + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") +``` + +To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: + +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` + +...and the result will be a `url.URL` with the following path: + +``` +"/articles/technology/42" +``` + +This also works for host and query value variables: + +```go +r := mux.NewRouter() +r.Host("{subdomain}.example.com"). + Path("/articles/{category}/{id:[0-9]+}"). + Queries("filter", "{filter}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42", + "filter", "gorilla") +``` + +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` + +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` + +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: + +```go +// "http://news.example.com/" +host, err := r.Get("article").URLHost("subdomain", "news") + +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` + +And if you use subrouters, host and path defined separately can be built as well: + +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.example.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.example.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +### Walking Routes + +The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. 
For example, +the following prints all of the registered routes: + +```go +package main + +import ( + "fmt" + "net/http" + "strings" + + "github.com/gorilla/mux" +) + +func handler(w http.ResponseWriter, r *http.Request) { + return +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.HandleFunc("/products", handler).Methods("POST") + r.HandleFunc("/articles", handler).Methods("GET") + r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") + r.HandleFunc("/authors", handler).Queries("surname", "{surname}") + err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + pathTemplate, err := route.GetPathTemplate() + if err == nil { + fmt.Println("ROUTE:", pathTemplate) + } + pathRegexp, err := route.GetPathRegexp() + if err == nil { + fmt.Println("Path regexp:", pathRegexp) + } + queriesTemplates, err := route.GetQueriesTemplates() + if err == nil { + fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) + } + queriesRegexps, err := route.GetQueriesRegexp() + if err == nil { + fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) + } + methods, err := route.GetMethods() + if err == nil { + fmt.Println("Methods:", strings.Join(methods, ",")) + } + fmt.Println() + return nil + }) + + if err != nil { + fmt.Println(err) + } + + http.Handle("/", r) +} +``` + +### Graceful Shutdown + +Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: + +```go +package main + +import ( + "context" + "flag" + "log" + "net/http" + "os" + "os/signal" + "time" + + "github.com/gorilla/mux" +) + +func main() { + var wait time.Duration + flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m") + flag.Parse() + + r := mux.NewRouter() + // Add your routes as needed + + srv := &http.Server{ + Addr: "0.0.0.0:8080", + // Good practice to set timeouts to avoid Slowloris attacks. + WriteTimeout: time.Second * 15, + ReadTimeout: time.Second * 15, + IdleTimeout: time.Second * 60, + Handler: r, // Pass our instance of gorilla/mux in. + } + + // Run our server in a goroutine so that it doesn't block. + go func() { + if err := srv.ListenAndServe(); err != nil { + log.Println(err) + } + }() + + c := make(chan os.Signal, 1) + // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) + // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. + signal.Notify(c, os.Interrupt) + + // Block until we receive our signal. + <-c + + // Create a deadline to wait for. + ctx, cancel := context.WithTimeout(context.Background(), wait) + defer cancel() + // Doesn't block if no connections, but will otherwise wait + // until the timeout deadline. + srv.Shutdown(ctx) + // Optionally, you could run srv.Shutdown in a goroutine and block on + // <-ctx.Done() if your application should wait for other services + // to finalize based on context cancellation. + log.Println("shutting down") + os.Exit(0) +} +``` + +### Middleware + +Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. +Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. 
Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. + +Mux middlewares are defined using the de facto standard type: + +```go +type MiddlewareFunc func(http.Handler) http.Handler +``` + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers. + +A very basic middleware which logs the URI of the request being handled could be written as: + +```go +func loggingMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. + next.ServeHTTP(w, r) + }) +} +``` + +Middlewares can be added to a router using `Router.Use()`: + +```go +r := mux.NewRouter() +r.HandleFunc("/", handler) +r.Use(loggingMiddleware) +``` + +A more complex authentication middleware, which maps session token to users, could be written as: + +```go +// Define our struct +type authenticationMiddleware struct { + tokenUsers map[string]string +} + +// Initialize it somewhere +func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" +} + +// Middleware function, which will be called for each request +func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + // Pass down the request to the next middleware (or final handler) + next.ServeHTTP(w, r) + } else { + // Write an error and stop the handler chain + http.Error(w, "Forbidden", http.StatusForbidden) + } + }) +} +``` + +```go +r := mux.NewRouter() +r.HandleFunc("/", handler) + +amw := authenticationMiddleware{} +amw.Populate() + +r.Use(amw.Middleware) +``` + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. + +### Testing Handlers + +Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. + +First, our simple HTTP handler: + +```go +// endpoints.go +package main + +func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { + // A very simple health check. + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + // In the future we could report back on the status of our DB, or our cache + // (e.g. Redis) by performing a simple PING, and include them in the response. 
+ io.WriteString(w, `{"alive": true}`) +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/health", HealthCheckHandler) + + log.Fatal(http.ListenAndServe("localhost:8080", r)) +} +``` + +Our test code: + +```go +// endpoints_test.go +package main + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestHealthCheckHandler(t *testing.T) { + // Create a request to pass to our handler. We don't have any query parameters for now, so we'll + // pass 'nil' as the third parameter. + req, err := http.NewRequest("GET", "/health", nil) + if err != nil { + t.Fatal(err) + } + + // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. + rr := httptest.NewRecorder() + handler := http.HandlerFunc(HealthCheckHandler) + + // Our handlers satisfy http.Handler, so we can call their ServeHTTP method + // directly and pass in our Request and ResponseRecorder. + handler.ServeHTTP(rr, req) + + // Check the status code is what we expect. + if status := rr.Code; status != http.StatusOK { + t.Errorf("handler returned wrong status code: got %v want %v", + status, http.StatusOK) + } + + // Check the response body is what we expect. + expected := `{"alive": true}` + if rr.Body.String() != expected { + t.Errorf("handler returned unexpected body: got %v want %v", + rr.Body.String(), expected) + } +} +``` + +In the case that our routes have [variables](#examples), we can pass those in the request. We could write +[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple +possible route variables as needed. + +```go +// endpoints.go +func main() { + r := mux.NewRouter() + // A route with a route variable: + r.HandleFunc("/metrics/{type}", MetricsHandler) + + log.Fatal(http.ListenAndServe("localhost:8080", r)) +} +``` + +Our test file, with a table-driven test of `routeVariables`: + +```go +// endpoints_test.go +func TestMetricsHandler(t *testing.T) { + tt := []struct{ + routeVariable string + shouldPass bool + }{ + {"goroutines", true}, + {"heap", true}, + {"counters", true}, + {"queries", true}, + {"adhadaeqm3k", false}, + } + + for _, tc := range tt { + path := fmt.Sprintf("/metrics/%s", tc.routeVariable) + req, err := http.NewRequest("GET", path, nil) + if err != nil { + t.Fatal(err) + } + + rr := httptest.NewRecorder() + + // Need to create a router that we can pass the request through so that the vars will be added to the context + router := mux.NewRouter() + router.HandleFunc("/metrics/{type}", MetricsHandler) + router.ServeHTTP(rr, req) + + // In this case, our MetricsHandler returns a non-200 response + // for a route variable it doesn't know about. + if rr.Code == http.StatusOK && !tc.shouldPass { + t.Errorf("handler should have failed on routeVariable %s: got %v want %v", + tc.routeVariable, rr.Code, http.StatusOK) + } + } +} +``` + +## Full Example + +Here's a complete, runnable example of a small `mux` based server: + +```go +package main + +import ( + "net/http" + "log" + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + log.Fatal(http.ListenAndServe(":8000", r)) +} +``` + +## License + +BSD licensed. See the LICENSE file for details. 
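The table-driven test in the Testing Handlers section above exercises a `MetricsHandler` that the README leaves undefined. A handler consistent with what that test asserts (a non-200 response for an unknown metric type) could look like the following sketch; the set of metric names and the response body are assumptions.

```go
// endpoints.go (sketch)
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

// knownMetrics mirrors the route variables the table-driven test expects to pass.
var knownMetrics = map[string]bool{
	"goroutines": true,
	"heap":       true,
	"counters":   true,
	"queries":    true,
}

// MetricsHandler returns 200 for known metric types and 404 otherwise,
// which is the behaviour the test asserts against.
func MetricsHandler(w http.ResponseWriter, r *http.Request) {
	metric := mux.Vars(r)["type"]
	if !knownMetrics[metric] {
		http.Error(w, "unknown metric type", http.StatusNotFound)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprintf(w, `{"metric": %q}`, metric)
}
```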
diff --git a/vendor/github.com/gorilla/mux/bench_test.go b/vendor/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index c5f97b2b2..000000000 --- a/vendor/github.com/gorilla/mux/bench_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "net/http" - "testing" -) - -func BenchmarkMux(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}", handler) - - request, _ := http.NewRequest("GET", "/v1/anything", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, request) - } -} diff --git a/vendor/github.com/gorilla/mux/context.go b/vendor/github.com/gorilla/mux/context.go new file mode 100644 index 000000000..665940a26 --- /dev/null +++ b/vendor/github.com/gorilla/mux/context.go @@ -0,0 +1,18 @@ +package mux + +import ( + "context" + "net/http" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return r.Context().Value(key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + return r.WithContext(context.WithValue(r.Context(), key, val)) +} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index b2deed34c..38957deea 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. /* -Package gorilla/mux implements a request router and dispatcher. +Package mux implements a request router and dispatcher. The name mux stands for "HTTP request multiplexer". Like the standard http.ServeMux, mux.Router matches incoming requests against a list of @@ -12,8 +12,8 @@ or other conditions. The main features are: * Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. + * URL hosts, paths and query values can have variables with an optional + regular expression. * Registered URLs can be built, or "reversed", which helps maintaining references to resources. * Routes can be used as subrouters: nested routes are only tested if the @@ -47,12 +47,21 @@ variable will be anything until the next slash. For example: r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: + + r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) + The names are used to create a map of route variables which can be retrieved calling mux.Vars(): vars := mux.Vars(request) category := vars["category"] +Note that if any capturing groups are present, mux will panic() during parsing. To prevent +this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to +"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably +when capturing groups were present. + And this is all you need to know about the basic usage. More advanced options are explained below. @@ -60,8 +69,8 @@ Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. 
They can also have variables: r := mux.NewRouter() - // Only matches if domain is "www.domain.com". - r.Host("www.domain.com") + // Only matches if domain is "www.example.com". + r.Host("www.example.com") // Matches a dynamic subdomain. r.Host("{subdomain:[a-z]+}.domain.com") @@ -89,12 +98,12 @@ There are several other matchers that can be added. To match path prefixes: r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { return r.ProtoMajor == 0 - }) + }) ...and finally, it is possible to combine several matchers in a single route: r.HandleFunc("/products", ProductsHandler). - Host("www.domain.com"). + Host("www.example.com"). Methods("GET"). Schemes("http") @@ -103,11 +112,11 @@ a way to group several routes that share the same requirements. We call it "subrouting". For example, let's say we have several URLs that should only match when the -host is "www.domain.com". Create a route for that host and get a "subrouter" +host is "www.example.com". Create a route for that host and get a "subrouter" from it: r := mux.NewRouter() - s := r.Host("www.domain.com").Subrouter() + s := r.Host("www.example.com").Subrouter() Then register routes in the subrouter: @@ -116,7 +125,7 @@ Then register routes in the subrouter: s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) The three URL paths we registered above will only be tested if the domain is -"www.domain.com", because the subrouter is tested first. This is not +"www.example.com", because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. @@ -136,6 +145,31 @@ the inner routes use it as base for their paths: // "/products/{key}/details" s.HandleFunc("/{key}/details", ProductDetailsHandler) +Note that the path provided to PathPrefix() represents a "wildcard": calling +PathPrefix("/static/").Handler(...) means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + + func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) + } + Now let's see how to build registered URLs. Routes can be named. All routes that define a name can have their URLs built, @@ -154,24 +188,33 @@ key/value pairs for the route variables. For the previous route, we would do: "/articles/technology/42" -This also works for host variables: +This also works for host and query value variables: r := mux.NewRouter() r.Host("{subdomain}.domain.com"). Path("/articles/{category}/{id:[0-9]+}"). + Queries("filter", "{filter}"). HandlerFunc(ArticleHandler). 
Name("article") - // url.String() will be "http://news.domain.com/articles/technology/42" + // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") + "category", "technology", + "id", "42", + "filter", "gorilla") All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. +Regex support also exists for matching Headers within a route. For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + There's also a way to build only the URL host or path for a route: use the methods URLHost() or URLPath() instead. For the previous route, we would do: @@ -193,7 +236,71 @@ as well: // "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") + "category", "technology", + "id", "42") + +Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. + + type MiddlewareFunc func(http.Handler) http.Handler + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). + +A very basic middleware which logs the URI of the request being handled could be written as: + + func simpleMw(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. 
+ next.ServeHTTP(w, r) + }) + } + +Middlewares can be added to a router using `Router.Use()`: + + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.Use(simpleMw) + +A more complex authentication middleware, which maps session token to users, could be written as: + + // Define our struct + type authenticationMiddleware struct { + tokenUsers map[string]string + } + + // Initialize it somewhere + func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" + } + + // Middleware function, which will be called for each request + func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + next.ServeHTTP(w, r) + } else { + http.Error(w, "Forbidden", http.StatusForbidden) + } + }) + } + + r := mux.NewRouter() + r.HandleFunc("/", handler) + + amw := authenticationMiddleware{} + amw.Populate() + + r.Use(amw.Middleware) + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. + */ package mux diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod new file mode 100644 index 000000000..cfc8ede58 --- /dev/null +++ b/vendor/github.com/gorilla/mux/go.mod @@ -0,0 +1 @@ +module github.com/gorilla/mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go new file mode 100644 index 000000000..ceb812cee --- /dev/null +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -0,0 +1,72 @@ +package mux + +import ( + "net/http" + "strings" +) + +// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. +// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed +// to it, and then calls the handler passed as parameter to the MiddlewareFunc. +type MiddlewareFunc func(http.Handler) http.Handler + +// middleware interface is anything which implements a MiddlewareFunc named Middleware. +type middleware interface { + Middleware(handler http.Handler) http.Handler +} + +// Middleware allows MiddlewareFunc to implement the middleware interface. +func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { + return mw(handler) +} + +// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) Use(mwf ...MiddlewareFunc) { + for _, fn := range mwf { + r.middlewares = append(r.middlewares, fn) + } +} + +// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) useInterface(mw middleware) { + r.middlewares = append(r.middlewares, mw) +} + +// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header +// on a request, by matching routes based only on paths. 
It also handles +// OPTIONS requests, by settings Access-Control-Allow-Methods, and then +// returning without calling the next http handler. +func CORSMethodMiddleware(r *Router) MiddlewareFunc { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var allMethods []string + + err := r.Walk(func(route *Route, _ *Router, _ []*Route) error { + for _, m := range route.matchers { + if _, ok := m.(*routeRegexp); ok { + if m.Match(req, &RouteMatch{}) { + methods, err := route.GetMethods() + if err != nil { + return err + } + + allMethods = append(allMethods, methods...) + } + break + } + } + return nil + }) + + if err == nil { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ",")) + + if req.Method == "OPTIONS" { + return + } + } + + next.ServeHTTP(w, req) + }) + } +} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 5b5f8e7db..8aca972d2 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -5,16 +5,24 @@ package mux import ( + "errors" "fmt" "net/http" "path" + "regexp" +) - "github.com/gorilla/context" +var ( + // ErrMethodMismatch is returned when the method in the request does not match + // the method defined against the route. + ErrMethodMismatch = errors.New("method is not allowed") + // ErrNotFound is returned when no route match is found. + ErrNotFound = errors.New("no matching route was found") ) // NewRouter returns a new router instance. func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} + return &Router{namedRoutes: make(map[string]*Route)} } // Router registers routes to be matched and dispatches a handler. @@ -38,25 +46,125 @@ func NewRouter() *Router { type Router struct { // Configurable Handler to be used when no route matches. NotFoundHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute + + // Configurable Handler to be used when the request method does not match the route. + MethodNotAllowedHandler http.Handler + // Routes to be matched, in order. routes []*Route + // Routes by name for URL building. namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // If true, do not clear the request context after handling the request + + // If true, do not clear the request context after handling the request. + // + // Deprecated: No effect when go1.7+ is used, since the context is stored + // on the request itself. KeepContext bool + + // Slice of middlewares to be called after a match is found + middlewares []middleware + + // configuration shared with `Route` + routeConf +} + +// common route configuration shared between `Router` and `Route` +type routeConf struct { + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool + + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + + // Manager for the variables from host and path. + regexp routeRegexpGroup + + // List of matchers. + matchers []matcher + + // The scheme used when building URLs. 
+ buildScheme string + + buildVarsFunc BuildVarsFunc +} + +// returns an effective deep copy of `routeConf` +func copyRouteConf(r routeConf) routeConf { + c := r + + if r.regexp.path != nil { + c.regexp.path = copyRouteRegexp(r.regexp.path) + } + + if r.regexp.host != nil { + c.regexp.host = copyRouteRegexp(r.regexp.host) + } + + c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) + for _, q := range r.regexp.queries { + c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) + } + + c.matchers = make([]matcher, 0, len(r.matchers)) + for _, m := range r.matchers { + c.matchers = append(c.matchers, m) + } + + return c +} + +func copyRouteRegexp(r *routeRegexp) *routeRegexp { + c := *r + return &c } -// Match matches registered routes against the request. +// Match attempts to match the given request against the router's registered routes. +// +// If the request matches a route of this router or one of its subrouters the Route, +// Handler, and Vars fields of the the match argument are filled and this function +// returns true. +// +// If the request does not match any of this router's or its subrouters' routes +// then this function returns false. If available, a reason for the match failure +// will be filled in the match argument's MatchErr field. If the match failure type +// (eg: not found) has a registered handler, the handler is assigned to the Handler +// field of the match argument. func (r *Router) Match(req *http.Request, match *RouteMatch) bool { for _, route := range r.routes { if route.Match(req, match) { + // Build middleware chain if no error was found + if match.MatchErr == nil { + for i := len(r.middlewares) - 1; i >= 0; i-- { + match.Handler = r.middlewares[i].Middleware(match.Handler) + } + } return true } } + + if match.MatchErr == ErrMethodMismatch { + if r.MethodNotAllowedHandler != nil { + match.Handler = r.MethodNotAllowedHandler + return true + } + + return false + } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + match.MatchErr = ErrNotFound + return true + } + + match.MatchErr = ErrNotFound return false } @@ -65,60 +173,71 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { // When there is a match, the route variables can be retrieved calling // mux.Vars(request). func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return + if !r.skipClean { + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Clean path to canonical form and redirect. + if p := cleanPath(path); p != path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } } var match RouteMatch var handler http.Handler if r.Match(req, &match) { handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) + req = setVars(req, match.Vars) + req = setCurrentRoute(req, match.Route) } - if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } + + if handler == nil && match.MatchErr == ErrMethodMismatch { + handler = methodNotAllowedHandler() } - if !r.KeepContext { - defer context.Clear(req) + + if handler == nil { + handler = http.NotFoundHandler() } + handler.ServeHTTP(w, req) } // Get returns a route registered with the given name. func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] + return r.namedRoutes[name] } // GetRoute returns a route registered with the given name. This method // was renamed to Get() and remains here for backwards compatibility. func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] + return r.namedRoutes[name] } // StrictSlash defines the trailing slash behavior for new routes. The initial // value is false. // -// When true, if the route path is "/path/", accessing "/path" will redirect +// When true, if the route path is "/path/", accessing "/path" will perform a redirect // to the former and vice versa. In other words, your application will always // see the path as specified in the route. // // When false, if the route path is "/path", accessing "/path/" will not match // this route and vice versa. // +// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for +// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed +// request will be made as a GET by most clients. Use middleware or client settings +// to modify this behaviour as needed. +// // Special case: when a route sets a path prefix using the PathPrefix() method, // strict slash is ignored for that route because the redirect behavior can't // be determined from a prefix alone. However, any subrouters created from that @@ -128,28 +247,28 @@ func (r *Router) StrictSlash(value bool) *Router { return r } -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r } -// getRegexpGroup returns regexp definitions from the parent route, if any. 
-func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// +// If not called, the router will match the unencoded path to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.useEncodedPath = true + return r } // ---------------------------------------------------------------------------- @@ -158,7 +277,8 @@ func (r *Router) getRegexpGroup() *routeRegexpGroup { // NewRoute registers an empty route. func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} + // initialize a route with a copy of the parent router's configuration + route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} r.routes = append(r.routes, route) return route } @@ -224,6 +344,59 @@ func (r *Router) Schemes(schemes ...string) *Route { return r.NewRoute().Schemes(schemes...) } +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + if err != nil { + return err + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + // ---------------------------------------------------------------------------- // Context // ---------------------------------------------------------------------------- @@ -233,6 +406,11 @@ type RouteMatch struct { Route *Route Handler http.Handler Vars map[string]string + + // MatchErr is set to appropriate matching error + // It is set to ErrMethodMismatch if there is a mismatch in + // the request method and route method + MatchErr error } type contextKey int @@ -244,26 +422,30 @@ const ( // Vars returns the route variables for the current request, if any. 
func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { + if rv := contextGet(r, varsKey); rv != nil { return rv.(map[string]string) } return nil } // CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns, unless the KeepContext option is set on the +// Router. func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { + if rv := contextGet(r, routeKey); rv != nil { return rv.(*Route) } return nil } -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) +func setVars(r *http.Request, val interface{}) *http.Request { + return contextSet(r, varsKey, val) } -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) +func setCurrentRoute(r *http.Request, val interface{}) *http.Request { + return contextSet(r, routeKey, val) } // ---------------------------------------------------------------------------- @@ -285,6 +467,7 @@ func cleanPath(p string) string { if p[len(p)-1] == '/' && np != "/" { np += "/" } + return np } @@ -300,13 +483,24 @@ func uniqueVars(s1, s2 []string) error { return nil } -// mapFromPairs converts variadic string parameters to a string map. -func mapFromPairs(pairs ...string) (map[string]string, error) { +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { length := len(pairs) if length%2 != 0 { - return nil, fmt.Errorf( + return length, fmt.Errorf( "mux: number of parameters must be multiple of 2, got %v", pairs) } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } m := make(map[string]string, length/2) for i := 0; i < length; i += 2 { m[pairs[i]] = pairs[i+1] @@ -314,6 +508,24 @@ func mapFromPairs(pairs ...string) (map[string]string, error) { return m, nil } +// mapFromPairsToRegex converts variadic string parameters to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + // matchInArray returns true if the given string value is in the array. func matchInArray(arr []string, value string) bool { for _, v := range arr { @@ -324,9 +536,8 @@ func matchInArray(arr []string, value string) bool { return false } -// matchMap returns true if the given key/value pairs exist in a given map. -func matchMap(toCheck map[string]string, toMatch map[string][]string, - canonicalKey bool) bool { +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { for k, v := range toCheck { // Check if key exists. 
if canonicalKey { @@ -351,3 +562,40 @@ func matchMap(toCheck map[string]string, toMatch map[string][]string, } return true } + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// methodNotAllowed replies to the request with an HTTP status code 405. +func methodNotAllowed(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusMethodNotAllowed) +} + +// methodNotAllowedHandler returns a simple request handler +// that replies to each request with a status code 405. +func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go deleted file mode 100644 index e455bce8f..000000000 --- a/vendor/github.com/gorilla/mux/mux_test.go +++ /dev/null @@ -1,943 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "fmt" - "net/http" - "testing" - - "github.com/gorilla/context" -) - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all - shouldRedirect bool // whether the request should result in a redirect -} - -func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { 
- title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, - { - title: "Host route with port, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, match with trailing slash in request and path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - }, - { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "Path route, do not match with trailing slash in request", - route: new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: false, - }, - { - title: "Path route, wrong path in request in request URL", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: 
new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPathPrefix(t *testing.T) { - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, match substring", - route: new(Route).PathPrefix("/1"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/1", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHostPath(t *testing.T) { - tests := []routeTest{ - { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: 
true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHeaders(t *testing.T) { - // newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } - -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", 
"http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v1": "bar"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with multiple patterns, match", - route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v1": "10"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=a"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, match https", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "https://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "ftp://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m *RouteMatch) bool { - if r.URL.Host == "aaa.bbb.ccc" { - return true - } - return false - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), 
- vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { - t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) - } else if r1.Get("i") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestStrictSlash(t *testing.T) { - r := NewRouter() - r.StrictSlash(true) - - tests := []routeTest{ - { - title: "Redirect path without slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path with slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Redirect path with slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path without slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Propagate StrictSlash to subrouters", - route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), - request: newRequest("GET", "http://localhost/static/images"), - vars: map[string]string{}, - host: "", - path: "/static/images/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Ignore StrictSlash for path prefix", - route: 
r.NewRoute().PathPrefix("/static/"), - request: newRequest("GET", "http://localhost/static/logo.png"), - vars: map[string]string{}, - host: "", - path: "/static/", - shouldMatch: true, - shouldRedirect: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, path := "none", "none" - if route.regexp != nil { - if route.regexp.host != nil { - host = route.regexp.host.template - } - if route.regexp.path != nil { - path = route.regexp.path.template - } - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - host := test.host - path := test.path - url := test.host + test.path - shouldRedirect := test.shouldRedirect - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) - return - } - if host != "" { - u, _ := test.route.URLHost(mapToPairs(match.Vars)...) - if host != u.Host { - t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) - return - } - } - if path != "" { - u, _ := route.URLPath(mapToPairs(match.Vars)...) - if path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) - return - } - } - if url != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) 
- if url != u.Host+u.Path { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) - return - } - } - if shouldRedirect && match.Handler == nil { - t.Errorf("(%v) Did not redirect", test.title) - return - } - if !shouldRedirect && match.Handler != nil { - t.Errorf("(%v) Unexpected redirect", test.title) - return - } - } -} - -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") - } - - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") - } - -} - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho TestA301ResponseWriter) Header() http.Header { - return http.Header(ho.hh) -} - -func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { - return 0, nil -} - -func (ho TestA301ResponseWriter) WriteHeader(code int) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status: 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -// mapToPairs converts a string map to a slice of string pairs -func mapToPairs(m map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true 
-} - -// newRequest is a helper function to create a new request with a method and url -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - return req -} diff --git a/vendor/github.com/gorilla/mux/old_test.go b/vendor/github.com/gorilla/mux/old_test.go deleted file mode 100644 index 1f7c190c0..000000000 --- a/vendor/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,714 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). - PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. - scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. 
- scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - // Need better messages. :) - if matched { - t.Errorf("Should match.") - } else { - t.Errorf("Should not match.") - } - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. - reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. 
- reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = []urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: 
new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range schemeMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := 
v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.domain.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtUrl := u.String() - // Yay, subroute aware of the domain when building! 
- if builtUrl != url { - t.Errorf("Expected %q, got %q.", url, builtUrl) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. 
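For orientation, the StrictSlash redirect behaviour exercised by the removed test above can be reproduced against the vendored router with a minimal, self-contained sketch; the /products/ path and localhost host are illustrative only, not part of this change.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.StrictSlash(true)
	// With StrictSlash enabled, "/products" and "/products/" both match,
	// and the non-canonical form is answered with a 301 redirect.
	r.HandleFunc("/products/", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "products")
	})

	req := httptest.NewRequest("GET", "http://localhost/products", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	fmt.Println(rec.Code, rec.Header().Get("Location")) // 301 plus a Location ending in "/products/"
}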
-func TestNewRegexp(t *testing.T) { - var p *routeRegexp - var matches []string - - tests := map[string]map[string][]string{ - "/{foo:a{2}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": nil, - "/aaaa": nil, - }, - "/{foo:a{2,}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": {"aaaa"}, - }, - "/{foo:a{2,3}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": nil, - }, - "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abcd": nil, - "/abc/ab": {"abc", "ab"}, - "/abc/abc": nil, - "/abcd/ab": nil, - }, - `/{foo:\w{3,}}/{bar:\d{2,}}`: { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abc/1": nil, - "/abc/12": {"abc", "12"}, - "/abcd/12": {"abcd", "12"}, - "/abcd/123": {"abcd", "123"}, - }, - } - - for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false, false) - for path, result := range paths { - matches = p.regexp.FindStringSubmatch(path) - if result == nil { - if matches != nil { - t.Errorf("%v should not match %v.", pattern, path) - } - } else { - if len(matches) != len(result)+1 { - t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) - } else { - for k, v := range result { - if matches[k+1] != v { - t.Errorf("Expected %v, got %v.", v, matches[k+1]) - } - } - } - } - } - } -} diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index ef1db8ff9..f25288675 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -10,9 +10,24 @@ import ( "net/http" "net/url" "regexp" + "strconv" "strings" ) +type routeRegexpOptions struct { + strictSlash bool + useEncodedPath bool +} + +type regexpType int + +const ( + regexpTypePath regexpType = 0 + regexpTypeHost regexpType = 1 + regexpTypePrefix regexpType = 2 + regexpTypeQuery regexpType = 3 +) + // newRouteRegexp parses a route template and returns a routeRegexp, // used to match a host, a path or a query string. // @@ -23,7 +38,7 @@ import ( // Previously we accepted only Python-like identifiers for variable // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that // name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { +func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { // Check if it is well-formed. idxs, errBraces := braceIndices(tpl) if errBraces != nil { @@ -33,28 +48,25 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash template := tpl // Now let's parse it. defaultPattern := "[^/]+" - if matchQuery { - defaultPattern = "[^?&]+" - matchPrefix, strictSlash = true, false - } else if matchHost { + if typ == regexpTypeQuery { + defaultPattern = ".*" + } else if typ == regexpTypeHost { defaultPattern = "[^.]+" - matchPrefix, strictSlash = false, false } - if matchPrefix { - strictSlash = false + // Only match strict slash if not matching + if typ != regexpTypePath { + options.strictSlash = false } // Set a flag for strictSlash. 
endSlash := false - if strictSlash && strings.HasSuffix(tpl, "/") { + if options.strictSlash && strings.HasSuffix(tpl, "/") { tpl = tpl[:len(tpl)-1] endSlash = true } varsN := make([]string, len(idxs)/2) varsR := make([]*regexp.Regexp, len(idxs)/2) pattern := bytes.NewBufferString("") - if !matchQuery { - pattern.WriteByte('^') - } + pattern.WriteByte('^') reverse := bytes.NewBufferString("") var end int var err error @@ -74,9 +86,11 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash tpl[idxs[i]:end]) } // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + // Build the reverse template. fmt.Fprintf(reverse, "%s%%s", raw) + // Append variable name and compiled pattern. varsN[i/2] = name varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) @@ -87,10 +101,16 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash // Add the remaining. raw := tpl[end:] pattern.WriteString(regexp.QuoteMeta(raw)) - if strictSlash { + if options.strictSlash { pattern.WriteString("[/]?") } - if !matchPrefix { + if typ == regexpTypeQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if typ != regexpTypePrefix { pattern.WriteByte('$') } reverse.WriteString(raw) @@ -102,16 +122,22 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash if errCompile != nil { return nil, errCompile } + + // Check for capturing groups which used to work in older versions + if reg.NumSubexp() != len(idxs)/2 { + panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + + "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") + } + // Done! return &routeRegexp{ - template: template, - matchHost: matchHost, - matchQuery: matchQuery, - strictSlash: strictSlash, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, + template: template, + regexpType: typ, + options: options, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, }, nil } @@ -120,12 +146,10 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash type routeRegexp struct { // The unmodified template. template string - // True for host match, false for path or query string match. - matchHost bool - // True for query string match, false for path and host match. - matchQuery bool - // The strictSlash value defined on the route, but disabled if PathPrefix was used. - strictSlash bool + // The type of match + regexpType regexpType + // Options for matching + options routeRegexpOptions // Expanded regexp. regexp *regexp.Regexp // Reverse template. @@ -138,28 +162,31 @@ type routeRegexp struct { // Match matches the regexp against the URL host or path. func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if !r.matchHost { - if r.matchQuery { - return r.regexp.MatchString(req.URL.RawQuery) - } else { - return r.regexp.MatchString(req.URL.Path) + if r.regexpType != regexpTypeHost { + if r.regexpType == regexpTypeQuery { + return r.matchQueryString(req) } + path := req.URL.Path + if r.options.useEncodedPath { + path = req.URL.EscapedPath() + } + return r.regexp.MatchString(path) } + return r.regexp.MatchString(getHost(req)) } // url builds a URL part using the given values. 
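The NumSubexp check added above means route patterns may no longer contain capturing groups. A hedged sketch of what that looks like from the caller's side; the /feed route and format variable are invented purely for illustration.

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// OK: the alternation is wrapped in a non-capturing group, so the compiled
	// route regexp contains exactly one (named) group per variable.
	r.HandleFunc("/feed.{format:(?:json|xml)}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, mux.Vars(req)["format"])
	})

	// By contrast, {format:(json|xml)} would add an extra capturing group, and
	// the check above panics as soon as the route is registered.
}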
-func (r *routeRegexp) url(pairs ...string) (string, error) { - values, err := mapFromPairs(pairs...) - if err != nil { - return "", err - } +func (r *routeRegexp) url(values map[string]string) (string, error) { urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { return "", fmt.Errorf("mux: missing route variable %q", v) } + if r.regexpType == regexpTypeQuery { + value = url.QueryEscape(value) + } urlValues[k] = value } rv := fmt.Sprintf(r.reverse, urlValues...) @@ -178,11 +205,31 @@ func (r *routeRegexp) url(pairs ...string) (string, error) { return rv, nil } +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getURLQuery(req *http.Request) string { + if r.regexpType != regexpTypeQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + // braceIndices returns the first level curly brace indices from a string. // It returns an error in case of unbalanced braces. func braceIndices(s string) ([]int, error) { var level, idx int - idxs := make([]int, 0) + var idxs []int for i := 0; i < len(s); i++ { switch s[i] { case '{': @@ -203,6 +250,11 @@ func braceIndices(s string) ([]int, error) { return idxs, nil } +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + // ---------------------------------------------------------------------------- // routeRegexpGroup // ---------------------------------------------------------------------------- @@ -215,26 +267,27 @@ type routeRegexpGroup struct { } // setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { +func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] - } + host := getHost(req) + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) } } + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } // Store path variables. if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] - } + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) // Check if we should redirect. - if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") + if v.path.options.strictSlash { + p1 := strings.HasSuffix(path, "/") p2 := strings.HasSuffix(v.path.template, "/") if p1 != p2 { u, _ := url.Parse(req.URL.String()) @@ -243,32 +296,33 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } else { u.Path += "/" } - m.Handler = http.RedirectHandler(u.String(), 301) + m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) } } } } // Store query string variables. 
- rawQuery := req.URL.RawQuery for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(rawQuery) - if queryVars != nil { - for k, v := range q.varsN { - m.Vars[v] = queryVars[k+1] - } + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) } } } // getHost tries its best to return the request host. +// According to section 14.23 of RFC 2616 the Host header +// can include the port number if the default value of 80 is not used. func getHost(r *http.Request) string { - if !r.URL.IsAbs() { - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host + if r.URL.IsAbs() { + return r.URL.Host + } + return r.Host +} + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] } - return r.URL.Host } diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 4fb85f714..16a7cdf4e 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -9,28 +9,32 @@ import ( "fmt" "net/http" "net/url" + "regexp" "strings" ) // Route stores information to match a request and build URLs. type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute // Request handler for the route. handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool // If true, this route never matches: it is only used to build URLs. buildOnly bool // The name used to build URLs. name string // Error resulted from building a route. err error + + // "global" reference to all named routes + namedRoutes map[string]*Route + + // config possibly passed in from `Router` + routeConf +} + +// SkipClean reports whether path cleaning is enabled for this route via +// Router.SkipClean. +func (r *Route) SkipClean() bool { + return r.skipClean } // Match matches the route against the request. @@ -38,12 +42,45 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { if r.buildOnly || r.err != nil { return false } + + var matchErr error + // Match everything. for _, m := range r.matchers { if matched := m.Match(req, match); !matched { + if _, ok := m.(methodMatcher); ok { + matchErr = ErrMethodMismatch + continue + } + + // Ignore ErrNotFound errors. These errors arise from match call + // to Subrouters. + // + // This prevents subsequent matching subrouters from failing to + // run middleware. If not ignored, the middleware would see a + // non-nil MatchErr and be skipped, even when there was a + // matching route. + if match.MatchErr == ErrNotFound { + match.MatchErr = nil + } + + matchErr = nil return false } } + + if matchErr != nil { + match.MatchErr = matchErr + return false + } + + if match.MatchErr == ErrMethodMismatch { + // We found a route which matches request method, clear MatchErr + match.MatchErr = nil + // Then override the mis-matched handler + match.Handler = r.handler + } + // Yay, we have a match. Let's collect some info about it. 
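The MatchErr bookkeeping added to Route.Match above surfaces method mismatches to callers. A small sketch, assuming the Router.Match plumbing from the same vendored version (not shown in this hunk); the /items route is illustrative only.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/items", func(http.ResponseWriter, *http.Request) {}).Methods("POST")

	// A GET to a POST-only path no longer looks like a plain not-found: the
	// match fails, but MatchErr reports the method mismatch.
	req := httptest.NewRequest("GET", "http://example.com/items", nil)
	var match mux.RouteMatch
	if !r.Match(req, &match) && match.MatchErr == mux.ErrMethodMismatch {
		fmt.Println("path matched, method did not")
	}
}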
if match.Route == nil { match.Route = r @@ -54,10 +91,9 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { if match.Vars == nil { match.Vars = make(map[string]string) } + // Set variables. - if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } + r.regexp.setMatch(req, match, r) return true } @@ -99,7 +135,7 @@ func (r *Route) GetHandler() http.Handler { // Name ----------------------------------------------------------------------- // Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. +// It is an error to call Name more than once on a route. func (r *Route) Name(name string) *Route { if r.name != "" { r.err = fmt.Errorf("mux: route already has name %q, can't set %q", @@ -107,7 +143,7 @@ func (r *Route) Name(name string) *Route { } if r.err == nil { r.name = name - r.getNamedRoutes()[name] = r + r.namedRoutes[name] = r } return r } @@ -135,20 +171,22 @@ func (r *Route) addMatcher(m matcher) *Route { } // addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { +func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { if r.err != nil { return r.err } - r.regexp = r.getRegexpGroup() - if !matchHost && !matchQuery { - if len(tpl) == 0 || tpl[0] != '/' { + if typ == regexpTypePath || typ == regexpTypePrefix { + if len(tpl) > 0 && tpl[0] != '/' { return fmt.Errorf("mux: path must start with a slash, got %q", tpl) } if r.regexp.path != nil { tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl } } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ + strictSlash: r.strictSlash, + useEncodedPath: r.useEncodedPath, + }) if err != nil { return err } @@ -157,7 +195,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery return err } } - if matchHost { + if typ == regexpTypeHost { if r.regexp.path != nil { if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { return err @@ -170,7 +208,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery return err } } - if matchQuery { + if typ == regexpTypeQuery { r.regexp.queries = append(r.regexp.queries, rr) } else { r.regexp.path = rr @@ -186,7 +224,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery type headerMatcher map[string]string func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.Header, true) + return matchMapWithString(m, r.Header, true) } // Headers adds a matcher for request header values. @@ -197,22 +235,47 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. -// -// It the value is an empty string, it will match any value if the key is set. +// If the value is an empty string, it will match any value if the key is set. func (r *Route) Headers(pairs ...string) *Route { if r.err == nil { var headers map[string]string - headers, r.err = mapFromPairs(pairs...) + headers, r.err = mapFromPairsToString(pairs...) 
return r.addMatcher(headerMatcher(headers)) } return r } +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// If the value is an empty string, it will match any value if the key is set. +// Use the start and end of string anchors (^ and $) to match an exact value. +func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + // Host ----------------------------------------------------------------------- // Host adds a matcher for the URL host. // It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next dot. // @@ -221,14 +284,14 @@ func (r *Route) Headers(pairs ...string) *Route { // For example: // // r := mux.NewRouter() -// r.Host("www.domain.com") +// r.Host("www.example.com") // r.Host("{subdomain}.domain.com") // r.Host("{subdomain:[a-z]+}.domain.com") // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, true, false, false) + r.err = r.addRegexpMatcher(tpl, regexpTypeHost) return r } @@ -237,6 +300,7 @@ func (r *Route) Host(tpl string) *Route { // MatcherFunc is the function signature used by custom matchers. type MatcherFunc func(*http.Request, *RouteMatch) bool +// Match returns the match for a given request. func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { return m(r, match) } @@ -270,7 +334,7 @@ func (r *Route) Methods(methods ...string) *Route { // Path adds a matcher for the URL path. // It accepts a template with zero or more URL variables enclosed by {}. The // template must start with a "/". -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // @@ -287,7 +351,7 @@ func (r *Route) Methods(methods ...string) *Route { // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, false, false) + r.err = r.addRegexpMatcher(tpl, regexpTypePath) return r } @@ -303,7 +367,7 @@ func (r *Route) Path(tpl string) *Route { // Also note that the setting of Router.StrictSlash() has no effect on routes // with a PathPrefix matcher. func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, true, false) + r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) return r } @@ -321,12 +385,11 @@ func (r *Route) PathPrefix(tpl string) *Route { // // It the value is an empty string, it will match any value if the key is set. 
// -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // // - {name:pattern} matches the given regexp pattern. - func (r *Route) Queries(pairs ...string) *Route { length := len(pairs) if length%2 != 0 { @@ -335,7 +398,7 @@ func (r *Route) Queries(pairs ...string) *Route { return nil } for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { return r } } @@ -358,9 +421,33 @@ func (r *Route) Schemes(schemes ...string) *Route { for k, v := range schemes { schemes[k] = strings.ToLower(v) } + if len(schemes) > 0 { + r.buildScheme = schemes[0] + } return r.addMatcher(schemeMatcher(schemes)) } +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + if r.buildVarsFunc != nil { + // compose the old and new functions + old := r.buildVarsFunc + r.buildVarsFunc = func(m map[string]string) map[string]string { + return f(old(m)) + } + } else { + r.buildVarsFunc = f + } + return r +} + // Subrouter ------------------------------------------------------------------ // Subrouter creates a subrouter for the route. @@ -368,7 +455,7 @@ func (r *Route) Schemes(schemes ...string) *Route { // It will test the inner routes only if the parent route matched. For example: // // r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() +// s := r.Host("www.example.com").Subrouter() // s.HandleFunc("/products/", ProductsHandler) // s.HandleFunc("/products/{key}", ProductHandler) // s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) @@ -376,7 +463,8 @@ func (r *Route) Schemes(schemes ...string) *Route { // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} + // initialize a subrouter with a copy of the parent route's configuration + router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} r.addMatcher(router) return router } @@ -420,27 +508,38 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err } var scheme, host, path string - var err error + queries := make([]string, 0, len(r.regexp.queries)) if r.regexp.host != nil { - // Set a default scheme. 
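The BuildVarsFunc hook introduced above lets callers rewrite variables before a URL is reversed. A minimal sketch under that assumption; the /users/{name} route and lower-casing rule are illustrative only.

package main

import (
	"fmt"
	"strings"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Path("/users/{name}").
		BuildVarsFunc(func(vars map[string]string) map[string]string {
			// Adjust variables before the URL is built; request matching is unaffected.
			vars["name"] = strings.ToLower(vars["name"])
			return vars
		}).
		Name("user")

	u, err := r.Get("user").URL("name", "Alice")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path) // /users/alice
}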
- scheme = "http" - if host, err = r.regexp.host.url(pairs...); err != nil { + if host, err = r.regexp.host.url(values); err != nil { return nil, err } + scheme = "http" + if r.buildScheme != "" { + scheme = r.buildScheme + } } if r.regexp.path != nil { - if path, err = r.regexp.path.url(pairs...); err != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + for _, q := range r.regexp.queries { + var query string + if query, err = q.url(values); err != nil { return nil, err } + queries = append(queries, query) } return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, + Scheme: scheme, + Host: host, + Path: path, + RawQuery: strings.Join(queries, "&"), }, nil } @@ -451,17 +550,25 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.host == nil { + if r.regexp.host == nil { return nil, errors.New("mux: route doesn't have a host") } - host, err := r.regexp.host.url(pairs...) + values, err := r.prepareVars(pairs...) if err != nil { return nil, err } - return &url.URL{ + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + u := &url.URL{ Scheme: "http", Host: host, - }, nil + } + if r.buildScheme != "" { + u.Scheme = r.buildScheme + } + return u, nil } // URLPath builds the path part of the URL for a route. See Route.URL(). @@ -471,10 +578,14 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.path == nil { + if r.regexp.path == nil { return nil, errors.New("mux: route doesn't have a path") } - path, err := r.regexp.path.url(pairs...) + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) if err != nil { return nil, err } @@ -483,43 +594,117 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { }, nil } -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup +// GetPathRegexp returns the expanded regular expression used to match route path. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathRegexp() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route does not have a path") + } + return r.regexp.path.regexp.String(), nil } -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. 
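URL building now reverses query templates as well and joins them into RawQuery, as the hunk above shows. A minimal sketch; the route name "search" and the q parameter are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Path("/search").Queries("q", "{q}").Name("search")

	// The named route fills both the path and the query variables.
	u, err := r.Get("search").URL("q", "golang")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // /search?q=golang
}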
- r.parent = NewRouter() +// GetQueriesRegexp returns the expanded regular expressions used to match the +// route queries. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not have queries. +func (r *Route) GetQueriesRegexp() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") } - return r.parent.getNamedRoutes() + var queries []string + for _, query := range r.regexp.queries { + queries = append(queries, query.regexp.String()) + } + return queries, nil } -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } +// GetQueriesTemplates returns the templates used to build the +// query matching. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define queries. +func (r *Route) GetQueriesTemplates() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + var queries []string + for _, query := range r.regexp.queries { + queries = append(queries, query.template) + } + return queries, nil +} + +// GetMethods returns the methods the route matches against +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if route does not have methods. +func (r *Route) GetMethods() ([]string, error) { + if r.err != nil { + return nil, r.err + } + for _, m := range r.matchers { + if methods, ok := m.(methodMatcher); ok { + return []string(methods), nil } } - return r.regexp + return nil, errors.New("mux: route doesn't have methods") +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. +func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m } diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go new file mode 100644 index 000000000..32ecffde4 --- /dev/null +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -0,0 +1,19 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. 
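The introspection helpers added above (GetPathTemplate, GetMethods, and friends) make it easy to dump a route table for documentation. A sketch that also assumes Router.Walk from the same vendored package (not shown in this hunk); the sample routes are illustrative only.

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/articles", func(http.ResponseWriter, *http.Request) {}).Methods("POST")
	r.HandleFunc("/articles/{id:[0-9]+}", func(http.ResponseWriter, *http.Request) {}).Methods("GET")

	// Print "[METHODS] /path/template" for every registered route.
	_ = r.Walk(func(route *mux.Route, _ *mux.Router, _ []*mux.Route) error {
		tpl, _ := route.GetPathTemplate()
		methods, _ := route.GetMethods()
		fmt.Println(methods, tpl)
		return nil
	})
}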
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import "net/http" + +// SetURLVars sets the URL variables for the given request, to be accessed via +// mux.Vars for testing route behaviour. Arguments are not modified, a shallow +// copy is returned. +// +// This API should only be used for testing purposes; it provides a way to +// inject variables into the request context. Alternatively, URL variables +// can be set by making a route that captures the required variables, +// starting a server and sending the request to that server. +func SetURLVars(r *http.Request, val map[string]string) *http.Request { + return setVars(r, val) +} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 000000000..5091fb073 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +/jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 000000000..1f9807757 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... +script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 000000000..b03310a91 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 000000000..a828d2848 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . 
-cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 000000000..187ef676d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 000000000..8e26ffeec --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. 
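A brief usage sketch of the api.go surface shown above; the sample JSON document and expressions are illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	_ = json.Unmarshal([]byte(`{"foo": {"bar": [1, 2, 3]}}`), &data)

	// One-shot form: parse and evaluate in a single call.
	v, _ := jmespath.Search("foo.bar[1]", data)
	fmt.Println(v) // 2

	// Pre-compiled form, safe for reuse across goroutines.
	jp := jmespath.MustCompile("foo.bar[-1]")
	last, _ := jp.Search(data)
	fmt.Println(last) // 3
}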
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..9b7cd89b4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + 
name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
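// A minimal sketch of how the builtin functions above (map, sort_by, join, ...)
// are reached in practice, going through the package-level Search helper that
// github.com/jmespath/go-jmespath exposes. Illustrative example only; it is
// not part of the vendored sources, and the data and names are hypothetical.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	raw := []byte(`{"people": [{"name": "a", "age": 30}, {"name": "b", "age": 25}]}`)
	if err := json.Unmarshal(raw, &data); err != nil {
		panic(err)
	}

	// sort_by, map and join dispatch to jpfSortBy, jpfMap and jpfJoin above.
	result, err := jmespath.Search("join(', ', map(&name, sort_by(people, &age)))", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // prints: b, a
}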
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
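		// This covers caller-provided Go slices (for example []MyStruct) that
		// cannot be type-asserted to []interface{}; the negative-index handling
		// below mirrors the []interface{} branch above.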
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+ currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
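	// Only the ASCII digits 0-9 are consumed in this loop; the rune that
	// triggered the call (a digit or a leading '-') was already read by
	// tokenize, and start is moved back so it is included in the token.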
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000..1240a1755 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
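// A short sketch of driving the lexer and parser above directly via the
// exported NewParser API, e.g. to inspect the AST that the tree interpreter
// later walks. Illustrative only; it is not part of the vendored sources.
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	parser := jmespath.NewParser()
	ast, err := parser.Parse("people[?age > `21`].name | sort(@)")
	if err != nil {
		// Lexing and parsing failures are reported as SyntaxError values;
		// HighlightLocation draws a caret under the offending offset.
		if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
			fmt.Println(syntaxErr.HighlightLocation())
		}
		return
	}
	// PrettyPrint is a debugging aid; its output format is not stable.
	fmt.Println(ast.PrettyPrint(0))
}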
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 000000000..dae79cbdf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 000000000..ddc1b7d7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/tidepool-org/go-common/atomics/atomics.go b/vendor/github.com/tidepool-org/go-common/atomics/atomics.go deleted file mode 100644 index cc54b6960..000000000 --- a/vendor/github.com/tidepool-org/go-common/atomics/atomics.go +++ /dev/null @@ -1,45 +0,0 @@ -package atomics - -import "sync" - -type AtomicString struct { - mut sync.RWMutex - val string -} - -// Set sets the value of the AtomicString. It returns the old value -func (as *AtomicString) Set(val string) string { - as.mut.Lock() - defer as.mut.Unlock() - retVal := as.val - as.val = val - return retVal -} - -// Get gets the current value of the AtomicString -func (as *AtomicString) Get() string { - as.mut.RLock() - defer as.mut.RUnlock() - return as.val -} - -type AtomicInterface struct { - mut sync.RWMutex - val interface{} -} - -// Set sets the value of the AtomicInterface. It returns the old value -func (as *AtomicInterface) Set(val interface{}) interface{} { - as.mut.Lock() - defer as.mut.Unlock() - retVal := as.val - as.val = val - return retVal -} - -// Get gets the current value of the AtomicInterface -func (as *AtomicInterface) Get() interface{} { - as.mut.RLock() - defer as.mut.RUnlock() - return as.val -} diff --git a/vendor/github.com/tidepool-org/go-common/clients/hakken/coordinator_test.go b/vendor/github.com/tidepool-org/go-common/clients/hakken/coordinator_test.go deleted file mode 100644 index 45fdf861c..000000000 --- a/vendor/github.com/tidepool-org/go-common/clients/hakken/coordinator_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package hakken - -import ( - "fmt" - common "github.com/tidepool-org/go-common" - "github.com/tidepool-org/go-common/atomics" - "net/http" - "net/http/httptest" - "strconv" - "strings" - "testing" - "time" -) - -func TestPoll(t *testing.T) { - makeServer := func(as *atomics.AtomicString) *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { - switch req.URL.Path { - case "/v1/coordinator": - retVal := as.Get() - - if strings.Contains(retVal, "FAIL") { - t.Error(retVal) - return - } else if strings.Contains(retVal, "RETURN:") { - value, _ := strconv.Atoi(strings.Split(retVal, ":")[1]) - res.WriteHeader(value) - return - } - - fmt.Fprint(res, retVal) - default: - t.Errorf("Uknown url[%s]", req.URL.Path) - } - })) - } - - coordOneReturn := &atomics.AtomicString{} - coordTwoReturn := &atomics.AtomicString{} - resyncReturn := &atomics.AtomicString{} - - coordOne := makeServer(coordOneReturn) - defer coordOne.Close() - coordTwo := makeServer(coordTwoReturn) - defer coordTwo.Close() - resyncServer := makeServer(resyncReturn) - defer resyncServer.Close() - - coordOneURL := common.URLOrPanic(coordOne.URL) - coordTwoURL := common.URLOrPanic(coordTwo.URL) - - initialVal := fmt.Sprintf(`[{"host": "%s"}, {"host": 
"%s"}]`, coordOneURL.Host, coordTwoURL.Host) - coordOneReturn.Set(initialVal) - coordTwoReturn.Set(initialVal) - resyncReturn.Set(initialVal) - - resyncClient := coordinatorClient{Coordinator{*common.URLOrPanic(resyncServer.URL)}} - - resyncTicker := make(chan time.Time) - pollTicker := make(chan time.Time) - dropCooChan := make(chan *coordinatorClient) - - manager := &coordinatorManager{ - resyncClient: resyncClient, - resyncTicker: &time.Ticker{C: resyncTicker}, - pollTicker: &time.Ticker{C: pollTicker}, - dropCooChan: dropCooChan, - } - - if c := manager.getClient(); c != nil { - t.Errorf("Expected client to NOT exist, got[%+v]", c) - } - manager.start() - - if c := manager.getClient(); c == nil { - t.Errorf("Expected to get a client, didn't") - } else if c.String() != coordOne.URL { - t.Errorf("Expected to get client for coordOne, got[%s]", c.String()) - } - if c := manager.getClients(); c == nil || len(c) != 2 { - t.Errorf("Expected two clients, got [%d]", len(c)) - } - - // Remove coordOne client - manager.reportBadClient(manager.getClient()) - <-time.After(10 * time.Millisecond) - if c := manager.getClient(); c == nil { - t.Errorf("Expected to get a client, didn't") - } else if c.String() != coordTwo.URL { - t.Errorf("Expected to get client for coordTwo, got[%s]", c.String()) - } - - coordOneReturn.Set("FAIL - coordOne should not be called") - pollTicker <- time.Now() - - for count := 0; count < 10; count++ { - <-time.After(10 * time.Millisecond) - if len(manager.getClients()) == 2 { - break - } - } - if c := manager.getClient(); c == nil { - t.Errorf("Expected to get a client, didn't") - } else if c.String() != coordTwo.URL { - t.Errorf("Expected to get client for coordTwo, got[%s]", c.String()) - } - if c := manager.getClients(); c == nil || len(c) != 2 { - t.Errorf("Expected two clients, got [%d]", len(c)) - } - - coordOneReturn.Set("RETURN:500") - pollTicker <- time.Now() - - for count := 0; count < 10; count++ { - <-time.After(10 * time.Millisecond) - if len(manager.getClients()) == 1 { - break - } - } - if c := manager.getClient(); c == nil { - t.Errorf("Expected to get a client, didn't") - } else if c.String() != coordTwo.URL { - t.Errorf("Expected to get client for coordTwo, got[%s]", c.String()) - } - if c := manager.getClients(); c == nil || len(c) != 1 { - t.Errorf("Expected one client, got [%d]", len(c)) - } - - coordTwoReturn.Set("RETURN:500") - pollTicker <- time.Now() - for count := 0; count < 10; count++ { - <-time.After(10 * time.Millisecond) - if len(manager.getClients()) == 1 { - break - } - } - if c := manager.getClient(); c != nil { - t.Errorf("Expected to NOT get a client") - } - if c := manager.getClients(); c == nil || len(c) != 0 { - t.Errorf("Expected zero clients, got [%d]", len(c)) - } - - resyncTicker <- time.Now() - for count := 0; count < 10; count++ { - <-time.After(10 * time.Millisecond) - if len(manager.getClients()) == 2 { - break - } - } - if c := manager.getClient(); c == nil { - t.Errorf("Expected to get a client, didn't") - } else if c.String() != coordOne.URL { - t.Errorf("Expected to get client for coordTwo, got[%s]", c.String()) - } - if c := manager.getClients(); c == nil || len(c) != 2 { - t.Errorf("Expected two clients, got [%d]", len(c)) - } - - if err := manager.Close(); err != nil { - t.Errorf("Error when closing manager: %s", err) - } -} diff --git a/vendor/github.com/tidepool-org/go-common/clients/highwater/highwaterMock_test.go b/vendor/github.com/tidepool-org/go-common/clients/highwater/highwaterMock_test.go deleted file mode 
100644 index 9eda1d8c5..000000000 --- a/vendor/github.com/tidepool-org/go-common/clients/highwater/highwaterMock_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package highwater - -import ( - "testing" -) - -const ( - EVENT_NAME = "testing" - USERID = "123-456-cc-2" - TOKEN = "a.fake.token.for.this" -) - -func TestMock(t *testing.T) { - - p := make(map[string]string) - - p["one"] = "two" - p["buckle"] = "my" - p["shoe"] = "three ..." - - client := NewMock() - - client.PostServer(EVENT_NAME, TOKEN, p) - - client.PostThisUser(EVENT_NAME, TOKEN, p) - - client.PostWithUser(USERID, EVENT_NAME, TOKEN, p) -} - -//log.Panic is called when not all required args are passed. -//This test fails if the panic and subseqent recover are not called -func TestMock_Fails(t *testing.T) { - - defer func() { - if r := recover(); r == nil { - t.Error("should have paniced") - } - }() - - p := make(map[string]string) - - p["one"] = "two" - p["buckle"] = "my" - p["shoe"] = "three ..." - - client := NewMock() - - client.PostServer("", TOKEN, p) - - client.PostThisUser(EVENT_NAME, "", p) - - client.PostWithUser("", EVENT_NAME, TOKEN, p) - - client.PostWithUser("", EVENT_NAME, TOKEN, nil) -} diff --git a/vendor/github.com/tidepool-org/go-common/clients/mocks_test.go b/vendor/github.com/tidepool-org/go-common/clients/mocks_test.go deleted file mode 100644 index eeb90caab..000000000 --- a/vendor/github.com/tidepool-org/go-common/clients/mocks_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package clients - -import ( - "reflect" - "testing" -) - -//The purpose of this test is to ensure you canreply on the mocks - -const USERID, GROUPID, TOKEN_MOCK = "123user", "456group", "this is a token" - -func makeExpectedPermissons() Permissions { - return Permissions{USERID: Allowed} -} - -func makeExpectedUsersPermissions() UsersPermissions { - return UsersPermissions{GROUPID: Permissions{GROUPID: Allowed}} -} - -func TestGatekeeperMock_UserInGroup(t *testing.T) { - - expected := makeExpectedPermissons() - - gkc := NewGatekeeperMock(nil, nil) - - if perms, err := gkc.UserInGroup(USERID, GROUPID); err != nil { - t.Fatal("No error should be returned") - } else if !reflect.DeepEqual(perms, expected) { - t.Fatalf("Perms where [%v] but expected [%v]", perms, expected) - } -} - -func TestGatekeeperMock_UsersInGroup(t *testing.T) { - - expected := makeExpectedUsersPermissions() - - gkc := NewGatekeeperMock(nil, nil) - - if perms, err := gkc.UsersInGroup(GROUPID); err != nil { - t.Fatal("No error should be returned") - } else if !reflect.DeepEqual(perms, expected) { - t.Fatalf("Perms were [%v] but expected [%v]", perms, expected) - } -} - -func TestGatekeeperMock_SetPermissions(t *testing.T) { - - gkc := NewGatekeeperMock(nil, nil) - - expected := makeExpectedPermissons() - - if perms, err := gkc.SetPermissions(USERID, GROUPID, expected); err != nil { - t.Fatal("No error should be returned") - } else if !reflect.DeepEqual(perms, expected) { - t.Fatalf("Perms where [%v] but expected [%v]", perms, expected) - - } -} - -func TestSeagullMock_GetCollection(t *testing.T) { - - sc := NewSeagullMock() - var col struct{ Something string } - - sc.GetCollection("123.456", "stuff", TOKEN_MOCK, &col) - - if col.Something != "anit no thing" { - t.Error("Should have given mocked collection") - } -} - -func TestSeagullMock_GetPrivatePair(t *testing.T) { - sc := NewSeagullMock() - - if pp := sc.GetPrivatePair("123.456", "Stuff", TOKEN_MOCK); pp == nil { - t.Error("Should give us mocked private pair") - } - -} diff --git 
a/vendor/github.com/tidepool-org/go-common/clients/shoreline/shorelineMock_test.go b/vendor/github.com/tidepool-org/go-common/clients/shoreline/shorelineMock_test.go deleted file mode 100644 index c9583e494..000000000 --- a/vendor/github.com/tidepool-org/go-common/clients/shoreline/shorelineMock_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package shoreline - -import ( - "testing" -) - -func TestMock(t *testing.T) { - - const TOKEN_MOCK = "this is a token" - - client := NewMock(TOKEN_MOCK) - - if err := client.Start(); err != nil { - t.Errorf("Failed start with error[%v]", err) - } - - if tok := client.TokenProvide(); tok != TOKEN_MOCK { - t.Errorf("Unexpected token[%s]", tok) - } - - if usr, token, err := client.Login("billy", "howdy"); err != nil { - t.Errorf("Failed start with error[%v]", err) - } else { - if usr == nil { - t.Error("Should give us a fake usr details") - } - if token == "" { - t.Error("Should give us a fake token") - } - } - - if checkedTd := client.CheckToken(TOKEN_MOCK); checkedTd == nil { - t.Error("Should give us token data") - } - - if usr, _ := client.GetUser("billy@howdy.org", TOKEN_MOCK); usr == nil { - t.Error("Should give us a mock user") - } - - username := "name" - password := "myN3wPw" - user := UserUpdate{Username: &username, Emails: &[]string{"an@email.org"}, Password: &password} - - if err := client.UpdateUser("123", user, TOKEN_MOCK); err != nil { - t.Error("Should return no error on success") - } - - if sd, se := client.Signup("username", "password", "email@place.org"); sd == nil || se != nil { - t.Errorf("Signup not return err[%s]", se.Error()) - } - -} diff --git a/vendor/github.com/tidepool-org/go-common/clients/shoreline/shoreline_test.go b/vendor/github.com/tidepool-org/go-common/clients/shoreline/shoreline_test.go deleted file mode 100644 index 720bf4db8..000000000 --- a/vendor/github.com/tidepool-org/go-common/clients/shoreline/shoreline_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package shoreline - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/tidepool-org/go-common/clients/disc" -) - -const name = "test" -const secret = "howdy ho, neighbor joe" -const token = "this is a token" - -func TestStart(t *testing.T) { - srvr := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { - switch req.URL.Path { - case "/serverlogin": - if nam := req.Header.Get("x-tidepool-server-name"); nam != name { - t.Errorf("Bad value for name[%v]", nam) - } - - if sec := req.Header.Get("x-tidepool-server-secret"); sec != secret { - t.Errorf("Bad secret value[%v]", sec) - } - - res.Header().Set("x-tidepool-session-token", token) - default: - t.Errorf("Unknown path[%s]", req.URL.Path) - } - })) - defer srvr.Close() - - shorelineClient := NewShorelineClientBuilder(). - WithHostGetter(disc.NewStaticHostGetterFromString(srvr.URL)). - WithName("test"). - WithSecret("howdy ho, neighbor joe"). - WithTokenRefreshInterval(10 * time.Millisecond). 
- Build() - - err := shorelineClient.Start() - - if err != nil { - t.Errorf("Failed start with error[%v]", err) - } - if tok := shorelineClient.TokenProvide(); tok != token { - t.Errorf("Unexpected token[%s]", tok) - } - - <-time.After(100 * time.Millisecond) - shorelineClient.Close() -} - -func TestLogin(t *testing.T) { - srvr := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { - switch req.URL.Path { - case "/serverlogin": - res.Header().Set("x-tidepool-session-token", token) - case "/login": - if auth := req.Header.Get("Authorization"); auth != "Basic YmlsbHk6aG93ZHk=" { - t.Errorf("Bad Authorization Header[%v]", auth) - } - - res.Header().Set("x-tidepool-session-token", token) - fmt.Fprint(res, `{"userid": "1234abc", "username": "billy", "emails": ["billy@1234.abc"]}`) - default: - t.Errorf("Unknown path[%s]", req.URL.Path) - } - })) - defer srvr.Close() - - shorelineClient := NewShorelineClientBuilder(). - WithHostGetter(disc.NewStaticHostGetterFromString(srvr.URL)). - WithName("test"). - WithSecret("howdy ho, neighbor joe"). - Build() - - err := shorelineClient.Start() - if err != nil { - t.Errorf("Failed start with error[%v]", err) - } - defer shorelineClient.Close() - - ud, tok, err := shorelineClient.Login("billy", "howdy") - if err != nil { - t.Errorf("Error on login[%v]", err) - } - if tok != token { - t.Errorf("Unexpected token[%s]", tok) - } - if ud.UserID != "1234abc" || ud.Username != "billy" || len(ud.Emails) != 1 || ud.Emails[0] != "billy@1234.abc" { - t.Errorf("Bad userData object[%+v]", ud) - } -} - -func TestSignup(t *testing.T) { - srvr := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { - switch req.URL.Path { - case "/serverlogin": - res.Header().Set("x-tidepool-session-token", token) - case "/user": - res.WriteHeader(http.StatusCreated) - fmt.Fprint(res, `{"userid": "1234abc", "username": "new me", "emails": ["new.me@1234.abc"]}`) - default: - t.Errorf("Unknown path[%s]", req.URL.Path) - } - })) - defer srvr.Close() - - client := NewShorelineClientBuilder(). - WithHostGetter(disc.NewStaticHostGetterFromString(srvr.URL)). - WithName("test"). - WithSecret("howdy ho, neighbor joe"). - Build() - - err := client.Start() - if err != nil { - t.Errorf("Failed start with error[%v]", err) - } - defer client.Close() - - ud, err := client.Signup("new me", "howdy", "new.me@1234.abc") - if err != nil { - t.Errorf("Error on signup [%s]", err.Error()) - } - if ud.UserID != "1234abc" || ud.Username != "new me" || len(ud.Emails) != 1 || ud.Emails[0] != "new.me@1234.abc" { - t.Errorf("Bad userData object[%+v]", ud) - } - -} diff --git a/vendor/labix.org/v2/mgo/auth_test.go b/vendor/labix.org/v2/mgo/auth_test.go deleted file mode 100644 index 07080ca4a..000000000 --- a/vendor/labix.org/v2/mgo/auth_test.go +++ /dev/null @@ -1,935 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "flag" - "fmt" - "labix.org/v2/mgo" - . "launchpad.net/gocheck" - "net/url" - "sync" - "time" -) - -func (s *S) TestAuthLoginDatabase(c *C) { - // Test both with a normal database and with an authenticated shard. - for _, addr := range []string{"localhost:40002", "localhost:40203"} { - session, err := mgo.Dial(addr) - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - - admindb := session.DB("admin") - - err = admindb.Login("root", "wrong") - c.Assert(err, ErrorMatches, "auth fail(s|ed)") - - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - } -} - -func (s *S) TestAuthLoginSession(c *C) { - // Test both with a normal database and with an authenticated shard. - for _, addr := range []string{"localhost:40002", "localhost:40203"} { - session, err := mgo.Dial(addr) - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - - cred := mgo.Credential{ - Username: "root", - Password: "wrong", - } - err = session.Login(&cred) - c.Assert(err, ErrorMatches, "auth fail(s|ed)") - - cred.Password = "rapadura" - - err = session.Login(&cred) - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - } -} - -func (s *S) TestAuthLoginLogout(c *C) { - // Test both with a normal database and with an authenticated shard. - for _, addr := range []string{"localhost:40002", "localhost:40203"} { - session, err := mgo.Dial(addr) - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - admindb.Logout() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - - // Must have dropped auth from the session too. 
- session = session.Copy() - defer session.Close() - - coll = session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - } -} - -func (s *S) TestAuthLoginLogoutAll(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - session.LogoutAll() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") - - // Must have dropped auth from the session too. - session = session.Copy() - defer session.Close() - - coll = session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") -} - -func (s *S) TestAuthUpsertUserErrors(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - - err = mydb.UpsertUser(&mgo.User{}) - c.Assert(err, ErrorMatches, "user has no Username") - - err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", UserSource: "source"}) - c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set") - - err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}}) - c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in admin database") -} - -func (s *S) TestAuthUpsertUser(c *C) { - if !s.versionAtLeast(2, 4) { - c.Skip("UpsertUser only works on 2.4+") - } - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - - ruser := &mgo.User{ - Username: "myruser", - Password: "mypass", - Roles: []mgo.Role{mgo.RoleRead}, - } - rwuser := &mgo.User{ - Username: "myrwuser", - Password: "mypass", - Roles: []mgo.Role{mgo.RoleReadWrite}, - } - - err = mydb.UpsertUser(ruser) - c.Assert(err, IsNil) - err = mydb.UpsertUser(rwuser) - c.Assert(err, IsNil) - - err = mydb.Login("myruser", "mypass") - c.Assert(err, IsNil) - - admindb.Logout() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - err = mydb.Login("myrwuser", "mypass") - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - myotherdb := session.DB("myotherdb") - - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - // Test UserSource. - rwuserother := &mgo.User{ - Username: "myrwuser", - UserSource: "mydb", - Roles: []mgo.Role{mgo.RoleRead}, - } - - err = myotherdb.UpsertUser(rwuserother) - if s.versionAtLeast(2, 6) { - c.Assert(err, ErrorMatches, `MongoDB 2.6\+ does not support the UserSource setting`) - return - } - c.Assert(err, IsNil) - - admindb.Logout() - - // Test indirection via UserSource: we can't write to it, because - // the roles for myrwuser are different there. - othercoll := myotherdb.C("myothercoll") - err = othercoll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - // Reading works, though. 
- err = othercoll.Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) - - // Can't login directly into the database using UserSource, though. - err = myotherdb.Login("myrwuser", "mypass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)") -} - -func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) { - if !s.versionAtLeast(2, 4) { - c.Skip("UpsertUser only works on 2.4+") - } - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - ruser := &mgo.User{ - Username: "myruser", - Password: "mypass", - OtherDBRoles: map[string][]mgo.Role{"mydb": []mgo.Role{mgo.RoleRead}}, - } - - err = admindb.UpsertUser(ruser) - c.Assert(err, IsNil) - defer admindb.RemoveUser("myruser") - - admindb.Logout() - err = admindb.Login("myruser", "mypass") - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - err = coll.Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestAuthUpsertUserUpdates(c *C) { - if !s.versionAtLeast(2, 4) { - c.Skip("UpsertUser only works on 2.4+") - } - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - - // Insert a user that can read. - user := &mgo.User{ - Username: "myruser", - Password: "mypass", - Roles: []mgo.Role{mgo.RoleRead}, - } - err = mydb.UpsertUser(user) - c.Assert(err, IsNil) - - // Now update the user password. - user = &mgo.User{ - Username: "myruser", - Password: "mynewpass", - } - err = mydb.UpsertUser(user) - c.Assert(err, IsNil) - - // Login with the new user. - usession, err := mgo.Dial("myruser:mynewpass@localhost:40002/mydb") - c.Assert(err, IsNil) - defer usession.Close() - - // Can read, but not write. - err = usession.DB("mydb").C("mycoll").Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) - err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - // Update the user role. - user = &mgo.User{ - Username: "myruser", - Roles: []mgo.Role{mgo.RoleReadWrite}, - } - err = mydb.UpsertUser(user) - c.Assert(err, IsNil) - - // Dial again to ensure the password hasn't changed. - usession, err = mgo.Dial("myruser:mynewpass@localhost:40002/mydb") - c.Assert(err, IsNil) - defer usession.Close() - - // Now it can write. 
- err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthAddUser(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - err = mydb.AddUser("myruser", "mypass", true) - c.Assert(err, IsNil) - err = mydb.AddUser("mywuser", "mypass", false) - c.Assert(err, IsNil) - - err = mydb.Login("myruser", "mypass") - c.Assert(err, IsNil) - - admindb.Logout() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - err = mydb.Login("mywuser", "mypass") - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthAddUserReplaces(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - err = mydb.AddUser("myuser", "myoldpass", false) - c.Assert(err, IsNil) - err = mydb.AddUser("myuser", "mynewpass", true) - c.Assert(err, IsNil) - - admindb.Logout() - - err = mydb.Login("myuser", "myoldpass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)") - err = mydb.Login("myuser", "mynewpass") - c.Assert(err, IsNil) - - // ReadOnly flag was changed too. - err = mydb.C("mycoll").Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") -} - -func (s *S) TestAuthRemoveUser(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - err = mydb.AddUser("myuser", "mypass", true) - c.Assert(err, IsNil) - err = mydb.RemoveUser("myuser") - c.Assert(err, IsNil) - - err = mydb.Login("myuser", "mypass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)") -} - -func (s *S) TestAuthLoginTwiceDoesNothing(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - oldStats := mgo.GetStats() - - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - newStats := mgo.GetStats() - c.Assert(newStats.SentOps, Equals, oldStats.SentOps) -} - -func (s *S) TestAuthLoginLogoutLoginDoesNothing(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - oldStats := mgo.GetStats() - - admindb.Logout() - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - newStats := mgo.GetStats() - c.Assert(newStats.SentOps, Equals, oldStats.SentOps) -} - -func (s *S) TestAuthLoginSwitchUser(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - err = admindb.Login("reader", "rapadura") - c.Assert(err, IsNil) - - // Can't write. - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - // But can read. 
- result := struct{ N int }{} - err = coll.Find(nil).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) -} - -func (s *S) TestAuthLoginChangePassword(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - mydb := session.DB("mydb") - err = mydb.AddUser("myuser", "myoldpass", false) - c.Assert(err, IsNil) - - err = mydb.Login("myuser", "myoldpass") - c.Assert(err, IsNil) - - err = mydb.AddUser("myuser", "mynewpass", true) - c.Assert(err, IsNil) - - err = mydb.Login("myuser", "mynewpass") - c.Assert(err, IsNil) - - admindb.Logout() - - // The second login must be in effect, which means read-only. - err = mydb.C("mycoll").Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") -} - -func (s *S) TestAuthLoginCachingWithSessionRefresh(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - session.Refresh() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthLoginCachingWithSessionCopy(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - session = session.Copy() - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthLoginCachingWithSessionClone(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - session = session.Clone() - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthLoginCachingWithNewSession(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - session = session.New() - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized for .*") -} - -func (s *S) TestAuthLoginCachingAcrossPool(c *C) { - // Logins are cached even when the conenction goes back - // into the pool. - - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - // Add another user to test the logout case at the same time. - mydb := session.DB("mydb") - err = mydb.AddUser("myuser", "mypass", false) - c.Assert(err, IsNil) - - err = mydb.Login("myuser", "mypass") - c.Assert(err, IsNil) - - // Logout root explicitly, to test both cases. - admindb.Logout() - - // Give socket back to pool. - session.Refresh() - - // Brand new session, should use socket from the pool. - other := session.New() - defer other.Close() - - oldStats := mgo.GetStats() - - err = other.DB("admin").Login("root", "rapadura") - c.Assert(err, IsNil) - err = other.DB("mydb").Login("myuser", "mypass") - c.Assert(err, IsNil) - - // Both logins were cached, so no ops. 
- newStats := mgo.GetStats() - c.Assert(newStats.SentOps, Equals, oldStats.SentOps) - - // And they actually worked. - err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) - - other.DB("admin").Logout() - - err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) { - // Now verify that logouts are properly flushed if they - // are not revalidated after leaving the pool. - - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - // Add another user to test the logout case at the same time. - mydb := session.DB("mydb") - err = mydb.AddUser("myuser", "mypass", true) - c.Assert(err, IsNil) - - err = mydb.Login("myuser", "mypass") - c.Assert(err, IsNil) - - // Just some data to query later. - err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) - - // Give socket back to pool. - session.Refresh() - - // Brand new session, should use socket from the pool. - other := session.New() - defer other.Close() - - oldStats := mgo.GetStats() - - err = other.DB("mydb").Login("myuser", "mypass") - c.Assert(err, IsNil) - - // Login was cached, so no ops. - newStats := mgo.GetStats() - c.Assert(newStats.SentOps, Equals, oldStats.SentOps) - - // Can't write, since root has been implicitly logged out - // when the collection went into the pool, and not revalidated. - err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - // But can read due to the revalidated myuser login. - result := struct{ N int }{} - err = other.DB("mydb").C("mycoll").Find(nil).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) -} - -func (s *S) TestAuthEventual(c *C) { - // Eventual sessions don't keep sockets around, so they are - // an interesting test case. - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - admindb := session.DB("admin") - err = admindb.Login("root", "rapadura") - c.Assert(err, IsNil) - - err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) - - var wg sync.WaitGroup - wg.Add(20) - - for i := 0; i != 10; i++ { - go func() { - defer wg.Done() - var result struct{ N int } - err := session.DB("mydb").C("mycoll").Find(nil).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) - }() - } - - for i := 0; i != 10; i++ { - go func() { - defer wg.Done() - err := session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) - }() - } - - wg.Wait() -} - -func (s *S) TestAuthURL(c *C) { - session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") - c.Assert(err, IsNil) - defer session.Close() - - err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthURLWrongCredentials(c *C) { - session, err := mgo.Dial("mongodb://root:wrong@localhost:40002/") - if session != nil { - session.Close() - } - c.Assert(err, ErrorMatches, "auth fail(s|ed)") - c.Assert(session, IsNil) -} - -func (s *S) TestAuthURLWithNewSession(c *C) { - // When authentication is in the URL, the new session will - // actually carry it on as well, even if logged out explicitly. 
- session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") - c.Assert(err, IsNil) - defer session.Close() - - session.DB("admin").Logout() - - // Do it twice to ensure it passes the needed data on. - session = session.New() - defer session.Close() - session = session.New() - defer session.Close() - - err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) - c.Assert(err, IsNil) -} - -func (s *S) TestAuthURLWithDatabase(c *C) { - session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - mydb := session.DB("mydb") - err = mydb.AddUser("myruser", "mypass", true) - c.Assert(err, IsNil) - - // Test once with database, and once with source. - for i := 0; i < 2; i++ { - var url string - if i == 0 { - url = "mongodb://myruser:mypass@localhost:40002/mydb" - } else { - url = "mongodb://myruser:mypass@localhost:40002/admin?authSource=mydb" - } - usession, err := mgo.Dial(url) - c.Assert(err, IsNil) - defer usession.Close() - - ucoll := usession.DB("mydb").C("mycoll") - err = ucoll.FindId(0).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) - err = ucoll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - } -} - -func (s *S) TestDefaultDatabase(c *C) { - tests := []struct{ url, db string }{ - {"mongodb://root:rapadura@localhost:40002", "test"}, - {"mongodb://root:rapadura@localhost:40002/admin", "admin"}, - {"mongodb://localhost:40001", "test"}, - {"mongodb://localhost:40001/", "test"}, - {"mongodb://localhost:40001/mydb", "mydb"}, - } - - for _, test := range tests { - session, err := mgo.Dial(test.url) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("test: %#v", test) - c.Assert(session.DB("").Name, Equals, test.db) - - scopy := session.Copy() - c.Check(scopy.DB("").Name, Equals, test.db) - scopy.Close() - } -} - -func (s *S) TestAuthDirect(c *C) { - // Direct connections must work to the master and slaves. - for _, port := range []string{"40031", "40032", "40033"} { - url := fmt.Sprintf("mongodb://root:rapadura@localhost:%s/?connect=direct", port) - session, err := mgo.Dial(url) - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - - var result struct{} - err = session.DB("mydb").C("mycoll").Find(nil).One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) - } -} - -func (s *S) TestAuthDirectWithLogin(c *C) { - // Direct connections must work to the master and slaves. - for _, port := range []string{"40031", "40032", "40033"} { - url := fmt.Sprintf("mongodb://localhost:%s/?connect=direct", port) - session, err := mgo.Dial(url) - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - session.SetSyncTimeout(3 * time.Second) - - err = session.DB("admin").Login("root", "rapadura") - c.Assert(err, IsNil) - - var result struct{} - err = session.DB("mydb").C("mycoll").Find(nil).One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) - } -} - -var ( - plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)") - plainUser = "einstein" - plainPass = "password" -) - -func (s *S) TestAuthPlainCred(c *C) { - if *plainFlag == "" { - c.Skip("no -plain") - } - cred := &mgo.Credential{ - Username: plainUser, - Password: plainPass, - Source: "$external", - Mechanism: "PLAIN", - } - c.Logf("Connecting to %s...", *plainFlag) - session, err := mgo.Dial(*plainFlag) - c.Assert(err, IsNil) - defer session.Close() - - records := session.DB("records").C("records") - - c.Logf("Connected! 
Testing the need for authentication...") - err = records.Find(nil).One(nil) - c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") - - c.Logf("Authenticating...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - c.Logf("Connected! Testing the need for authentication...") - err = records.Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestAuthPlainURL(c *C) { - if *plainFlag == "" { - c.Skip("no -plain") - } - c.Logf("Connecting to %s...", *plainFlag) - session, err := mgo.Dial(fmt.Sprintf("%s:%s@%s?authMechanism=PLAIN", url.QueryEscape(plainUser), url.QueryEscape(plainPass), *plainFlag)) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("Connected! Testing the need for authentication...") - err = session.DB("records").C("records").Find(nil).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -var ( - kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)") - kerberosHost = "mmscustmongo.10gen.me" - kerberosUser = "mmsagent/mmscustagent.10gen.me@10GEN.ME" -) - -func (s *S) TestAuthKerberosCred(c *C) { - if !*kerberosFlag { - c.Skip("no -kerberos") - } - cred := &mgo.Credential{ - Username: kerberosUser, - Mechanism: "GSSAPI", - } - c.Logf("Connecting to %s...", kerberosHost) - session, err := mgo.Dial(kerberosHost) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("Connected! Testing the need for authentication...") - names, err := session.DatabaseNames() - c.Assert(err, ErrorMatches, "unauthorized") - - c.Logf("Authenticating...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - names, err = session.DatabaseNames() - c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) -} - -func (s *S) TestAuthKerberosURL(c *C) { - if !*kerberosFlag { - c.Skip("no -kerberos") - } - c.Logf("Connecting to %s...", kerberosHost) - session, err := mgo.Dial(url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI") - c.Assert(err, IsNil) - defer session.Close() - names, err := session.DatabaseNames() - c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) -} diff --git a/vendor/labix.org/v2/mgo/bson/bson_test.go b/vendor/labix.org/v2/mgo/bson/bson_test.go deleted file mode 100644 index 1263e97a1..000000000 --- a/vendor/labix.org/v2/mgo/bson/bson_test.go +++ /dev/null @@ -1,1466 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// gobson - BSON library for Go. - -package bson_test - -import ( - "encoding/binary" - "encoding/json" - "errors" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" - "net/url" - "reflect" - "testing" - "time" -) - -func TestAll(t *testing.T) { - TestingT(t) -} - -type S struct{} - -var _ = Suite(&S{}) - -// Wrap up the document elements contained in data, prepending the int32 -// length of the data, and appending the '\x00' value closing the document. -func wrapInDoc(data string) string { - result := make([]byte, len(data)+5) - binary.LittleEndian.PutUint32(result, uint32(len(result))) - copy(result[4:], []byte(data)) - return string(result) -} - -func makeZeroDoc(value interface{}) (zero interface{}) { - v := reflect.ValueOf(value) - t := v.Type() - switch t.Kind() { - case reflect.Map: - mv := reflect.MakeMap(t) - zero = mv.Interface() - case reflect.Ptr: - pv := reflect.New(v.Type().Elem()) - zero = pv.Interface() - case reflect.Slice: - zero = reflect.New(t).Interface() - default: - panic("unsupported doc type") - } - return zero -} - -func testUnmarshal(c *C, data string, obj interface{}) { - zero := makeZeroDoc(obj) - err := bson.Unmarshal([]byte(data), zero) - c.Assert(err, IsNil) - c.Assert(zero, DeepEquals, obj) -} - -type testItemType struct { - obj interface{} - data string -} - -// -------------------------------------------------------------------------- -// Samples from bsonspec.org: - -var sampleItems = []testItemType{ - {bson.M{"hello": "world"}, - "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, - {bson.M{"BSON": []interface{}{"awesome", float64(5.05), 1986}}, - "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + - "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, -} - -func (s *S) TestMarshalSampleItems(c *C) { - for i, item := range sampleItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalSampleItems(c *C) { - for i, item := range sampleItems { - value := bson.M{} - err := bson.Unmarshal([]byte(item.data), value) - c.Assert(err, IsNil) - c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d", i)) - } -} - -// -------------------------------------------------------------------------- -// Every type, ordered by the type flag. These are not wrapped with the -// length and last \x00 from the document. wrapInDoc() computes them. -// Note that all of them should be supported as two-way conversions. 
- -var allItems = []testItemType{ - {bson.M{}, - ""}, - {bson.M{"_": float64(5.05)}, - "\x01_\x00333333\x14@"}, - {bson.M{"_": "yo"}, - "\x02_\x00\x03\x00\x00\x00yo\x00"}, - {bson.M{"_": bson.M{"a": true}}, - "\x03_\x00\x09\x00\x00\x00\x08a\x00\x01\x00"}, - {bson.M{"_": []interface{}{true, false}}, - "\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - {bson.M{"_": []byte("yo")}, - "\x05_\x00\x02\x00\x00\x00\x00yo"}, - {bson.M{"_": bson.Binary{0x80, []byte("udef")}}, - "\x05_\x00\x04\x00\x00\x00\x80udef"}, - {bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild. - "\x06_\x00"}, - {bson.M{"_": bson.ObjectId("0123456789ab")}, - "\x07_\x000123456789ab"}, - {bson.M{"_": false}, - "\x08_\x00\x00"}, - {bson.M{"_": true}, - "\x08_\x00\x01"}, - {bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion. - "\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": nil}, - "\x0A_\x00"}, - {bson.M{"_": bson.RegEx{"ab", "cd"}}, - "\x0B_\x00ab\x00cd\x00"}, - {bson.M{"_": bson.JavaScript{"code", nil}}, - "\x0D_\x00\x05\x00\x00\x00code\x00"}, - {bson.M{"_": bson.Symbol("sym")}, - "\x0E_\x00\x04\x00\x00\x00sym\x00"}, - {bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}}, - "\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + - "\x07\x00\x00\x00\x0A\x00\x00"}, - {bson.M{"_": 258}, - "\x10_\x00\x02\x01\x00\x00"}, - {bson.M{"_": bson.MongoTimestamp(258)}, - "\x11_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": int64(258)}, - "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"_": int64(258 << 32)}, - "\x12_\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, - {bson.M{"_": bson.MaxKey}, - "\x7F_\x00"}, - {bson.M{"_": bson.MinKey}, - "\xFF_\x00"}, -} - -func (s *S) TestMarshalAllItems(c *C) { - for i, item := range allItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalAllItems(c *C) { - for i, item := range allItems { - value := bson.M{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), value) - c.Assert(err, IsNil) - c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawAllItems(c *C) { - for i, item := range allItems { - if len(item.data) == 0 { - continue - } - value := item.obj.(bson.M)["_"] - if value == nil { - continue - } - pv := reflect.New(reflect.ValueOf(value).Type()) - raw := bson.Raw{item.data[0], []byte(item.data[3:])} - c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface()) - err := raw.Unmarshal(pv.Interface()) - c.Assert(err, IsNil) - c.Assert(pv.Elem().Interface(), DeepEquals, value, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawIncompatible(c *C) { - raw := bson.Raw{0x08, []byte{0x01}} // true - err := raw.Unmarshal(&struct{}{}) - c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct \\{\\}") -} - -func (s *S) TestUnmarshalZeroesStruct(c *C) { - data, err := bson.Marshal(bson.M{"b": 2}) - c.Assert(err, IsNil) - type T struct{ A, B int } - v := T{A: 1} - err = bson.Unmarshal(data, &v) - c.Assert(err, IsNil) - c.Assert(v.A, Equals, 0) - c.Assert(v.B, Equals, 2) -} - -func (s *S) TestUnmarshalZeroesMap(c *C) { - data, err := bson.Marshal(bson.M{"b": 2}) - c.Assert(err, IsNil) - m := bson.M{"a": 1} - err = bson.Unmarshal(data, &m) - c.Assert(err, IsNil) - c.Assert(m, DeepEquals, bson.M{"b": 2}) -} - -func (s *S) TestUnmarshalNonNilInterface(c *C) { - data, err := 
bson.Marshal(bson.M{"b": 2}) - c.Assert(err, IsNil) - m := bson.M{"a": 1} - var i interface{} - i = m - err = bson.Unmarshal(data, &i) - c.Assert(err, IsNil) - c.Assert(i, DeepEquals, bson.M{"b": 2}) - c.Assert(m, DeepEquals, bson.M{"a": 1}) -} - -// -------------------------------------------------------------------------- -// Some one way marshaling operations which would unmarshal differently. - -var oneWayMarshalItems = []testItemType{ - // These are being passed as pointers, and will unmarshal as values. - {bson.M{"": &bson.Binary{0x02, []byte("old")}}, - "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - {bson.M{"": &bson.Binary{0x80, []byte("udef")}}, - "\x05\x00\x04\x00\x00\x00\x80udef"}, - {bson.M{"": &bson.RegEx{"ab", "cd"}}, - "\x0B\x00ab\x00cd\x00"}, - {bson.M{"": &bson.JavaScript{"code", nil}}, - "\x0D\x00\x05\x00\x00\x00code\x00"}, - {bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}}, - "\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + - "\x07\x00\x00\x00\x0A\x00\x00"}, - - // There's no float32 type in BSON. Will encode as a float64. - {bson.M{"": float32(5.05)}, - "\x01\x00\x00\x00\x00@33\x14@"}, - - // The array will be unmarshaled as a slice instead. - {bson.M{"": [2]bool{true, false}}, - "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - - // The typed slice will be unmarshaled as []interface{}. - {bson.M{"": []bool{true, false}}, - "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, - - // Will unmarshal as a []byte. - {bson.M{"": bson.Binary{0x00, []byte("yo")}}, - "\x05\x00\x02\x00\x00\x00\x00yo"}, - {bson.M{"": bson.Binary{0x02, []byte("old")}}, - "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - - // No way to preserve the type information here. We might encode as a zero - // value, but this would mean that pointer values in structs wouldn't be - // able to correctly distinguish between unset and set to the zero value. - {bson.M{"": (*byte)(nil)}, - "\x0A\x00"}, - - // No int types smaller than int32 in BSON. Could encode this as a char, - // but it would still be ambiguous, take more, and be awkward in Go when - // loaded without typing information. - {bson.M{"": byte(8)}, - "\x10\x00\x08\x00\x00\x00"}, - - // There are no unsigned types in BSON. Will unmarshal as int32 or int64. - {bson.M{"": uint32(258)}, - "\x10\x00\x02\x01\x00\x00"}, - {bson.M{"": uint64(258)}, - "\x12\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - {bson.M{"": uint64(258 << 32)}, - "\x12\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, - - // This will unmarshal as int. - {bson.M{"": int32(258)}, - "\x10\x00\x02\x01\x00\x00"}, - - // That's a special case. The unsigned value is too large for an int32, - // so an int64 is used instead. - {bson.M{"": uint32(1<<32 - 1)}, - "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, - {bson.M{"": uint(1<<32 - 1)}, - "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, -} - -func (s *S) TestOneWayMarshalItems(c *C) { - for i, item := range oneWayMarshalItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item %d", i)) - } -} - -// -------------------------------------------------------------------------- -// Two-way tests for user-defined structures using the samples -// from bsonspec.org. 
- -type specSample1 struct { - Hello string -} - -type specSample2 struct { - BSON []interface{} "BSON" -} - -var structSampleItems = []testItemType{ - {&specSample1{"world"}, - "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, - {&specSample2{[]interface{}{"awesome", float64(5.05), 1986}}, - "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + - "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, -} - -func (s *S) TestMarshalStructSampleItems(c *C) { - for i, item := range structSampleItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data, - Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalStructSampleItems(c *C) { - for _, item := range structSampleItems { - testUnmarshal(c, item.data, item.obj) - } -} - -func (s *S) Test64bitInt(c *C) { - var i int64 = (1 << 31) - if int(i) > 0 { - data, err := bson.Marshal(bson.M{"i": int(i)}) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc("\x12i\x00\x00\x00\x00\x80\x00\x00\x00\x00")) - - var result struct{ I int } - err = bson.Unmarshal(data, &result) - c.Assert(err, IsNil) - c.Assert(int64(result.I), Equals, i) - } -} - -// -------------------------------------------------------------------------- -// Generic two-way struct marshaling tests. - -var bytevar = byte(8) -var byteptr = &bytevar - -var structItems = []testItemType{ - {&struct{ Ptr *byte }{nil}, - "\x0Aptr\x00"}, - {&struct{ Ptr *byte }{&bytevar}, - "\x10ptr\x00\x08\x00\x00\x00"}, - {&struct{ Ptr **byte }{&byteptr}, - "\x10ptr\x00\x08\x00\x00\x00"}, - {&struct{ Byte byte }{8}, - "\x10byte\x00\x08\x00\x00\x00"}, - {&struct{ Byte byte }{0}, - "\x10byte\x00\x00\x00\x00\x00"}, - {&struct { - V byte "Tag" - }{8}, - "\x10Tag\x00\x08\x00\x00\x00"}, - {&struct { - V *struct { - Byte byte - } - }{&struct{ Byte byte }{8}}, - "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, - {&struct{ priv byte }{}, ""}, - - // The order of the dumped fields should be the same in the struct. - {&struct{ A, C, B, D, F, E *byte }{}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"}, - - {&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}}, - "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, - {&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}}, - "\x10v\x00" + "\x00\x00\x00\x00"}, - - // Byte arrays. - {&struct{ V [2]byte }{[2]byte{'y', 'o'}}, - "\x05v\x00\x02\x00\x00\x00\x00yo"}, -} - -func (s *S) TestMarshalStructItems(c *C) { - for i, item := range structItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item %d", i)) - } -} - -func (s *S) TestUnmarshalStructItems(c *C) { - for _, item := range structItems { - testUnmarshal(c, wrapInDoc(item.data), item.obj) - } -} - -func (s *S) TestUnmarshalRawStructItems(c *C) { - for i, item := range structItems { - raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))} - zero := makeZeroDoc(item.obj) - err := raw.Unmarshal(zero) - c.Assert(err, IsNil) - c.Assert(zero, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) - } -} - -func (s *S) TestUnmarshalRawNil(c *C) { - // Regression test: shouldn't try to nil out the pointer itself, - // as it's not settable. 
- raw := bson.Raw{0x0A, []byte{}} - err := raw.Unmarshal(&struct{}{}) - c.Assert(err, IsNil) -} - -// -------------------------------------------------------------------------- -// One-way marshaling tests. - -type dOnIface struct { - D interface{} -} - -type ignoreField struct { - Before string - Ignore string `bson:"-"` - After string -} - -var marshalItems = []testItemType{ - // Ordered document dump. Will unmarshal as a dictionary by default. - {bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, - {MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, - "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, - {&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, - - {bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, - "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, - {MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, - "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, - {&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")}, - - {&ignoreField{"before", "ignore", "after"}, - "\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"}, - - // Marshalling a Raw document does nothing. - {bson.Raw{0x03, []byte(wrapInDoc("anything"))}, - "anything"}, - {bson.Raw{Data: []byte(wrapInDoc("anything"))}, - "anything"}, -} - -func (s *S) TestMarshalOneWayItems(c *C) { - for _, item := range marshalItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data)) - } -} - -// -------------------------------------------------------------------------- -// One-way unmarshaling tests. - -var unmarshalItems = []testItemType{ - // Field is private. Should not attempt to unmarshal it. - {&struct{ priv byte }{}, - "\x10priv\x00\x08\x00\x00\x00"}, - - // Wrong casing. Field names are lowercased. - {&struct{ Byte byte }{}, - "\x10Byte\x00\x08\x00\x00\x00"}, - - // Ignore non-existing field. - {&struct{ Byte byte }{9}, - "\x10boot\x00\x08\x00\x00\x00" + "\x10byte\x00\x09\x00\x00\x00"}, - - // Do not unmarshal on ignored field. - {&ignoreField{"before", "", "after"}, - "\x02before\x00\a\x00\x00\x00before\x00" + - "\x02-\x00\a\x00\x00\x00ignore\x00" + - "\x02after\x00\x06\x00\x00\x00after\x00"}, - - // Ignore unsuitable types silently. - {map[string]string{"str": "s"}, - "\x02str\x00\x02\x00\x00\x00s\x00" + "\x10int\x00\x01\x00\x00\x00"}, - {map[string][]int{"array": []int{5, 9}}, - "\x04array\x00" + wrapInDoc("\x100\x00\x05\x00\x00\x00"+"\x021\x00\x02\x00\x00\x00s\x00"+"\x102\x00\x09\x00\x00\x00")}, - - // Wrong type. Shouldn't init pointer. - {&struct{ Str *byte }{}, - "\x02str\x00\x02\x00\x00\x00s\x00"}, - {&struct{ Str *struct{ Str string } }{}, - "\x02str\x00\x02\x00\x00\x00s\x00"}, - - // Ordered document. - {&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, - "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, - - // Raw document. - {&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))}, - "\x10byte\x00\x08\x00\x00\x00"}, - - // RawD document. 
- {&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, - "\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")}, - - // Decode old binary. - {bson.M{"_": []byte("old")}, - "\x05_\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - - // Decode old binary without length. According to the spec, this shouldn't happen. - {bson.M{"_": []byte("old")}, - "\x05_\x00\x03\x00\x00\x00\x02old"}, -} - -func (s *S) TestUnmarshalOneWayItems(c *C) { - for _, item := range unmarshalItems { - testUnmarshal(c, wrapInDoc(item.data), item.obj) - } -} - -func (s *S) TestUnmarshalNilInStruct(c *C) { - // Nil is the default value, so we need to ensure it's indeed being set. - b := byte(1) - v := &struct{ Ptr *byte }{&b} - err := bson.Unmarshal([]byte(wrapInDoc("\x0Aptr\x00")), v) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, &struct{ Ptr *byte }{nil}) -} - -// -------------------------------------------------------------------------- -// Marshalling error cases. - -type structWithDupKeys struct { - Name byte - Other byte "name" // Tag should precede. -} - -var marshalErrorItems = []testItemType{ - {bson.M{"": uint64(1 << 63)}, - "BSON has no uint64 type, and value is too large to fit correctly in an int64"}, - {bson.M{"": bson.ObjectId("tooshort")}, - "ObjectIDs must be exactly 12 bytes long \\(got 8\\)"}, - {int64(123), - "Can't marshal int64 as a BSON document"}, - {bson.M{"": 1i}, - "Can't marshal complex128 in a BSON document"}, - {&structWithDupKeys{}, - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - {bson.Raw{0x0A, []byte{}}, - "Attempted to unmarshal Raw kind 10 as a document"}, - {&inlineCantPtr{&struct{ A, B int }{1, 2}}, - "Option ,inline needs a struct value or map field"}, - {&inlineDupName{1, struct{ A, B int }{2, 3}}, - "Duplicated key 'a' in struct bson_test.inlineDupName"}, - {&inlineDupMap{}, - "Multiple ,inline maps in struct bson_test.inlineDupMap"}, - {&inlineBadKeyMap{}, - "Option ,inline needs a map with string keys in struct bson_test.inlineBadKeyMap"}, - {&inlineMap{A: 1, M: map[string]interface{}{"a": 1}}, - `Can't have key "a" in inlined map; conflicts with struct field`}, -} - -func (s *S) TestMarshalErrorItems(c *C) { - for _, item := range marshalErrorItems { - data, err := bson.Marshal(item.obj) - c.Assert(err, ErrorMatches, item.data) - c.Assert(data, IsNil) - } -} - -// -------------------------------------------------------------------------- -// Unmarshalling error cases. - -type unmarshalErrorType struct { - obj interface{} - data string - error string -} - -var unmarshalErrorItems = []unmarshalErrorType{ - // Tag name conflicts with existing parameter. - {&structWithDupKeys{}, - "\x10name\x00\x08\x00\x00\x00", - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - - // Non-string map key. - {map[int]interface{}{}, - "\x10name\x00\x08\x00\x00\x00", - "BSON map must have string keys. Got: map\\[int\\]interface \\{\\}"}, - - {nil, - "\xEEname\x00", - "Unknown element kind \\(0xEE\\)"}, - - {struct{ Name bool }{}, - "\x10name\x00\x08\x00\x00\x00", - "Unmarshal can't deal with struct values. 
Use a pointer."}, - - {123, - "\x10name\x00\x08\x00\x00\x00", - "Unmarshal needs a map or a pointer to a struct."}, -} - -func (s *S) TestUnmarshalErrorItems(c *C) { - for _, item := range unmarshalErrorItems { - data := []byte(wrapInDoc(item.data)) - var value interface{} - switch reflect.ValueOf(item.obj).Kind() { - case reflect.Map, reflect.Ptr: - value = makeZeroDoc(item.obj) - case reflect.Invalid: - value = bson.M{} - default: - value = item.obj - } - err := bson.Unmarshal(data, value) - c.Assert(err, ErrorMatches, item.error) - } -} - -type unmarshalRawErrorType struct { - obj interface{} - raw bson.Raw - error string -} - -var unmarshalRawErrorItems = []unmarshalRawErrorType{ - // Tag name conflicts with existing parameter. - {&structWithDupKeys{}, - bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")}, - "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - - {&struct{}{}, - bson.Raw{0xEE, []byte{}}, - "Unknown element kind \\(0xEE\\)"}, - - {struct{ Name bool }{}, - bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, - "Raw Unmarshal can't deal with struct values. Use a pointer."}, - - {123, - bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, - "Raw Unmarshal needs a map or a valid pointer."}, -} - -func (s *S) TestUnmarshalRawErrorItems(c *C) { - for i, item := range unmarshalRawErrorItems { - err := item.raw.Unmarshal(item.obj) - c.Assert(err, ErrorMatches, item.error, Commentf("Failed on item %d: %#v\n", i, item)) - } -} - -var corruptedData = []string{ - "\x04\x00\x00\x00\x00", // Shorter than minimum - "\x06\x00\x00\x00\x00", // Not enough data - "\x05\x00\x00", // Broken length - "\x05\x00\x00\x00\xff", // Corrupted termination - "\x0A\x00\x00\x00\x0Aooop\x00", // Unfinished C string - - // Array end past end of string (s[2]=0x07 is correct) - wrapInDoc("\x04\x00\x09\x00\x00\x00\x0A\x00\x00"), - - // Array end within string, but past acceptable. - wrapInDoc("\x04\x00\x08\x00\x00\x00\x0A\x00\x00"), - - // Document end within string, but past acceptable. - wrapInDoc("\x03\x00\x08\x00\x00\x00\x0A\x00\x00"), - - // String with corrupted end. - wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"), -} - -func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { - for _, data := range corruptedData { - err := bson.Unmarshal([]byte(data), bson.M{}) - c.Assert(err, ErrorMatches, "Document is corrupted") - - err = bson.Unmarshal([]byte(data), &struct{}{}) - c.Assert(err, ErrorMatches, "Document is corrupted") - } -} - -// -------------------------------------------------------------------------- -// Setter test cases. - -var setterResult = map[string]error{} - -type setterType struct { - received interface{} -} - -func (o *setterType) SetBSON(raw bson.Raw) error { - err := raw.Unmarshal(&o.received) - if err != nil { - panic("The panic:" + err.Error()) - } - if s, ok := o.received.(string); ok { - if result, ok := setterResult[s]; ok { - return result - } - } - return nil -} - -type ptrSetterDoc struct { - Field *setterType "_" -} - -type valSetterDoc struct { - Field setterType "_" -} - -func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) { - for _, item := range allItems { - for i := 0; i != 2; i++ { - var field *setterType - if i == 0 { - obj := &ptrSetterDoc{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) - c.Assert(err, IsNil) - field = obj.Field - } else { - obj := &valSetterDoc{} - err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) - c.Assert(err, IsNil) - field = &obj.Field - } - if item.data == "" { - // Nothing to unmarshal. Should be untouched. 
- if i == 0 { - c.Assert(field, IsNil) - } else { - c.Assert(field.received, IsNil) - } - } else { - expected := item.obj.(bson.M)["_"] - c.Assert(field, NotNil, Commentf("Pointer not initialized (%#v)", expected)) - c.Assert(field.received, DeepEquals, expected) - } - } - } -} - -func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { - obj := &setterType{} - err := bson.Unmarshal([]byte(sampleItems[0].data), obj) - c.Assert(err, IsNil) - c.Assert(obj.received, DeepEquals, bson.M{"hello": "world"}) -} - -func (s *S) TestUnmarshalSetterOmits(c *C) { - setterResult["2"] = &bson.TypeError{} - setterResult["4"] = &bson.TypeError{} - defer func() { - delete(setterResult, "2") - delete(setterResult, "4") - }() - - m := map[string]*setterType{} - data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + - "\x02def\x00\x02\x00\x00\x002\x00" + - "\x02ghi\x00\x02\x00\x00\x003\x00" + - "\x02jkl\x00\x02\x00\x00\x004\x00") - err := bson.Unmarshal([]byte(data), m) - c.Assert(err, IsNil) - c.Assert(m["abc"], NotNil) - c.Assert(m["def"], IsNil) - c.Assert(m["ghi"], NotNil) - c.Assert(m["jkl"], IsNil) - - c.Assert(m["abc"].received, Equals, "1") - c.Assert(m["ghi"].received, Equals, "3") -} - -func (s *S) TestUnmarshalSetterErrors(c *C) { - boom := errors.New("BOOM") - setterResult["2"] = boom - defer delete(setterResult, "2") - - m := map[string]*setterType{} - data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + - "\x02def\x00\x02\x00\x00\x002\x00" + - "\x02ghi\x00\x02\x00\x00\x003\x00") - err := bson.Unmarshal([]byte(data), m) - c.Assert(err, Equals, boom) - c.Assert(m["abc"], NotNil) - c.Assert(m["def"], IsNil) - c.Assert(m["ghi"], IsNil) - - c.Assert(m["abc"].received, Equals, "1") -} - -func (s *S) TestDMap(c *C) { - d := bson.D{{"a", 1}, {"b", 2}} - c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2}) -} - -func (s *S) TestUnmarshalSetterSetZero(c *C) { - setterResult["foo"] = bson.SetZero - defer delete(setterResult, "field") - - data, err := bson.Marshal(bson.M{"field": "foo"}) - c.Assert(err, IsNil) - - m := map[string]*setterType{} - err = bson.Unmarshal([]byte(data), m) - c.Assert(err, IsNil) - - value, ok := m["field"] - c.Assert(ok, Equals, true) - c.Assert(value, IsNil) -} - -// -------------------------------------------------------------------------- -// Getter test cases. 
- -type typeWithGetter struct { - result interface{} - err error -} - -func (t *typeWithGetter) GetBSON() (interface{}, error) { - if t == nil { - return "", nil - } - return t.result, t.err -} - -type docWithGetterField struct { - Field *typeWithGetter "_" -} - -func (s *S) TestMarshalAllItemsWithGetter(c *C) { - for i, item := range allItems { - if item.data == "" { - continue - } - obj := &docWithGetterField{} - obj.Field = &typeWithGetter{result: item.obj.(bson.M)["_"]} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, wrapInDoc(item.data), - Commentf("Failed on item #%d", i)) - } -} - -func (s *S) TestMarshalWholeDocumentWithGetter(c *C) { - obj := &typeWithGetter{result: sampleItems[0].obj} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, sampleItems[0].data) -} - -func (s *S) TestGetterErrors(c *C) { - e := errors.New("oops") - - obj1 := &docWithGetterField{} - obj1.Field = &typeWithGetter{sampleItems[0].obj, e} - data, err := bson.Marshal(obj1) - c.Assert(err, ErrorMatches, "oops") - c.Assert(data, IsNil) - - obj2 := &typeWithGetter{sampleItems[0].obj, e} - data, err = bson.Marshal(obj2) - c.Assert(err, ErrorMatches, "oops") - c.Assert(data, IsNil) -} - -type intGetter int64 - -func (t intGetter) GetBSON() (interface{}, error) { - return int64(t), nil -} - -type typeWithIntGetter struct { - V intGetter ",minsize" -} - -func (s *S) TestMarshalShortWithGetter(c *C) { - obj := typeWithIntGetter{42} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - m := bson.M{} - err = bson.Unmarshal(data, m) - c.Assert(err, IsNil) - c.Assert(m["v"], Equals, 42) -} - -func (s *S) TestMarshalWithGetterNil(c *C) { - obj := docWithGetterField{} - data, err := bson.Marshal(obj) - c.Assert(err, IsNil) - m := bson.M{} - err = bson.Unmarshal(data, m) - c.Assert(err, IsNil) - c.Assert(m, DeepEquals, bson.M{"_": ""}) -} - -// -------------------------------------------------------------------------- -// Cross-type conversion tests. 
- -type crossTypeItem struct { - obj1 interface{} - obj2 interface{} -} - -type condStr struct { - V string ",omitempty" -} -type condStrNS struct { - V string `a:"A" bson:",omitempty" b:"B"` -} -type condBool struct { - V bool ",omitempty" -} -type condInt struct { - V int ",omitempty" -} -type condUInt struct { - V uint ",omitempty" -} -type condFloat struct { - V float64 ",omitempty" -} -type condIface struct { - V interface{} ",omitempty" -} -type condPtr struct { - V *bool ",omitempty" -} -type condSlice struct { - V []string ",omitempty" -} -type condMap struct { - V map[string]int ",omitempty" -} -type namedCondStr struct { - V string "myv,omitempty" -} -type condTime struct { - V time.Time ",omitempty" -} -type condStruct struct { - V struct{ A []int } ",omitempty" -} - -type shortInt struct { - V int64 ",minsize" -} -type shortUint struct { - V uint64 ",minsize" -} -type shortIface struct { - V interface{} ",minsize" -} -type shortPtr struct { - V *int64 ",minsize" -} -type shortNonEmptyInt struct { - V int64 ",minsize,omitempty" -} - -type inlineInt struct { - V struct{ A, B int } ",inline" -} -type inlineCantPtr struct { - V *struct{ A, B int } ",inline" -} -type inlineDupName struct { - A int - V struct{ A, B int } ",inline" -} -type inlineMap struct { - A int - M map[string]interface{} ",inline" -} -type inlineMapInt struct { - A int - M map[string]int ",inline" -} -type inlineMapMyM struct { - A int - M MyM ",inline" -} -type inlineDupMap struct { - M1 map[string]interface{} ",inline" - M2 map[string]interface{} ",inline" -} -type inlineBadKeyMap struct { - M map[int]int ",inline" -} - -type ( - MyString string - MyBytes []byte - MyBool bool - MyD []bson.DocElem - MyRawD []bson.RawDocElem - MyM map[string]interface{} -) - -var ( - truevar = true - falsevar = false - - int64var = int64(42) - int64ptr = &int64var - intvar = int(42) - intptr = &intvar -) - -func parseURL(s string) *url.URL { - u, err := url.Parse(s) - if err != nil { - panic(err) - } - return u -} - -// That's a pretty fun test. It will dump the first item, generate a zero -// value equivalent to the second one, load the dumped data onto it, and then -// verify that the resulting value is deep-equal to the untouched second value. -// Then, it will do the same in the *opposite* direction! 
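The comment above describes the harness built from the twoWayCrossItems table and the testCrossPair helper defined further down. A compact standalone sketch of that dump-and-reload idea, assuming the vendored labix.org/v2/mgo/bson package; the roundTrip helper and the small/wide structs are invented names.

package main

import (
	"fmt"
	"reflect"

	"labix.org/v2/mgo/bson"
)

// roundTrip marshals dump, unmarshals the bytes into zero (a pointer to
// a zero value of a possibly different type), and reports whether the
// result deep-equals want, which is the check the cross-type table
// applies in both directions.
func roundTrip(dump, zero, want interface{}) bool {
	data, err := bson.Marshal(dump)
	if err != nil {
		return false
	}
	if err := bson.Unmarshal(data, zero); err != nil {
		return false
	}
	return reflect.DeepEqual(zero, want)
}

func main() {
	type small struct{ I int32 }
	type wide struct{ I int64 }
	// int32 widens to int64 and, within range, narrows back again.
	fmt.Println(roundTrip(&small{42}, &wide{}, &wide{42}))  // true
	fmt.Println(roundTrip(&wide{42}, &small{}, &small{42})) // true
}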
-var twoWayCrossItems = []crossTypeItem{ - // int<=>int - {&struct{ I int }{42}, &struct{ I int8 }{42}}, - {&struct{ I int }{42}, &struct{ I int32 }{42}}, - {&struct{ I int }{42}, &struct{ I int64 }{42}}, - {&struct{ I int8 }{42}, &struct{ I int32 }{42}}, - {&struct{ I int8 }{42}, &struct{ I int64 }{42}}, - {&struct{ I int32 }{42}, &struct{ I int64 }{42}}, - - // uint<=>uint - {&struct{ I uint }{42}, &struct{ I uint8 }{42}}, - {&struct{ I uint }{42}, &struct{ I uint32 }{42}}, - {&struct{ I uint }{42}, &struct{ I uint64 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I uint32 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I uint64 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I uint64 }{42}}, - - // float32<=>float64 - {&struct{ I float32 }{42}, &struct{ I float64 }{42}}, - - // int<=>uint - {&struct{ I uint }{42}, &struct{ I int }{42}}, - {&struct{ I uint }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint8 }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint32 }{42}, &struct{ I int64 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int8 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int32 }{42}}, - {&struct{ I uint64 }{42}, &struct{ I int64 }{42}}, - - // int <=> float - {&struct{ I int }{42}, &struct{ I float64 }{42}}, - - // int <=> bool - {&struct{ I int }{1}, &struct{ I bool }{true}}, - {&struct{ I int }{0}, &struct{ I bool }{false}}, - - // uint <=> float64 - {&struct{ I uint }{42}, &struct{ I float64 }{42}}, - - // uint <=> bool - {&struct{ I uint }{1}, &struct{ I bool }{true}}, - {&struct{ I uint }{0}, &struct{ I bool }{false}}, - - // float64 <=> bool - {&struct{ I float64 }{1}, &struct{ I bool }{true}}, - {&struct{ I float64 }{0}, &struct{ I bool }{false}}, - - // string <=> string and string <=> []byte - {&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}}, - {&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}}, - {&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}}, - - // map <=> struct - {&struct { - A struct { - B, C int - } - }{struct{ B, C int }{1, 2}}, - map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}}, - - {&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}}, - {&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}}, - {&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}}, - {&struct{ A uint }{42}, map[string]int{"a": 42}}, - {&struct{ A uint }{42}, map[string]float64{"a": 42}}, - {&struct{ A uint }{1}, map[string]bool{"a": true}}, - {&struct{ A int }{42}, map[string]uint{"a": 42}}, - {&struct{ A int }{42}, map[string]float64{"a": 42}}, - {&struct{ A int }{1}, map[string]bool{"a": true}}, - {&struct{ A float64 }{42}, map[string]float32{"a": 42}}, - {&struct{ A float64 }{42}, map[string]int{"a": 42}}, - {&struct{ A float64 }{42}, map[string]uint{"a": 42}}, - {&struct{ A float64 }{1}, map[string]bool{"a": true}}, - {&struct{ A bool }{true}, map[string]int{"a": 1}}, - {&struct{ A bool }{true}, map[string]uint{"a": 1}}, - {&struct{ A bool }{true}, map[string]float64{"a": 1}}, - {&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}}, - - // 
url.URL <=> string - {&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, - {&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}}, - - // Slices - {&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, - {&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}}, - - // Conditionals - {&condBool{true}, map[string]bool{"v": true}}, - {&condBool{}, map[string]bool{}}, - {&condInt{1}, map[string]int{"v": 1}}, - {&condInt{}, map[string]int{}}, - {&condUInt{1}, map[string]uint{"v": 1}}, - {&condUInt{}, map[string]uint{}}, - {&condFloat{}, map[string]int{}}, - {&condStr{"yo"}, map[string]string{"v": "yo"}}, - {&condStr{}, map[string]string{}}, - {&condStrNS{"yo"}, map[string]string{"v": "yo"}}, - {&condStrNS{}, map[string]string{}}, - {&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}}, - {&condSlice{}, map[string][]string{}}, - {&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}}, - {&condMap{}, map[string][]string{}}, - {&condIface{"yo"}, map[string]string{"v": "yo"}}, - {&condIface{""}, map[string]string{"v": ""}}, - {&condIface{}, map[string]string{}}, - {&condPtr{&truevar}, map[string]bool{"v": true}}, - {&condPtr{&falsevar}, map[string]bool{"v": false}}, - {&condPtr{}, map[string]string{}}, - - {&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}}, - {&condTime{}, map[string]string{}}, - - {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, - {&condStruct{struct{ A []int }{}}, bson.M{}}, - - {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, - {&namedCondStr{}, map[string]string{}}, - - {&shortInt{1}, map[string]interface{}{"v": 1}}, - {&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}}, - {&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}}, - {&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}}, - - {&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}}, - {&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}}, - {&shortNonEmptyInt{}, map[string]interface{}{}}, - - {&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}}, - {&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}}, - {&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}}, - {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, - {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, - {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, - - // []byte <=> MyBytes - {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, - {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, - {&struct{ B MyBytes }{}, map[string]bool{}}, - {&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}}, - - // bool <=> MyBool - {&struct{ B MyBool }{true}, map[string]bool{"b": true}}, - {&struct{ B MyBool }{}, map[string]bool{"b": false}}, - {&struct{ B MyBool }{}, map[string]string{}}, - {&struct{ B bool }{}, map[string]MyBool{"b": false}}, - - // arrays - {&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}}, - - // zero time - {&struct{ V time.Time }{}, 
map[string]interface{}{"v": time.Time{}}}, - - // zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds - {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()}, - map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}}, - - // bson.D <=> []DocElem - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, - - // bson.RawD <=> []RawDocElem - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - - // bson.M <=> map - {bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}}, - {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}}, - - // bson.M <=> map[MyString] - {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}}, -} - -// Same thing, but only one way (obj1 => obj2). -var oneWayCrossItems = []crossTypeItem{ - // map <=> struct - {map[string]interface{}{"a": 1, "b": "2", "c": 3}, map[string]int{"a": 1, "c": 3}}, - - // inline map elides badly typed values - {map[string]interface{}{"a": 1, "b": "2", "c": 3}, &inlineMapInt{A: 1, M: map[string]int{"c": 3}}}, - - // Can't decode int into struct. - {bson.M{"a": bson.M{"b": 2}}, &struct{ A bool }{}}, - - // Would get decoded into a int32 too in the opposite direction. - {&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}}, -} - -func testCrossPair(c *C, dump interface{}, load interface{}) { - c.Logf("Dump: %#v", dump) - c.Logf("Load: %#v", load) - zero := makeZeroDoc(load) - data, err := bson.Marshal(dump) - c.Assert(err, IsNil) - c.Logf("Dumped: %#v", string(data)) - err = bson.Unmarshal(data, zero) - c.Assert(err, IsNil) - c.Logf("Loaded: %#v", zero) - c.Assert(zero, DeepEquals, load) -} - -func (s *S) TestTwoWayCrossPairs(c *C) { - for _, item := range twoWayCrossItems { - testCrossPair(c, item.obj1, item.obj2) - testCrossPair(c, item.obj2, item.obj1) - } -} - -func (s *S) TestOneWayCrossPairs(c *C) { - for _, item := range oneWayCrossItems { - testCrossPair(c, item.obj1, item.obj2) - } -} - -// -------------------------------------------------------------------------- -// ObjectId hex representation test. - -func (s *S) TestObjectIdHex(c *C) { - id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") - c.Assert(id.String(), Equals, `ObjectIdHex("4d88e15b60f486e428412dc9")`) - c.Assert(id.Hex(), Equals, "4d88e15b60f486e428412dc9") -} - -func (s *S) TestIsObjectIdHex(c *C) { - test := []struct { - id string - valid bool - }{ - {"4d88e15b60f486e428412dc9", true}, - {"4d88e15b60f486e428412dc", false}, - {"4d88e15b60f486e428412dc9e", false}, - {"4d88e15b60f486e428412dcx", false}, - } - for _, t := range test { - c.Assert(bson.IsObjectIdHex(t.id), Equals, t.valid) - } -} - -// -------------------------------------------------------------------------- -// ObjectId parts extraction tests. 
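The tests that follow pull apart the 12-byte ObjectId layout: a 4-byte Unix timestamp, a 3-byte machine id, a 2-byte pid and a 3-byte counter. A quick standalone sketch of the same accessors, reusing the first fixture from the table below and assuming the vendored labix.org/v2/mgo/bson package.

package main

import (
	"fmt"

	"labix.org/v2/mgo/bson"
)

func main() {
	// 4d88e15b | 60f486  | e428 | 412dc9
	// seconds    machine   pid    counter
	id := bson.ObjectIdHex("4d88e15b60f486e428412dc9")
	fmt.Println(id.Time().Unix())    // 1300816219
	fmt.Printf("%x\n", id.Machine()) // 60f486
	fmt.Println(id.Pid())            // 58408 (0xe428)
	fmt.Println(id.Counter())        // 4271561 (0x412dc9)
	fmt.Println(id.Hex())            // 4d88e15b60f486e428412dc9
	fmt.Println(bson.IsObjectIdHex("4d88e15b60f486e428412dc9")) // true
}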
- -type objectIdParts struct { - id bson.ObjectId - timestamp int64 - machine []byte - pid uint16 - counter int32 -} - -var objectIds = []objectIdParts{ - objectIdParts{ - bson.ObjectIdHex("4d88e15b60f486e428412dc9"), - 1300816219, - []byte{0x60, 0xf4, 0x86}, - 0xe428, - 4271561, - }, - objectIdParts{ - bson.ObjectIdHex("000000000000000000000000"), - 0, - []byte{0x00, 0x00, 0x00}, - 0x0000, - 0, - }, - objectIdParts{ - bson.ObjectIdHex("00000000aabbccddee000001"), - 0, - []byte{0xaa, 0xbb, 0xcc}, - 0xddee, - 1, - }, -} - -func (s *S) TestObjectIdPartsExtraction(c *C) { - for i, v := range objectIds { - t := time.Unix(v.timestamp, 0) - c.Assert(v.id.Time(), Equals, t, Commentf("#%d Wrong timestamp value", i)) - c.Assert(v.id.Machine(), DeepEquals, v.machine, Commentf("#%d Wrong machine id value", i)) - c.Assert(v.id.Pid(), Equals, v.pid, Commentf("#%d Wrong pid value", i)) - c.Assert(v.id.Counter(), Equals, v.counter, Commentf("#%d Wrong counter value", i)) - } -} - -func (s *S) TestNow(c *C) { - before := time.Now() - time.Sleep(1e6) - now := bson.Now() - time.Sleep(1e6) - after := time.Now() - c.Assert(now.After(before) && now.Before(after), Equals, true, Commentf("now=%s, before=%s, after=%s", now, before, after)) -} - -// -------------------------------------------------------------------------- -// ObjectId generation tests. - -func (s *S) TestNewObjectId(c *C) { - // Generate 10 ids - ids := make([]bson.ObjectId, 10) - for i := 0; i < 10; i++ { - ids[i] = bson.NewObjectId() - } - for i := 1; i < 10; i++ { - prevId := ids[i-1] - id := ids[i] - // Test for uniqueness among all other 9 generated ids - for j, tid := range ids { - if j != i { - c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique")) - } - } - // Check that timestamp was incremented and is within 30 seconds of the previous one - secs := id.Time().Sub(prevId.Time()).Seconds() - c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId")) - // Check that machine ids are the same - c.Assert(id.Machine(), DeepEquals, prevId.Machine()) - // Check that pids are the same - c.Assert(id.Pid(), Equals, prevId.Pid()) - // Test for proper increment - delta := int(id.Counter() - prevId.Counter()) - c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId")) - } -} - -func (s *S) TestNewObjectIdWithTime(c *C) { - t := time.Unix(12345678, 0) - id := bson.NewObjectIdWithTime(t) - c.Assert(id.Time(), Equals, t) - c.Assert(id.Machine(), DeepEquals, []byte{0x00, 0x00, 0x00}) - c.Assert(int(id.Pid()), Equals, 0) - c.Assert(int(id.Counter()), Equals, 0) -} - -// -------------------------------------------------------------------------- -// ObjectId JSON marshalling. 
- -type jsonType struct { - Id *bson.ObjectId -} - -func (s *S) TestObjectIdJSONMarshaling(c *C) { - id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") - v := jsonType{Id: &id} - data, err := json.Marshal(&v) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, `{"Id":"4d88e15b60f486e428412dc9"}`) -} - -func (s *S) TestObjectIdJSONUnmarshaling(c *C) { - data := []byte(`{"Id":"4d88e15b60f486e428412dc9"}`) - v := jsonType{} - err := json.Unmarshal(data, &v) - c.Assert(err, IsNil) - c.Assert(*v.Id, Equals, bson.ObjectIdHex("4d88e15b60f486e428412dc9")) -} - -func (s *S) TestObjectIdJSONUnmarshalingError(c *C) { - v := jsonType{} - err := json.Unmarshal([]byte(`{"Id":"4d88e15b60f486e428412dc9A"}`), &v) - c.Assert(err, ErrorMatches, `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`) - err = json.Unmarshal([]byte(`{"Id":"4d88e15b60f486e428412dcZ"}`), &v) - c.Assert(err, ErrorMatches, `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`) -} - -// -------------------------------------------------------------------------- -// Some simple benchmarks. - -type BenchT struct { - A, B, C, D, E, F string -} - -func BenchmarkUnmarhsalStruct(b *testing.B) { - v := BenchT{A: "A", D: "D", E: "E"} - data, err := bson.Marshal(&v) - if err != nil { - panic(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - err = bson.Unmarshal(data, &v) - } - if err != nil { - panic(err) - } -} - -func BenchmarkUnmarhsalMap(b *testing.B) { - m := bson.M{"a": "a", "d": "d", "e": "e"} - data, err := bson.Marshal(&m) - if err != nil { - panic(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - err = bson.Unmarshal(data, &m) - } - if err != nil { - panic(err) - } -} diff --git a/vendor/labix.org/v2/mgo/bulk_test.go b/vendor/labix.org/v2/mgo/bulk_test.go deleted file mode 100644 index f8abca80a..000000000 --- a/vendor/labix.org/v2/mgo/bulk_test.go +++ /dev/null @@ -1,89 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2014 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "labix.org/v2/mgo" - . 
"launchpad.net/gocheck" -) - -func (s *S) TestBulkInsert(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - bulk := coll.Bulk() - bulk.Insert(M{"n": 1}) - bulk.Insert(M{"n": 2}, M{"n": 3}) - r, err := bulk.Run() - c.Assert(err, IsNil) - c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) - - type doc struct{ N int } - var res []doc - err = coll.Find(nil).Sort("n").All(&res) - c.Assert(err, IsNil) - c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) -} - -func (s *S) TestBulkInsertError(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - bulk := coll.Bulk() - bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"n": 3}) - _, err = bulk.Run() - c.Assert(err, ErrorMatches, ".*duplicate key.*") - - type doc struct{ N int `_id` } - var res []doc - err = coll.Find(nil).Sort("_id").All(&res) - c.Assert(err, IsNil) - c.Assert(res, DeepEquals, []doc{{1}, {2}}) -} - -func (s *S) TestBulkInsertErrorUnordered(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - bulk := coll.Bulk() - bulk.Unordered() - bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) - _, err = bulk.Run() - c.Assert(err, ErrorMatches, ".*duplicate key.*") - - type doc struct{ N int `_id` } - var res []doc - err = coll.Find(nil).Sort("_id").All(&res) - c.Assert(err, IsNil) - c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) -} diff --git a/vendor/labix.org/v2/mgo/cluster_test.go b/vendor/labix.org/v2/mgo/cluster_test.go deleted file mode 100644 index d6d2810b7..000000000 --- a/vendor/labix.org/v2/mgo/cluster_test.go +++ /dev/null @@ -1,1559 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "fmt" - "io" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - . 
"launchpad.net/gocheck" - "net" - "strings" - "sync" - "time" -) - -func (s *S) TestNewSession(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Do a dummy operation to wait for connection. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Tweak safety and query settings to ensure other has copied those. - session.SetSafe(nil) - session.SetBatch(-1) - other := session.New() - defer other.Close() - session.SetSafe(&mgo.Safe{}) - - // Clone was copied while session was unsafe, so no errors. - otherColl := other.DB("mydb").C("mycoll") - err = otherColl.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Original session was made safe again. - err = coll.Insert(M{"_id": 1}) - c.Assert(err, NotNil) - - // With New(), each session has its own socket now. - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 2) - c.Assert(stats.SocketsInUse, Equals, 2) - - // Ensure query parameters were cloned. - err = otherColl.Insert(M{"_id": 2}) - c.Assert(err, IsNil) - - // Ping the database to ensure the nonce has been received already. - c.Assert(other.Ping(), IsNil) - - mgo.ResetStats() - - iter := otherColl.Find(M{}).Iter() - c.Assert(err, IsNil) - - m := M{} - ok := iter.Next(m) - c.Assert(ok, Equals, true) - err = iter.Close() - c.Assert(err, IsNil) - - // If Batch(-1) is in effect, a single document must have been received. - stats = mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 1) -} - -func (s *S) TestCloneSession(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Do a dummy operation to wait for connection. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Tweak safety and query settings to ensure clone is copying those. - session.SetSafe(nil) - session.SetBatch(-1) - clone := session.Clone() - defer clone.Close() - session.SetSafe(&mgo.Safe{}) - - // Clone was copied while session was unsafe, so no errors. - cloneColl := clone.DB("mydb").C("mycoll") - err = cloneColl.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Original session was made safe again. - err = coll.Insert(M{"_id": 1}) - c.Assert(err, NotNil) - - // With Clone(), same socket is shared between sessions now. - stats := mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 1) - c.Assert(stats.SocketRefs, Equals, 2) - - // Refreshing one of them should let the original socket go, - // while preserving the safety settings. - clone.Refresh() - err = cloneColl.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Must have used another connection now. - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 2) - c.Assert(stats.SocketRefs, Equals, 2) - - // Ensure query parameters were cloned. - err = cloneColl.Insert(M{"_id": 2}) - c.Assert(err, IsNil) - - // Ping the database to ensure the nonce has been received already. - c.Assert(clone.Ping(), IsNil) - - mgo.ResetStats() - - iter := cloneColl.Find(M{}).Iter() - c.Assert(err, IsNil) - - m := M{} - ok := iter.Next(m) - c.Assert(ok, Equals, true) - err = iter.Close() - c.Assert(err, IsNil) - - // If Batch(-1) is in effect, a single document must have been received. 
- stats = mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 1) -} - -func (s *S) TestSetModeStrong(c *C) { - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, false) - session.SetMode(mgo.Strong, false) - - c.Assert(session.Mode(), Equals, mgo.Strong) - - result := M{} - cmd := session.DB("admin").C("$cmd") - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 2) - c.Assert(stats.SocketsInUse, Equals, 1) - - session.SetMode(mgo.Strong, true) - - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestSetModeMonotonic(c *C) { - // Must necessarily connect to a slave, otherwise the - // master connection will be available first. - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, false) - - c.Assert(session.Mode(), Equals, mgo.Monotonic) - - result := M{} - cmd := session.DB("admin").C("$cmd") - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - result = M{} - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 2) - c.Assert(stats.SocketsInUse, Equals, 2) - - session.SetMode(mgo.Monotonic, true) - - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestSetModeMonotonicAfterStrong(c *C) { - // Test that a strong session shifting to a monotonic - // one preserves the socket untouched. - - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - // Insert something to force a connection to the master. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - session.SetMode(mgo.Monotonic, false) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - // Master socket should still be reserved. - stats := mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 1) - - // Confirm it's the master even though it's Monotonic by now. - result := M{} - cmd := session.DB("admin").C("$cmd") - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) -} - -func (s *S) TestSetModeStrongAfterMonotonic(c *C) { - // Test that shifting from Monotonic to Strong while - // using a slave socket will keep the socket reserved - // until the master socket is necessary, so that no - // switch over occurs unless it's actually necessary. - - // Must necessarily connect to a slave, otherwise the - // master connection will be available first. 
- session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, false) - - // Ensure we're talking to a slave, and reserve the socket. - result := M{} - err = session.Run("ismaster", &result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) - - // Switch to a Strong session. - session.SetMode(mgo.Strong, false) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - // Slave socket should still be reserved. - stats := mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 1) - - // But any operation will switch it to the master. - result = M{} - err = session.Run("ismaster", &result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) -} - -func (s *S) TestSetModeMonotonicWriteOnIteration(c *C) { - // Must necessarily connect to a slave, otherwise the - // master connection will be available first. - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, false) - - c.Assert(session.Mode(), Equals, mgo.Monotonic) - - coll1 := session.DB("mydb").C("mycoll1") - coll2 := session.DB("mydb").C("mycoll2") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll1.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - // Release master so we can grab a slave again. - session.Refresh() - - // Wait until synchronization is done. - for { - n, err := coll1.Count() - c.Assert(err, IsNil) - if n == len(ns) { - break - } - } - - iter := coll1.Find(nil).Batch(2).Iter() - i := 0 - m := M{} - for iter.Next(&m) { - i++ - if i > 3 { - err := coll2.Insert(M{"n": 47 + i}) - c.Assert(err, IsNil) - } - } - c.Assert(i, Equals, len(ns)) -} - -func (s *S) TestSetModeEventual(c *C) { - // Must necessarily connect to a slave, otherwise the - // master connection will be available first. - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Eventual, false) - - c.Assert(session.Mode(), Equals, mgo.Eventual) - - result := M{} - err = session.Run("ismaster", &result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - result = M{} - err = session.Run("ismaster", &result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 2) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestSetModeEventualAfterStrong(c *C) { - // Test that a strong session shifting to an eventual - // one preserves the socket untouched. - - session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - // Insert something to force a connection to the master. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - session.SetMode(mgo.Eventual, false) - - // Wait since the sync also uses sockets. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for cluster sync to finish...") - time.Sleep(5e8) - } - - // Master socket should still be reserved. 
- stats := mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 1) - - // Confirm it's the master even though it's Eventual by now. - result := M{} - cmd := session.DB("admin").C("$cmd") - err = cmd.Find(M{"ismaster": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) - - session.SetMode(mgo.Eventual, true) - - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestPrimaryShutdownStrong(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - // With strong consistency, this will open a socket to the master. - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - - // Kill the master. - host := result.Host - s.Stop(host) - - // This must fail, since the connection was broken. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - // With strong consistency, it fails again until reset. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - session.Refresh() - - // Now we should be able to talk to the new master. - // Increase the timeout since this may take quite a while. - session.SetSyncTimeout(3 * time.Minute) - - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(result.Host, Not(Equals), host) - - // Insert some data to confirm it's indeed a master. - err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) - c.Assert(err, IsNil) -} - -func (s *S) TestPrimaryHiccup(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - // With strong consistency, this will open a socket to the master. - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - - // Establish a few extra sessions to create spare sockets to - // the master. This increases a bit the chances of getting an - // incorrect cached socket. - var sessions []*mgo.Session - for i := 0; i < 20; i++ { - sessions = append(sessions, session.Copy()) - err = sessions[len(sessions)-1].Run("serverStatus", result) - c.Assert(err, IsNil) - } - for i := range sessions { - sessions[i].Close() - } - - // Kill the master, but bring it back immediatelly. - host := result.Host - s.Stop(host) - s.StartAll() - - // This must fail, since the connection was broken. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - // With strong consistency, it fails again until reset. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - session.Refresh() - - // Now we should be able to talk to the new master. - // Increase the timeout since this may take quite a while. - session.SetSyncTimeout(3 * time.Minute) - - // Insert some data to confirm it's indeed a master. - err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) - c.Assert(err, IsNil) -} - -func (s *S) TestPrimaryShutdownMonotonic(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - - // Insert something to force a switch to the master. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Wait a bit for this to be synchronized to slaves. 
- time.Sleep(3 * time.Second) - - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - - // Kill the master. - host := result.Host - s.Stop(host) - - // This must fail, since the connection was broken. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - // With monotonic consistency, it fails again until reset. - err = session.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - session.Refresh() - - // Now we should be able to talk to the new master. - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(result.Host, Not(Equals), host) -} - -func (s *S) TestPrimaryShutdownMonotonicWithSlave(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - ssresult := &struct{ Host string }{} - imresult := &struct{ IsMaster bool }{} - - // Figure the master while still using the strong session. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - master := ssresult.Host - c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - - // Create new monotonic session with an explicit address to ensure - // a slave is synchronized before the master, otherwise a connection - // with the master may be used below for lack of other options. - var addr string - switch { - case strings.HasSuffix(ssresult.Host, ":40021"): - addr = "localhost:40022" - case strings.HasSuffix(ssresult.Host, ":40022"): - addr = "localhost:40021" - case strings.HasSuffix(ssresult.Host, ":40023"): - addr = "localhost:40021" - default: - c.Fatal("Unknown host: ", ssresult.Host) - } - - session, err = mgo.Dial(addr) - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - - // Check the address of the socket associated with the monotonic session. - c.Log("Running serverStatus and isMaster with monotonic session") - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - slave := ssresult.Host - c.Assert(imresult.IsMaster, Equals, false, Commentf("%s is not a slave", slave)) - - c.Assert(master, Not(Equals), slave) - - // Kill the master. - s.Stop(master) - - // Session must still be good, since we were talking to a slave. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - - c.Assert(ssresult.Host, Equals, slave, - Commentf("Monotonic session moved from %s to %s", slave, ssresult.Host)) - - // If we try to insert something, it'll have to hold until the new - // master is available to move the connection, and work correctly. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Must now be talking to the new master. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - - // ... which is not the old one, since it's still dead. 
- c.Assert(ssresult.Host, Not(Equals), master) -} - -func (s *S) TestPrimaryShutdownEventual(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - master := result.Host - - session.SetMode(mgo.Eventual, true) - - // Should connect to the master when needed. - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Wait a bit for this to be synchronized to slaves. - time.Sleep(3 * time.Second) - - // Kill the master. - s.Stop(master) - - // Should still work, with the new master now. - coll = session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(result.Host, Not(Equals), master) -} - -func (s *S) TestPreserveSocketCountOnSync(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - stats := mgo.GetStats() - for stats.MasterConns+stats.SlaveConns != 3 { - stats = mgo.GetStats() - c.Log("Waiting for all connections to be established...") - time.Sleep(5e8) - } - - c.Assert(stats.SocketsAlive, Equals, 3) - - // Kill the master (with rs1, 'a' is always the master). - s.Stop("localhost:40011") - - // Wait for the logic to run for a bit and bring it back. - startedAll := make(chan bool) - go func() { - time.Sleep(5e9) - s.StartAll() - startedAll <- true - }() - - // Do not allow the test to return before the goroutine above is done. - defer func() { - <-startedAll - }() - - // Do an action to kick the resync logic in, and also to - // wait until the cluster recognizes the server is back. - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, true) - - for i := 0; i != 20; i++ { - stats = mgo.GetStats() - if stats.SocketsAlive == 3 { - break - } - c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive) - time.Sleep(5e8) - } - - // Ensure the number of sockets is preserved after syncing. - stats = mgo.GetStats() - c.Assert(stats.SocketsAlive, Equals, 3) - c.Assert(stats.SocketsInUse, Equals, 1) - c.Assert(stats.SocketRefs, Equals, 1) -} - -// Connect to the master of a deployment with a single server, -// run an insert, and then ensure the insert worked and that a -// single connection was established. -func (s *S) TestTopologySyncWithSingleMaster(c *C) { - // Use hostname here rather than IP, to make things trickier. - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1, "b": 2}) - c.Assert(err, IsNil) - - // One connection used for discovery. Master socket recycled for - // insert. Socket is reserved after insert. - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 0) - c.Assert(stats.SocketsInUse, Equals, 1) - - // Refresh session and socket must be released. - session.Refresh() - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestTopologySyncWithSlaveSeed(c *C) { - // That's supposed to be a slave. Must run discovery - // and find out master to insert successfully. 
- session, err := mgo.Dial("localhost:40012") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, true) - - // One connection to each during discovery. Master - // socket recycled for insert. - stats := mgo.GetStats() - c.Assert(stats.MasterConns, Equals, 1) - c.Assert(stats.SlaveConns, Equals, 2) - - // Only one socket reference alive, in the master socket owned - // by the above session. - c.Assert(stats.SocketsInUse, Equals, 1) - - // Refresh it, and it must be gone. - session.Refresh() - stats = mgo.GetStats() - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestSyncTimeout(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - s.Stop("localhost:40001") - - timeout := 3 * time.Second - session.SetSyncTimeout(timeout) - started := time.Now() - - // Do something. - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, ErrorMatches, "no reachable servers") - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) -} - -func (s *S) TestDialWithTimeout(c *C) { - if *fast { - c.Skip("-fast") - } - - timeout := 2 * time.Second - started := time.Now() - - // 40009 isn't used by the test servers. - session, err := mgo.DialWithTimeout("localhost:40009", timeout) - if session != nil { - session.Close() - } - c.Assert(err, ErrorMatches, "no reachable servers") - c.Assert(session, IsNil) - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) -} - -func (s *S) TestSocketTimeout(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - s.Freeze("localhost:40001") - - timeout := 3 * time.Second - session.SetSocketTimeout(timeout) - started := time.Now() - - // Do something. - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, ErrorMatches, ".*: i/o timeout") - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) -} - -func (s *S) TestSocketTimeoutOnDial(c *C) { - if *fast { - c.Skip("-fast") - } - - timeout := 1 * time.Second - - defer mgo.HackSyncSocketTimeout(timeout)() - - s.Freeze("localhost:40001") - - started := time.Now() - - session, err := mgo.DialWithTimeout("localhost:40001", timeout) - c.Assert(err, ErrorMatches, "no reachable servers") - c.Assert(session, IsNil) - - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - c.Assert(started.After(time.Now().Add(-20*time.Second)), Equals, true) -} - -func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - timeout := 2 * time.Second - session.SetSocketTimeout(timeout) - - // Do something that relies on the timeout and works. - c.Assert(session.Ping(), IsNil) - - // Freeze and wait for the timeout to go by. - s.Freeze("localhost:40001") - time.Sleep(timeout + 500*time.Millisecond) - s.Thaw("localhost:40001") - - // Do something again. 
The timeout above should not have killed - // the socket as there was nothing to be done. - c.Assert(session.Ping(), IsNil) -} - -func (s *S) TestDirect(c *C) { - session, err := mgo.Dial("localhost:40012?connect=direct") - c.Assert(err, IsNil) - defer session.Close() - - // We know that server is a slave. - session.SetMode(mgo.Monotonic, true) - - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) - - stats := mgo.GetStats() - c.Assert(stats.SocketsAlive, Equals, 1) - c.Assert(stats.SocketsInUse, Equals, 1) - c.Assert(stats.SocketRefs, Equals, 1) - - // We've got no master, so it'll timeout. - session.SetSyncTimeout(5e8 * time.Nanosecond) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"test": 1}) - c.Assert(err, ErrorMatches, "no reachable servers") - - // Writing to the local database is okay. - coll = session.DB("local").C("mycoll") - defer coll.RemoveAll(nil) - id := bson.NewObjectId() - err = coll.Insert(M{"_id": id}) - c.Assert(err, IsNil) - - // Data was stored in the right server. - n, err := coll.Find(M{"_id": id}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 1) - - // Server hasn't changed. - result.Host = "" - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) -} - -func (s *S) TestDirectToUnknownStateMember(c *C) { - session, err := mgo.Dial("localhost:40041?connect=direct") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Monotonic, true) - - result := &struct{ Host string }{} - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) - - // We've got no master, so it'll timeout. - session.SetSyncTimeout(5e8 * time.Nanosecond) - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"test": 1}) - c.Assert(err, ErrorMatches, "no reachable servers") - - // Slave is still reachable. - result.Host = "" - err = session.Run("serverStatus", result) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) -} - -func (s *S) TestFailFast(c *C) { - info := mgo.DialInfo{ - Addrs: []string{"localhost:99999"}, - Timeout: 5 * time.Second, - FailFast: true, - } - - started := time.Now() - - _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") - - c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) -} - -type OpCounters struct { - Insert int - Query int - Update int - Delete int - GetMore int - Command int -} - -func getOpCounters(server string) (c *OpCounters, err error) { - session, err := mgo.Dial(server + "?connect=direct") - if err != nil { - return nil, err - } - defer session.Close() - session.SetMode(mgo.Monotonic, true) - result := struct{ OpCounters }{} - err = session.Run("serverStatus", &result) - return &result.OpCounters, err -} - -func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - ssresult := &struct{ Host string }{} - imresult := &struct{ IsMaster bool }{} - - // Figure the master while still using the strong session. 
- err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - master := ssresult.Host - c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - - // Collect op counters for everyone. - opc21a, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22a, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23a, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) - - // Do a SlaveOk query through MongoS - - mongos, err := mgo.Dial("localhost:40202") - c.Assert(err, IsNil) - defer mongos.Close() - - mongos.SetMode(mgo.Monotonic, true) - - coll := mongos.DB("mydb").C("mycoll") - result := &struct{}{} - for i := 0; i != 5; i++ { - err := coll.Find(nil).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - } - - // Collect op counters for everyone again. - opc21b, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22b, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23b, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) - - masterPort := master[strings.Index(master, ":")+1:] - - var masterDelta, slaveDelta int - switch masterPort { - case "40021": - masterDelta = opc21b.Query - opc21a.Query - slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query) - case "40022": - masterDelta = opc22b.Query - opc22a.Query - slaveDelta = (opc21b.Query - opc21a.Query) + (opc23b.Query - opc23a.Query) - case "40023": - masterDelta = opc23b.Query - opc23a.Query - slaveDelta = (opc21b.Query - opc21a.Query) + (opc22b.Query - opc22a.Query) - default: - c.Fatal("Uh?") - } - - c.Check(masterDelta, Equals, 0) // Just the counting itself. - c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. -} - -func (s *S) TestRemovalOfClusterMember(c *C) { - if *fast { - c.Skip("-fast") - } - - master, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer master.Close() - - // Wait for cluster to fully sync up. - for i := 0; i < 10; i++ { - if len(master.LiveServers()) == 3 { - break - } - time.Sleep(5e8) - } - if len(master.LiveServers()) != 3 { - c.Fatalf("Test started with bad cluster state: %v", master.LiveServers()) - } - - result := &struct { - IsMaster bool - Me string - }{} - slave := master.Copy() - slave.SetMode(mgo.Monotonic, true) // Monotonic can hold a non-master socket persistently. - err = slave.Run("isMaster", result) - c.Assert(err, IsNil) - c.Assert(result.IsMaster, Equals, false) - slaveAddr := result.Me - - defer func() { - master.Refresh() - master.Run(bson.D{{"$eval", `rs.add("` + slaveAddr + `")`}}, nil) - master.Close() - slave.Close() - }() - - c.Logf("========== Removing slave: %s ==========", slaveAddr) - - master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil) - err = master.Ping() - c.Assert(err, Equals, io.EOF) - - master.Refresh() - - // Give the cluster a moment to catch up by doing a roundtrip to the master. - err = master.Ping() - c.Assert(err, IsNil) - - time.Sleep(3e9) - - // This must fail since the slave has been taken off the cluster. - err = slave.Ping() - c.Assert(err, NotNil) - - for i := 0; i < 15; i++ { - if len(master.LiveServers()) == 2 { - break - } - time.Sleep(time.Second) - } - live := master.LiveServers() - if len(live) != 2 { - c.Errorf("Removed server still considered live: %#s", live) - } - - c.Log("========== Test succeeded. 
==========") -} - -func (s *S) TestSocketLimit(c *C) { - if *fast { - c.Skip("-fast") - } - const socketLimit = 64 - restore := mgo.HackSocketsPerServer(socketLimit) - defer restore() - - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - stats := mgo.GetStats() - for stats.MasterConns+stats.SlaveConns != 3 { - stats = mgo.GetStats() - c.Log("Waiting for all connections to be established...") - time.Sleep(5e8) - } - c.Assert(stats.SocketsAlive, Equals, 3) - - // Consume the whole limit for the master. - var master []*mgo.Session - for i := 0; i < socketLimit; i++ { - s := session.Copy() - defer s.Close() - err := s.Ping() - c.Assert(err, IsNil) - master = append(master, s) - } - - before := time.Now() - go func() { - time.Sleep(3e9) - master[0].Refresh() - }() - - // Now a single ping must block, since it would need another - // connection to the master, over the limit. Once the goroutine - // above releases its socket, it should move on. - session.Ping() - delay := time.Now().Sub(before) - c.Assert(delay > 3e9, Equals, true) - c.Assert(delay < 6e9, Equals, true) -} - -func (s *S) TestSetModeEventualIterBug(c *C) { - session1, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session1.Close() - - session1.SetMode(mgo.Eventual, false) - - coll1 := session1.DB("mydb").C("mycoll") - - const N = 100 - for i := 0; i < N; i++ { - err = coll1.Insert(M{"_id": i}) - c.Assert(err, IsNil) - } - - c.Logf("Waiting until secondary syncs") - for { - n, err := coll1.Count() - c.Assert(err, IsNil) - if n == N { - c.Logf("Found all") - break - } - } - - session2, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session2.Close() - - session2.SetMode(mgo.Eventual, false) - - coll2 := session2.DB("mydb").C("mycoll") - - i := 0 - iter := coll2.Find(nil).Batch(10).Iter() - var result struct{} - for iter.Next(&result) { - i++ - } - c.Assert(iter.Close(), Equals, nil) - c.Assert(i, Equals, N) -} - -func (s *S) TestCustomDialOld(c *C) { - dials := make(chan bool, 16) - dial := func(addr net.Addr) (net.Conn, error) { - tcpaddr, ok := addr.(*net.TCPAddr) - if !ok { - return nil, fmt.Errorf("unexpected address type: %T", addr) - } - dials <- true - return net.DialTCP("tcp", nil, tcpaddr) - } - info := mgo.DialInfo{ - Addrs: []string{"localhost:40012"}, - Dial: dial, - } - - // Use hostname here rather than IP, to make things trickier. - session, err := mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - defer session.Close() - - const N = 3 - for i := 0; i < N; i++ { - select { - case <-dials: - case <-time.After(5 * time.Second): - c.Fatalf("expected %d dials, got %d", N, i) - } - } - select { - case <-dials: - c.Fatalf("got more dials than expected") - case <-time.After(100 * time.Millisecond): - } -} - -func (s *S) TestCustomDialNew(c *C) { - dials := make(chan bool, 16) - dial := func(addr *mgo.ServerAddr) (net.Conn, error) { - dials <- true - if addr.TCPAddr().Port == 40012 { - c.Check(addr.String(), Equals, "localhost:40012") - } - return net.DialTCP("tcp", nil, addr.TCPAddr()) - } - info := mgo.DialInfo{ - Addrs: []string{"localhost:40012"}, - DialServer: dial, - } - - // Use hostname here rather than IP, to make things trickier. 
- session, err := mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - defer session.Close() - - const N = 3 - for i := 0; i < N; i++ { - select { - case <-dials: - case <-time.After(5 * time.Second): - c.Fatalf("expected %d dials, got %d", N, i) - } - } - select { - case <-dials: - c.Fatalf("got more dials than expected") - case <-time.After(100 * time.Millisecond): - } -} - -func (s *S) TestPrimaryShutdownOnAuthShard(c *C) { - if *fast { - c.Skip("-fast") - } - - // Dial the shard. - session, err := mgo.Dial("localhost:40203") - c.Assert(err, IsNil) - defer session.Close() - - // Login and insert something to make it more realistic. - session.DB("admin").Login("root", "rapadura") - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(bson.M{"n": 1}) - c.Assert(err, IsNil) - - // Dial the replica set to figure the master out. - rs, err := mgo.Dial("root:rapadura@localhost:40031") - c.Assert(err, IsNil) - defer rs.Close() - - // With strong consistency, this will open a socket to the master. - result := &struct{ Host string }{} - err = rs.Run("serverStatus", result) - c.Assert(err, IsNil) - - // Kill the master. - host := result.Host - s.Stop(host) - - // This must fail, since the connection was broken. - err = rs.Run("serverStatus", result) - c.Assert(err, Equals, io.EOF) - - // This won't work because the master just died. - err = coll.Insert(bson.M{"n": 2}) - c.Assert(err, NotNil) - - // Refresh session and wait for re-election. - session.Refresh() - for i := 0; i < 60; i++ { - err = coll.Insert(bson.M{"n": 3}) - if err == nil { - break - } - c.Logf("Waiting for replica set to elect a new master. Last error: %v", err) - time.Sleep(500 * time.Millisecond) - } - c.Assert(err, IsNil) - - count, err := coll.Count() - c.Assert(count > 1, Equals, true) -} - -func (s *S) TestNearestSecondary(c *C) { - defer mgo.HackPingDelay(3 * time.Second)() - - rs1a := "127.0.0.1:40011" - rs1b := "127.0.0.1:40012" - rs1c := "127.0.0.1:40013" - s.Freeze(rs1b) - - session, err := mgo.Dial(rs1a) - c.Assert(err, IsNil) - defer session.Close() - - // Wait for the sync up to run through the first couple of servers. - for len(session.LiveServers()) != 2 { - c.Log("Waiting for two servers to be alive...") - time.Sleep(100 * time.Millisecond) - } - - // Extra delay to ensure the third server gets penalized. - time.Sleep(500 * time.Millisecond) - - // Release third server. - s.Thaw(rs1b) - - // Wait for it to come up. - for len(session.LiveServers()) != 3 { - c.Log("Waiting for all servers to be alive...") - time.Sleep(100 * time.Millisecond) - } - - session.SetMode(mgo.Monotonic, true) - var result struct{ Host string } - - // See which slave picks the line, several times to avoid chance. - for i := 0; i < 10; i++ { - session.Refresh() - err = session.Run("serverStatus", &result) - c.Assert(err, IsNil) - c.Assert(hostPort(result.Host), Equals, hostPort(rs1c)) - } - - if *fast { - // Don't hold back for several seconds. - return - } - - // Now hold the other server for long enough to penalize it. - s.Freeze(rs1c) - time.Sleep(5 * time.Second) - s.Thaw(rs1c) - - // Wait for the ping to be processed. - time.Sleep(500 * time.Millisecond) - - // Repeating the test should now pick the former server consistently. 
- for i := 0; i < 10; i++ { - session.Refresh() - err = session.Run("serverStatus", &result) - c.Assert(err, IsNil) - c.Assert(hostPort(result.Host), Equals, hostPort(rs1b)) - } -} - -func (s *S) TestConnectCloseConcurrency(c *C) { - restore := mgo.HackPingDelay(500 * time.Millisecond) - defer restore() - var wg sync.WaitGroup - const n = 500 - wg.Add(n) - for i := 0; i < n; i++ { - go func() { - defer wg.Done() - session, err := mgo.Dial("localhost:40001") - if err != nil { - c.Fatal(err) - } - time.Sleep(1) - session.Close() - }() - } - wg.Wait() -} - -func (s *S) TestSelectServers(c *C) { - if !s.versionAtLeast(2, 2) { - c.Skip("read preferences introduced in 2.2") - } - - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - session.SetMode(mgo.Eventual, true) - - var result struct{ Host string } - - session.Refresh() - session.SelectServers(bson.D{{"rs1", "b"}}) - err = session.Run("serverStatus", &result) - c.Assert(err, IsNil) - c.Assert(hostPort(result.Host), Equals, "40012") - - session.Refresh() - session.SelectServers(bson.D{{"rs1", "c"}}) - err = session.Run("serverStatus", &result) - c.Assert(err, IsNil) - c.Assert(hostPort(result.Host), Equals, "40013") -} - -func (s *S) TestSelectServersWithMongos(c *C) { - if !s.versionAtLeast(2, 2) { - c.Skip("read preferences introduced in 2.2") - } - - session, err := mgo.Dial("localhost:40021") - c.Assert(err, IsNil) - defer session.Close() - - ssresult := &struct{ Host string }{} - imresult := &struct{ IsMaster bool }{} - - // Figure the master while still using the strong session. - err = session.Run("serverStatus", ssresult) - c.Assert(err, IsNil) - err = session.Run("isMaster", imresult) - c.Assert(err, IsNil) - master := ssresult.Host - c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - - var slave1, slave2 string - switch hostPort(master) { - case "40021": - slave1, slave2 = "b", "c" - case "40022": - slave1, slave2 = "a", "c" - case "40023": - slave1, slave2 = "a", "b" - } - - // Collect op counters for everyone. - opc21a, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22a, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23a, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) - - // Do a SlaveOk query through MongoS - mongos, err := mgo.Dial("localhost:40202") - c.Assert(err, IsNil) - defer mongos.Close() - - mongos.SetMode(mgo.Monotonic, true) - - mongos.Refresh() - mongos.SelectServers(bson.D{{"rs2", slave1}}) - coll := mongos.DB("mydb").C("mycoll") - result := &struct{}{} - for i := 0; i != 5; i++ { - err := coll.Find(nil).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - } - - mongos.Refresh() - mongos.SelectServers(bson.D{{"rs2", slave2}}) - coll = mongos.DB("mydb").C("mycoll") - for i := 0; i != 7; i++ { - err := coll.Find(nil).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - } - - // Collect op counters for everyone again. 
- opc21b, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22b, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23b, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) - - switch hostPort(master) { - case "40021": - c.Check(opc21b.Query-opc21a.Query, Equals, 0) - c.Check(opc22b.Query-opc22a.Query, Equals, 5) - c.Check(opc23b.Query-opc23a.Query, Equals, 7) - case "40022": - c.Check(opc21b.Query-opc21a.Query, Equals, 5) - c.Check(opc22b.Query-opc22a.Query, Equals, 0) - c.Check(opc23b.Query-opc23a.Query, Equals, 7) - case "40023": - c.Check(opc21b.Query-opc21a.Query, Equals, 5) - c.Check(opc22b.Query-opc22a.Query, Equals, 7) - c.Check(opc23b.Query-opc23a.Query, Equals, 0) - default: - c.Fatal("Uh?") - } -} diff --git a/vendor/labix.org/v2/mgo/export_test.go b/vendor/labix.org/v2/mgo/export_test.go deleted file mode 100644 index b6bfcbc73..000000000 --- a/vendor/labix.org/v2/mgo/export_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package mgo - -import ( - "time" -) - -func HackSocketsPerServer(newLimit int) (restore func()) { - oldLimit := newLimit - restore = func() { - socketsPerServer = oldLimit - } - socketsPerServer = newLimit - return -} - -func HackPingDelay(newDelay time.Duration) (restore func()) { - globalMutex.Lock() - defer globalMutex.Unlock() - - oldDelay := pingDelay - restore = func() { - globalMutex.Lock() - pingDelay = oldDelay - globalMutex.Unlock() - } - pingDelay = newDelay - return -} - -func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) { - globalMutex.Lock() - defer globalMutex.Unlock() - - oldTimeout := syncSocketTimeout - restore = func() { - globalMutex.Lock() - syncSocketTimeout = oldTimeout - globalMutex.Unlock() - } - syncSocketTimeout = newTimeout - return -} diff --git a/vendor/labix.org/v2/mgo/gridfs_test.go b/vendor/labix.org/v2/mgo/gridfs_test.go deleted file mode 100644 index fbdd5b0de..000000000 --- a/vendor/labix.org/v2/mgo/gridfs_test.go +++ /dev/null @@ -1,644 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "io" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - . 
"launchpad.net/gocheck" - "os" - "time" -) - -func (s *S) TestGridFSCreate(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - before := bson.Now() - - gfs := db.GridFS("fs") - file, err := gfs.Create("") - c.Assert(err, IsNil) - - n, err := file.Write([]byte("some data")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 9) - - err = file.Close() - c.Assert(err, IsNil) - - after := bson.Now() - - // Check the file information. - result := M{} - err = db.C("fs.files").Find(nil).One(result) - c.Assert(err, IsNil) - - fileId, ok := result["_id"].(bson.ObjectId) - c.Assert(ok, Equals, true) - c.Assert(fileId.Valid(), Equals, true) - result["_id"] = "" - - ud, ok := result["uploadDate"].(time.Time) - c.Assert(ok, Equals, true) - c.Assert(ud.After(before) && ud.Before(after), Equals, true) - result["uploadDate"] = "" - - expected := M{ - "_id": "", - "length": 9, - "chunkSize": 262144, - "uploadDate": "", - "md5": "1e50210a0202497fb79bc38b6ade6c34", - } - c.Assert(result, DeepEquals, expected) - - // Check the chunk. - result = M{} - err = db.C("fs.chunks").Find(nil).One(result) - c.Assert(err, IsNil) - - chunkId, ok := result["_id"].(bson.ObjectId) - c.Assert(ok, Equals, true) - c.Assert(chunkId.Valid(), Equals, true) - result["_id"] = "" - - expected = M{ - "_id": "", - "files_id": fileId, - "n": 0, - "data": []byte("some data"), - } - c.Assert(result, DeepEquals, expected) - - // Check that an index was created. - indexes, err := db.C("fs.chunks").Indexes() - c.Assert(err, IsNil) - c.Assert(len(indexes), Equals, 2) - c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"}) -} - -func (s *S) TestGridFSFileDetails(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile1.txt") - c.Assert(err, IsNil) - - n, err := file.Write([]byte("some")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 4) - - c.Assert(file.Size(), Equals, int64(4)) - - n, err = file.Write([]byte(" data")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 5) - - c.Assert(file.Size(), Equals, int64(9)) - - id, _ := file.Id().(bson.ObjectId) - c.Assert(id.Valid(), Equals, true) - c.Assert(file.Name(), Equals, "myfile1.txt") - c.Assert(file.ContentType(), Equals, "") - - var info interface{} - err = file.GetMeta(&info) - c.Assert(err, IsNil) - c.Assert(info, IsNil) - - file.SetId("myid") - file.SetName("myfile2.txt") - file.SetContentType("text/plain") - file.SetMeta(M{"any": "thing"}) - - c.Assert(file.Id(), Equals, "myid") - c.Assert(file.Name(), Equals, "myfile2.txt") - c.Assert(file.ContentType(), Equals, "text/plain") - - err = file.GetMeta(&info) - c.Assert(err, IsNil) - c.Assert(info, DeepEquals, bson.M{"any": "thing"}) - - err = file.Close() - c.Assert(err, IsNil) - - c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34") - - ud := file.UploadDate() - now := time.Now() - c.Assert(ud.Before(now), Equals, true) - c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true) - - result := M{} - err = db.C("fs.files").Find(nil).One(result) - c.Assert(err, IsNil) - - result["uploadDate"] = "" - - expected := M{ - "_id": "myid", - "length": 9, - "chunkSize": 262144, - "uploadDate": "", - "md5": "1e50210a0202497fb79bc38b6ade6c34", - "filename": "myfile2.txt", - "contentType": "text/plain", - "metadata": M{"any": "thing"}, - } - c.Assert(result, DeepEquals, expected) -} - -func (s *S) TestGridFSCreateWithChunking(c 
*C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("") - c.Assert(err, IsNil) - - file.SetChunkSize(5) - - // Smaller than the chunk size. - n, err := file.Write([]byte("abc")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - - // Boundary in the middle. - n, err = file.Write([]byte("defg")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 4) - - // Boundary at the end. - n, err = file.Write([]byte("hij")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - - // Larger than the chunk size, with 3 chunks. - n, err = file.Write([]byte("klmnopqrstuv")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 12) - - err = file.Close() - c.Assert(err, IsNil) - - // Check the file information. - result := M{} - err = db.C("fs.files").Find(nil).One(result) - c.Assert(err, IsNil) - - fileId, _ := result["_id"].(bson.ObjectId) - c.Assert(fileId.Valid(), Equals, true) - result["_id"] = "" - result["uploadDate"] = "" - - expected := M{ - "_id": "", - "length": 22, - "chunkSize": 5, - "uploadDate": "", - "md5": "44a66044834cbe55040089cabfc102d5", - } - c.Assert(result, DeepEquals, expected) - - // Check the chunks. - iter := db.C("fs.chunks").Find(nil).Sort("n").Iter() - dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"} - for i := 0; ; i++ { - result = M{} - if !iter.Next(result) { - if i != 5 { - c.Fatalf("Expected 5 chunks, got %d", i) - } - break - } - c.Assert(iter.Close(), IsNil) - - result["_id"] = "" - - expected = M{ - "_id": "", - "files_id": fileId, - "n": i, - "data": []byte(dataChunks[i]), - } - c.Assert(result, DeepEquals, expected) - } -} - -func (s *S) TestGridFSAbort(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.Create("") - c.Assert(err, IsNil) - - file.SetChunkSize(5) - - n, err := file.Write([]byte("some data")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 9) - - var count int - for i := 0; i < 10; i++ { - count, err = db.C("fs.chunks").Count() - if count > 0 || err != nil { - break - } - } - c.Assert(err, IsNil) - c.Assert(count, Equals, 1) - - file.Abort() - - err = file.Close() - c.Assert(err, ErrorMatches, "write aborted") - - count, err = db.C("fs.chunks").Count() - c.Assert(err, IsNil) - c.Assert(count, Equals, 0) -} - -func (s *S) TestGridFSOpenNotFound(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.OpenId("non-existent") - c.Assert(err == mgo.ErrNotFound, Equals, true) - c.Assert(file, IsNil) - - file, err = gfs.Open("non-existent") - c.Assert(err == mgo.ErrNotFound, Equals, true) - c.Assert(file, IsNil) -} - -func (s *S) TestGridFSReadAll(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.Create("") - c.Assert(err, IsNil) - id := file.Id() - - file.SetChunkSize(5) - - n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 22) - - err = file.Close() - c.Assert(err, IsNil) - - file, err = gfs.OpenId(id) - c.Assert(err, IsNil) - - b := make([]byte, 30) - n, err = file.Read(b) - c.Assert(n, Equals, 22) - c.Assert(err, IsNil) - - n, err = file.Read(b) - c.Assert(n, Equals, 0) - c.Assert(err == io.EOF, Equals, true) - - err = 
file.Close() - c.Assert(err, IsNil) -} - -func (s *S) TestGridFSReadChunking(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("") - c.Assert(err, IsNil) - - id := file.Id() - - file.SetChunkSize(5) - - n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 22) - - err = file.Close() - c.Assert(err, IsNil) - - file, err = gfs.OpenId(id) - c.Assert(err, IsNil) - - b := make([]byte, 30) - - // Smaller than the chunk size. - n, err = file.Read(b[:3]) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - c.Assert(b[:3], DeepEquals, []byte("abc")) - - // Boundary in the middle. - n, err = file.Read(b[:4]) - c.Assert(err, IsNil) - c.Assert(n, Equals, 4) - c.Assert(b[:4], DeepEquals, []byte("defg")) - - // Boundary at the end. - n, err = file.Read(b[:3]) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - c.Assert(b[:3], DeepEquals, []byte("hij")) - - // Larger than the chunk size, with 3 chunks. - n, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(n, Equals, 12) - c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv")) - - n, err = file.Read(b) - c.Assert(n, Equals, 0) - c.Assert(err == io.EOF, Equals, true) - - err = file.Close() - c.Assert(err, IsNil) -} - -func (s *S) TestGridFSOpen(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'1'}) - file.Close() - - file, err = gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'2'}) - file.Close() - - file, err = gfs.Open("myfile.txt") - c.Assert(err, IsNil) - defer file.Close() - - var b [1]byte - - _, err = file.Read(b[:]) - c.Assert(err, IsNil) - c.Assert(string(b[:]), Equals, "2") -} - -func (s *S) TestGridFSSeek(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - file, err := gfs.Create("") - c.Assert(err, IsNil) - id := file.Id() - - file.SetChunkSize(5) - - n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 22) - - err = file.Close() - c.Assert(err, IsNil) - - b := make([]byte, 5) - - file, err = gfs.OpenId(id) - c.Assert(err, IsNil) - - o, err := file.Seek(3, os.SEEK_SET) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(3)) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("defgh")) - - o, err = file.Seek(5, os.SEEK_CUR) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(13)) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("nopqr")) - - o, err = file.Seek(-10, os.SEEK_END) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(12)) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("mnopq")) - - o, err = file.Seek(8, os.SEEK_SET) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(8)) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("ijklm")) - - // Trivial seek forward within same chunk. Already - // got the data, shouldn't touch the database. 
- sent := mgo.GetStats().SentOps - o, err = file.Seek(1, os.SEEK_CUR) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(14)) - c.Assert(mgo.GetStats().SentOps, Equals, sent) - _, err = file.Read(b) - c.Assert(err, IsNil) - c.Assert(b, DeepEquals, []byte("opqrs")) - - // Try seeking past end of file. - file.Seek(3, os.SEEK_SET) - o, err = file.Seek(23, os.SEEK_SET) - c.Assert(err, ErrorMatches, "seek past end of file") - c.Assert(o, Equals, int64(3)) -} - -func (s *S) TestGridFSRemoveId(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'1'}) - file.Close() - - file, err = gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'2'}) - id := file.Id() - file.Close() - - err = gfs.RemoveId(id) - c.Assert(err, IsNil) - - file, err = gfs.Open("myfile.txt") - c.Assert(err, IsNil) - defer file.Close() - - var b [1]byte - - _, err = file.Read(b[:]) - c.Assert(err, IsNil) - c.Assert(string(b[:]), Equals, "1") - - n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 0) -} - -func (s *S) TestGridFSRemove(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'1'}) - file.Close() - - file, err = gfs.Create("myfile.txt") - c.Assert(err, IsNil) - file.Write([]byte{'2'}) - file.Close() - - err = gfs.Remove("myfile.txt") - c.Assert(err, IsNil) - - _, err = gfs.Open("myfile.txt") - c.Assert(err == mgo.ErrNotFound, Equals, true) - - n, err := db.C("fs.chunks").Find(nil).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 0) -} - -func (s *S) TestGridFSOpenNext(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("mydb") - - gfs := db.GridFS("fs") - - file, err := gfs.Create("myfile1.txt") - c.Assert(err, IsNil) - file.Write([]byte{'1'}) - file.Close() - - file, err = gfs.Create("myfile2.txt") - c.Assert(err, IsNil) - file.Write([]byte{'2'}) - file.Close() - - var f *mgo.GridFile - var b [1]byte - - iter := gfs.Find(nil).Sort("-filename").Iter() - - ok := gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, true) - c.Check(f.Name(), Equals, "myfile2.txt") - - _, err = f.Read(b[:]) - c.Assert(err, IsNil) - c.Assert(string(b[:]), Equals, "2") - - ok = gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, true) - c.Check(f.Name(), Equals, "myfile1.txt") - - _, err = f.Read(b[:]) - c.Assert(err, IsNil) - c.Assert(string(b[:]), Equals, "1") - - ok = gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - c.Assert(f, IsNil) - - // Do it again with a more restrictive query to make sure - // it's actually taken into account. 
- iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter() - - ok = gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, true) - c.Check(f.Name(), Equals, "myfile1.txt") - - ok = gfs.OpenNext(iter, &f) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - c.Assert(f, IsNil) -} diff --git a/vendor/labix.org/v2/mgo/queue_test.go b/vendor/labix.org/v2/mgo/queue_test.go deleted file mode 100644 index 38b032531..000000000 --- a/vendor/labix.org/v2/mgo/queue_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo - -import ( - "launchpad.net/gocheck" -) - -type QS struct{} - -var _ = gocheck.Suite(&QS{}) - -func (s *QS) TestSequentialGrowth(c *gocheck.C) { - q := queue{} - n := 2048 - for i := 0; i != n; i++ { - q.Push(i) - } - for i := 0; i != n; i++ { - c.Assert(q.Pop(), gocheck.Equals, i) - } -} - -var queueTestLists = [][]int{ - // {0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, - - // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7} - {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11}, - - // {8, 9, 10, 11, ... 
2, 3, 4, 5, 6, 7} - {0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11}, - - // {0, 1, 2, 3, 4, 5, 6, 7, 8} - {0, 1, 2, 3, 4, 5, 6, 7, 8, - -1, -1, -1, -1, -1, -1, -1, -1, -1, - 0, 1, 2, 3, 4, 5, 6, 7, 8}, -} - -func (s *QS) TestQueueTestLists(c *gocheck.C) { - test := []int{} - testi := 0 - reset := func() { - test = test[0:0] - testi = 0 - } - push := func(i int) { - test = append(test, i) - } - pop := func() (i int) { - if testi == len(test) { - return -1 - } - i = test[testi] - testi++ - return - } - - for _, list := range queueTestLists { - reset() - q := queue{} - for _, n := range list { - if n == -1 { - c.Assert(q.Pop(), gocheck.Equals, pop(), - gocheck.Commentf("With list %#v", list)) - } else { - q.Push(n) - push(n) - } - } - - for n := pop(); n != -1; n = pop() { - c.Assert(q.Pop(), gocheck.Equals, n, - gocheck.Commentf("With list %#v", list)) - } - - c.Assert(q.Pop(), gocheck.Equals, nil, - gocheck.Commentf("With list %#v", list)) - } -} diff --git a/vendor/labix.org/v2/mgo/session_test.go b/vendor/labix.org/v2/mgo/session_test.go deleted file mode 100644 index e9f90f56b..000000000 --- a/vendor/labix.org/v2/mgo/session_test.go +++ /dev/null @@ -1,3260 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "flag" - "fmt" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "time" -) - -func (s *S) TestRunString(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - result := struct{ Ok int }{} - err = session.Run("ping", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestRunValue(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - result := struct{ Ok int }{} - err = session.Run(M{"ping": 1}, &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestPing(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Just ensure the nonce has been received. 
- result := struct{}{} - err = session.Run("ping", &result) - - mgo.ResetStats() - - err = session.Ping() - c.Assert(err, IsNil) - - // Pretty boring. - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 1) - c.Assert(stats.ReceivedOps, Equals, 1) -} - -func (s *S) TestURLSingle(c *C) { - session, err := mgo.Dial("mongodb://localhost:40001/") - c.Assert(err, IsNil) - defer session.Close() - - result := struct{ Ok int }{} - err = session.Run("ping", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestURLMany(c *C) { - session, err := mgo.Dial("mongodb://localhost:40011,localhost:40012/") - c.Assert(err, IsNil) - defer session.Close() - - result := struct{ Ok int }{} - err = session.Run("ping", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, 1) -} - -func (s *S) TestURLParsing(c *C) { - urls := []string{ - "localhost:40001?foo=1&bar=2", - "localhost:40001?foo=1;bar=2", - } - for _, url := range urls { - session, err := mgo.Dial(url) - if session != nil { - session.Close() - } - c.Assert(err, ErrorMatches, "unsupported connection URL option: (foo=1|bar=2)") - } -} - -func (s *S) TestInsertFindOne(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1, "b": 2}) - c.Assert(err, IsNil) - err = coll.Insert(M{"a": 1, "b": 3}) - c.Assert(err, IsNil) - - result := struct{ A, B int }{} - - err = coll.Find(M{"a": 1}).Sort("b").One(&result) - c.Assert(err, IsNil) - c.Assert(result.A, Equals, 1) - c.Assert(result.B, Equals, 2) -} - -func (s *S) TestInsertFindOneNil(c *C) { - session, err := mgo.Dial("localhost:40002") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Find(nil).One(nil) - c.Assert(err, ErrorMatches, "unauthorized.*|not authorized.*") -} - -func (s *S) TestInsertFindOneMap(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1, "b": 2}) - c.Assert(err, IsNil) - result := make(M) - err = coll.Find(M{"a": 1}).One(result) - c.Assert(err, IsNil) - c.Assert(result["a"], Equals, 1) - c.Assert(result["b"], Equals, 2) -} - -func (s *S) TestInsertFindAll(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"a": 1, "b": 2}) - c.Assert(err, IsNil) - err = coll.Insert(M{"a": 3, "b": 4}) - c.Assert(err, IsNil) - - type R struct{ A, B int } - var result []R - - assertResult := func() { - c.Assert(len(result), Equals, 2) - c.Assert(result[0].A, Equals, 1) - c.Assert(result[0].B, Equals, 2) - c.Assert(result[1].A, Equals, 3) - c.Assert(result[1].B, Equals, 4) - } - - // nil slice - err = coll.Find(nil).Sort("a").All(&result) - c.Assert(err, IsNil) - assertResult() - - // Previously allocated slice - allocd := make([]R, 5) - result = allocd - err = coll.Find(nil).Sort("a").All(&result) - c.Assert(err, IsNil) - assertResult() - - // Ensure result is backed by the originally allocated array - c.Assert(&result[0], Equals, &allocd[0]) - - // Non-pointer slice error - f := func() { coll.Find(nil).All(result) } - c.Assert(f, Panics, "result argument must be a slice address") - - // Non-slice error - f = func() { coll.Find(nil).All(new(int)) } - c.Assert(f, Panics, "result argument must be a slice address") -} - -func (s *S) TestFindRef(c *C) { - session, err := 
mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - db1 := session.DB("db1") - db1col1 := db1.C("col1") - - db2 := session.DB("db2") - db2col1 := db2.C("col1") - - err = db1col1.Insert(M{"_id": 1, "n": 1}) - c.Assert(err, IsNil) - err = db1col1.Insert(M{"_id": 2, "n": 2}) - c.Assert(err, IsNil) - err = db2col1.Insert(M{"_id": 2, "n": 3}) - c.Assert(err, IsNil) - - result := struct{ N int }{} - - ref1 := &mgo.DBRef{Collection: "col1", Id: 1} - ref2 := &mgo.DBRef{Collection: "col1", Id: 2, Database: "db2"} - - err = db1.FindRef(ref1).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) - - err = db1.FindRef(ref2).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 3) - - err = db2.FindRef(ref1).One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = db2.FindRef(ref2).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 3) - - err = session.FindRef(ref2).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 3) - - f := func() { session.FindRef(ref1).One(&result) } - c.Assert(f, PanicMatches, "Can't resolve database for &mgo.DBRef{Collection:\"col1\", Id:1, Database:\"\"}") -} - -func (s *S) TestDatabaseAndCollectionNames(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - db1 := session.DB("db1") - db1col1 := db1.C("col1") - db1col2 := db1.C("col2") - - db2 := session.DB("db2") - db2col1 := db2.C("col3") - - err = db1col1.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - err = db1col2.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - err = db2col1.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - names, err := session.DatabaseNames() - c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string{"db1", "db2"}) { - // 2.4+ has "local" as well. 
- c.Assert(names, DeepEquals, []string{"db1", "db2", "local"}) - } - - names, err = db1.CollectionNames() - c.Assert(err, IsNil) - c.Assert(names, DeepEquals, []string{"col1", "col2", "system.indexes"}) - - names, err = db2.CollectionNames() - c.Assert(err, IsNil) - c.Assert(names, DeepEquals, []string{"col3", "system.indexes"}) -} - -func (s *S) TestSelect(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - result := struct{ A, B int }{} - - err = coll.Find(M{"a": 1}).Select(M{"b": 1}).One(&result) - c.Assert(err, IsNil) - c.Assert(result.A, Equals, 0) - c.Assert(result.B, Equals, 2) -} - -func (s *S) TestInlineMap(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - var v, result1 struct { - A int - M map[string]int ",inline" - } - - v.A = 1 - v.M = map[string]int{"b": 2} - err = coll.Insert(v) - c.Assert(err, IsNil) - - noId := M{"_id": 0} - - err = coll.Find(nil).Select(noId).One(&result1) - c.Assert(err, IsNil) - c.Assert(result1.A, Equals, 1) - c.Assert(result1.M, DeepEquals, map[string]int{"b": 2}) - - var result2 M - err = coll.Find(nil).Select(noId).One(&result2) - c.Assert(err, IsNil) - c.Assert(result2, DeepEquals, M{"a": 1, "b": 2}) - -} - -func (s *S) TestUpdate(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"k": n, "n": n}) - c.Assert(err, IsNil) - } - - err = coll.Update(M{"k": 42}, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - - result := make(M) - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 43) - - err = coll.Update(M{"k": 47}, M{"k": 47, "n": 47}) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.Find(M{"k": 47}).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestUpdateId(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"_id": n, "n": n}) - c.Assert(err, IsNil) - } - - err = coll.UpdateId(42, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - - result := make(M) - err = coll.FindId(42).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 43) - - err = coll.UpdateId(47, M{"k": 47, "n": 47}) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.FindId(47).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestUpdateNil(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"k": 42, "n": 42}) - c.Assert(err, IsNil) - err = coll.Update(nil, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - - result := make(M) - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 43) - - err = coll.Insert(M{"k": 45, "n": 45}) - c.Assert(err, IsNil) - _, err = coll.UpdateAll(nil, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 44) - err = coll.Find(M{"k": 45}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 46) - -} - -func (s *S) 
TestUpsert(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"k": n, "n": n}) - c.Assert(err, IsNil) - } - - info, err := coll.Upsert(M{"k": 42}, M{"k": 42, "n": 24}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.UpsertedId, IsNil) - - result := M{} - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 24) - - // Insert with internally created id. - info, err = coll.Upsert(M{"k": 47}, M{"k": 47, "n": 47}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 0) - c.Assert(info.UpsertedId, NotNil) - - err = coll.Find(M{"k": 47}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 47) - - result = M{} - err = coll.Find(M{"_id": info.UpsertedId}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 47) - - // Insert with provided id. - info, err = coll.Upsert(M{"k": 48}, M{"k": 48, "n": 48, "_id": 48}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 0) - if s.versionAtLeast(2, 6) { - c.Assert(info.UpsertedId, Equals, 48) - } else { - c.Assert(info.UpsertedId, IsNil) // Unfortunate, but that's what Mongo gave us. - } - - err = coll.Find(M{"k": 48}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 48) -} - -func (s *S) TestUpsertId(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"_id": n, "n": n}) - c.Assert(err, IsNil) - } - - info, err := coll.UpsertId(42, M{"n": 24}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.UpsertedId, IsNil) - - result := M{} - err = coll.FindId(42).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 24) - - info, err = coll.UpsertId(47, M{"_id": 47, "n": 47}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 0) - if s.versionAtLeast(2, 6) { - c.Assert(info.UpsertedId, Equals, 47) - } else { - c.Assert(info.UpsertedId, IsNil) - } - - err = coll.FindId(47).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 47) -} - -func (s *S) TestUpdateAll(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"k": n, "n": n}) - c.Assert(err, IsNil) - } - - info, err := coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$inc": M{"n": 1}}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 4) - - result := make(M) - err = coll.Find(M{"k": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 42) - - err = coll.Find(M{"k": 43}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 44) - - err = coll.Find(M{"k": 44}).One(result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 45) - - if !s.versionAtLeast(2, 6) { - // 2.6 made this invalid. 
- info, err = coll.UpdateAll(M{"k": 47}, M{"k": 47, "n": 47}) - c.Assert(err, Equals, nil) - c.Assert(info.Updated, Equals, 0) - } -} - -func (s *S) TestRemove(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - err = coll.Remove(M{"n": M{"$gt": 42}}) - c.Assert(err, IsNil) - - result := &struct{ N int }{} - err = coll.Find(M{"n": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 42) - - err = coll.Find(M{"n": 43}).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.Find(M{"n": 44}).One(result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 44) -} - -func (s *S) TestRemoveId(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"_id": 40}, M{"_id": 41}, M{"_id": 42}) - c.Assert(err, IsNil) - - err = coll.RemoveId(41) - c.Assert(err, IsNil) - - c.Assert(coll.FindId(40).One(nil), IsNil) - c.Assert(coll.FindId(41).One(nil), Equals, mgo.ErrNotFound) - c.Assert(coll.FindId(42).One(nil), IsNil) -} - -func (s *S) TestRemoveAll(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - info, err := coll.RemoveAll(M{"n": M{"$gt": 42}}) - c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 0) - c.Assert(info.Removed, Equals, 4) - c.Assert(info.UpsertedId, IsNil) - - result := &struct{ N int }{} - err = coll.Find(M{"n": 42}).One(result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 42) - - err = coll.Find(M{"n": 43}).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.Find(M{"n": 44}).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestDropDatabase(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - db1 := session.DB("db1") - db1.C("col").Insert(M{"_id": 1}) - - db2 := session.DB("db2") - db2.C("col").Insert(M{"_id": 1}) - - err = db1.DropDatabase() - c.Assert(err, IsNil) - - names, err := session.DatabaseNames() - c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string{"db2"}) { - // 2.4+ has "local" as well. - c.Assert(names, DeepEquals, []string{"db2", "local"}) - } - - err = db2.DropDatabase() - c.Assert(err, IsNil) - - names, err = session.DatabaseNames() - c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string(nil)) { - // 2.4+ has "local" as well. 
- c.Assert(names, DeepEquals, []string{"local"}) - } -} - -func (s *S) TestDropCollection(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("db1") - db.C("col1").Insert(M{"_id": 1}) - db.C("col2").Insert(M{"_id": 1}) - - err = db.C("col1").DropCollection() - c.Assert(err, IsNil) - - names, err := db.CollectionNames() - c.Assert(err, IsNil) - c.Assert(names, DeepEquals, []string{"col2", "system.indexes"}) - - err = db.C("col2").DropCollection() - c.Assert(err, IsNil) - - names, err = db.CollectionNames() - c.Assert(err, IsNil) - c.Assert(names, DeepEquals, []string{"system.indexes"}) -} - -func (s *S) TestCreateCollectionCapped(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - info := &mgo.CollectionInfo{ - Capped: true, - MaxBytes: 1024, - MaxDocs: 3, - } - err = coll.Create(info) - c.Assert(err, IsNil) - - ns := []int{1, 2, 3, 4, 5} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Find(nil).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) -} - -func (s *S) TestCreateCollectionNoIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - info := &mgo.CollectionInfo{ - DisableIdIndex: true, - } - err = coll.Create(info) - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(indexes, HasLen, 0) -} - -func (s *S) TestCreateCollectionForceIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - info := &mgo.CollectionInfo{ - ForceIdIndex: true, - Capped: true, - MaxBytes: 1024, - } - err = coll.Create(info) - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(indexes, HasLen, 1) -} - -func (s *S) TestIsDupValues(c *C) { - c.Assert(mgo.IsDup(nil), Equals, false) - c.Assert(mgo.IsDup(&mgo.LastError{Code: 1}), Equals, false) - c.Assert(mgo.IsDup(&mgo.QueryError{Code: 1}), Equals, false) - c.Assert(mgo.IsDup(&mgo.LastError{Code: 11000}), Equals, true) - c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11000}), Equals, true) - c.Assert(mgo.IsDup(&mgo.LastError{Code: 11001}), Equals, true) - c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11001}), Equals, true) - c.Assert(mgo.IsDup(&mgo.LastError{Code: 12582}), Equals, true) - c.Assert(mgo.IsDup(&mgo.QueryError{Code: 12582}), Equals, true) - lerr := &mgo.LastError{Code: 16460, Err:"error inserting 1 documents to shard ... 
caused by :: E11000 duplicate key error index: ..."} - c.Assert(mgo.IsDup(lerr), Equals, true) -} - -func (s *S) TestIsDupPrimary(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"_id": 1}) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestIsDupUnique(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - index := mgo.Index{ - Key: []string{"a", "b"}, - Unique: true, - } - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndex(index) - c.Assert(err, IsNil) - - err = coll.Insert(M{"a": 1, "b": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"a": 1, "b": 1}) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestIsDupCapped(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - info := &mgo.CollectionInfo{ - ForceIdIndex: true, - Capped: true, - MaxBytes: 1024, - } - err = coll.Create(info) - c.Assert(err, IsNil) - - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"_id": 1}) - // The error was different for capped collections before 2.6. - c.Assert(err, ErrorMatches, ".*duplicate key.*") - // The issue is reduced by using IsDup. - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestIsDupFindAndModify(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndex(mgo.Index{Key: []string{"n"}, Unique: true}) - c.Assert(err, IsNil) - - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"n": 2}) - c.Assert(err, IsNil) - _, err = coll.Find(M{"n": 1}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, bson.M{}) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestFindAndModify(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"n": 42}) - - session.SetMode(mgo.Monotonic, true) - - result := M{} - info, err := coll.Find(M{"n": 42}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 42) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.Removed, Equals, 0) - c.Assert(info.UpsertedId, IsNil) - - result = M{} - info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 44) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.Removed, Equals, 0) - c.Assert(info.UpsertedId, IsNil) - - result = M{} - info, err = coll.Find(M{"n": 50}).Apply(mgo.Change{Upsert: true, Update: M{"n": 51, "o": 52}}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], IsNil) - c.Assert(info.Updated, Equals, 0) - c.Assert(info.Removed, Equals, 0) - c.Assert(info.UpsertedId, NotNil) - - result = M{} - info, err = coll.Find(nil).Sort("-n").Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], Equals, 52) - c.Assert(info.Updated, Equals, 1) - c.Assert(info.Removed, Equals, 0) - c.Assert(info.UpsertedId, 
IsNil) - - result = M{} - info, err = coll.Find(M{"n": 52}).Select(M{"o": 1}).Apply(mgo.Change{Remove: true}, result) - c.Assert(err, IsNil) - c.Assert(result["n"], IsNil) - c.Assert(result["o"], Equals, 52) - c.Assert(info.Updated, Equals, 0) - c.Assert(info.Removed, Equals, 1) - c.Assert(info.UpsertedId, IsNil) - - result = M{} - info, err = coll.Find(M{"n": 60}).Apply(mgo.Change{Remove: true}, result) - c.Assert(err, Equals, mgo.ErrNotFound) - c.Assert(len(result), Equals, 0) - c.Assert(info, IsNil) -} - -func (s *S) TestFindAndModifyBug997828(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"n": "not-a-number"}) - - result := make(M) - _, err = coll.Find(M{"n": "not-a-number"}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result) - c.Assert(err, ErrorMatches, `(exception: )?Cannot apply \$inc .*`) - if s.versionAtLeast(2, 1) { - qerr, _ := err.(*mgo.QueryError) - c.Assert(qerr, NotNil, Commentf("err: %#v", err)) - if s.versionAtLeast(2, 6) { - // Oh, the dance of error codes. :-( - c.Assert(qerr.Code, Equals, 16837) - } else { - c.Assert(qerr.Code, Equals, 10140) - } - } else { - lerr, _ := err.(*mgo.LastError) - c.Assert(lerr, NotNil, Commentf("err: %#v", err)) - c.Assert(lerr.Code, Equals, 10140) - } -} - -func (s *S) TestCountCollection(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) -} - -func (s *S) TestCountQuery(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Find(M{"n": M{"$gt": 40}}).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 2) -} - -func (s *S) TestCountQuerySorted(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Find(M{"n": M{"$gt": 40}}).Sort("n").Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 2) -} - -func (s *S) TestCountSkipLimit(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - n, err := coll.Find(nil).Skip(1).Limit(3).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - - n, err = coll.Find(nil).Skip(1).Limit(5).Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, 4) -} - -func (s *S) TestQueryExplain(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - m := M{} - query := coll.Find(nil).Limit(2) - err = query.Explain(m) - c.Assert(err, IsNil) - c.Assert(m["cursor"], Equals, "BasicCursor") - c.Assert(m["nscanned"], Equals, 2) - c.Assert(m["n"], Equals, 2) - - n := 0 - var result M - iter 
:= query.Iter() - for iter.Next(&result) { - n++ - } - c.Assert(iter.Close(), IsNil) - c.Assert(n, Equals, 2) -} - -func (s *S) TestQueryHint(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.EnsureIndexKey("a") - - m := M{} - err = coll.Find(nil).Hint("a").Explain(m) - c.Assert(err, IsNil) - c.Assert(m["indexBounds"], NotNil) - c.Assert(m["indexBounds"].(M)["a"], NotNil) -} - -func (s *S) TestFindOneNotFound(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - result := struct{ A, B int }{} - err = coll.Find(M{"a": 1}).One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) - c.Assert(err, ErrorMatches, "not found") - c.Assert(err == mgo.ErrNotFound, Equals, true) -} - -func (s *S) TestFindNil(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 1}) - c.Assert(err, IsNil) - - result := struct{ N int }{} - - err = coll.Find(nil).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 1) -} - -func (s *S) TestFindId(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"_id": 41, "n": 41}) - c.Assert(err, IsNil) - err = coll.Insert(M{"_id": 42, "n": 42}) - c.Assert(err, IsNil) - - result := struct{ N int }{} - - err = coll.FindId(42).One(&result) - c.Assert(err, IsNil) - c.Assert(result.N, Equals, 42) -} - -func (s *S) TestFindIterAll(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - iter := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2).Iter() - result := struct{ N int }{} - for i := 2; i < 7; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 1 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP - c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. 
- c.Assert(stats.ReceivedDocs, Equals, 5) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestFindIterTwiceWithSameQuery(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for i := 40; i != 47; i++ { - coll.Insert(M{"n": i}) - } - - query := coll.Find(M{}).Sort("n") - - result1 := query.Skip(1).Iter() - result2 := query.Skip(2).Iter() - - result := struct{ N int }{} - ok := result2.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, 42) - ok = result1.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, 41) -} - -func (s *S) TestFindIterWithoutResults(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 42}) - - iter := coll.Find(M{"n": 0}).Iter() - - result := struct{ N int }{} - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - c.Assert(result.N, Equals, 0) -} - -func (s *S) TestFindIterLimit(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3) - iter := query.Iter() - - result := struct{ N int }{} - for i := 2; i < 5; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - } - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*KILL_CURSORS_OP - c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestTooManyItemsLimitBug(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) - - mgo.SetDebug(false) - coll := session.DB("mydb").C("mycoll") - words := strings.Split("foo bar baz", " ") - for i := 0; i < 5; i++ { - words = append(words, words...) - } - doc := bson.D{{"words", words}} - inserts := 10000 - limit := 5000 - iters := 0 - c.Assert(inserts > limit, Equals, true) - for i := 0; i < inserts; i++ { - err := coll.Insert(&doc) - c.Assert(err, IsNil) - } - iter := coll.Find(nil).Limit(limit).Iter() - for iter.Next(&doc) { - if iters%100 == 0 { - c.Logf("Seen %d docments", iters) - } - iters++ - } - c.Assert(iter.Close(), IsNil) - c.Assert(iters, Equals, limit) -} - -func serverCursorsOpen(session *mgo.Session) int { - var result struct { - Cursors struct { - TotalOpen int `bson:"totalOpen"` - TimedOut int `bson:"timedOut"` - } - } - err := session.Run("serverStatus", &result) - if err != nil { - panic(err) - } - return result.Cursors.TotalOpen -} - -func (s *S) TestFindIterLimitWithMore(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - // Insane amounts of logging otherwise due to the - // amount of data being shuffled. 
- mgo.SetDebug(false) - defer mgo.SetDebug(true) - - // Should amount to more than 4MB bson payload, - // the default limit per result chunk. - const total = 4096 - var d struct{ A [1024]byte } - docs := make([]interface{}, total) - for i := 0; i < total; i++ { - docs[i] = &d - } - err = coll.Insert(docs...) - c.Assert(err, IsNil) - - n, err := coll.Count() - c.Assert(err, IsNil) - c.Assert(n, Equals, total) - - // First, try restricting to a single chunk with a negative limit. - nresults := 0 - iter := coll.Find(nil).Limit(-total).Iter() - var discard struct{} - for iter.Next(&discard) { - nresults++ - } - if nresults < total/2 || nresults >= total { - c.Fatalf("Bad result size with negative limit: %d", nresults) - } - - cursorsOpen := serverCursorsOpen(session) - - // Try again, with a positive limit. Should reach the end now, - // using multiple chunks. - nresults = 0 - iter = coll.Find(nil).Limit(total).Iter() - for iter.Next(&discard) { - nresults++ - } - c.Assert(nresults, Equals, total) - - // Ensure the cursor used is properly killed. - c.Assert(serverCursorsOpen(session), Equals, cursorsOpen) - - // Edge case, -MinInt == -MinInt. - nresults = 0 - iter = coll.Find(nil).Limit(math.MinInt32).Iter() - for iter.Next(&discard) { - nresults++ - } - if nresults < total/2 || nresults >= total { - c.Fatalf("Bad result size with MinInt32 limit: %d", nresults) - } -} - -func (s *S) TestFindIterLimitWithBatch(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - // Ping the database to ensure the nonce has been received already. - c.Assert(session.Ping(), IsNil) - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3).Batch(2) - iter := query.Iter() - result := struct{ N int }{} - for i := 2; i < 5; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 1*GET_MORE_OP + 1*KILL_CURSORS_OP - c.Assert(stats.ReceivedOps, Equals, 2) // and its REPLY_OPs - c.Assert(stats.ReceivedDocs, Equals, 3) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestFindIterSortWithBatch(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - // Without this, the logic above breaks because Mongo refuses to - // return a cursor with an in-memory sort. - coll.EnsureIndexKey("n") - - // Ping the database to ensure the nonce has been received already. - c.Assert(session.Ping(), IsNil) - - session.Refresh() // Release socket. 
- - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$lte": 44}}).Sort("-n").Batch(2) - iter := query.Iter() - ns = []int{46, 45, 44, 43, 42, 41, 40} - result := struct{ N int }{} - for i := 2; i < len(ns); i++ { - c.Logf("i=%d", i) - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Close(), IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP - c.Assert(stats.ReceivedOps, Equals, 3) // and its REPLY_OPs - c.Assert(stats.ReceivedDocs, Equals, 5) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -// Test tailable cursors in a situation where Next has to sleep to -// respect the timeout requested on Tail. -func (s *S) TestFindTailTimeoutWithSleep(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - cresult := struct{ ErrMsg string }{} - - db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) - c.Assert(err, IsNil) - c.Assert(cresult.ErrMsg, Equals, "") - coll := db.C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - timeout := 3 * time.Second - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - iter := query.Tail(timeout) - - n := len(ns) - result := struct{ N int }{} - for i := 2; i != n; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { // The batch boundary. - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - mgo.ResetStats() - - // The following call to Next will block. - go func() { - // The internal AwaitData timing of MongoDB is around 2 seconds, - // so this should force mgo to sleep at least once by itself to - // respect the requested timeout. - time.Sleep(timeout + 5e8*time.Nanosecond) - session := session.New() - defer session.Close() - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 47}) - }() - - c.Log("Will wait for Next with N=47...") - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 47) - c.Log("Got Next with N=47!") - - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. 
If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 5) - c.Assert(stats.ReceivedOps, Equals, 4) // REPLY_OPs for 1*QUERY_OP for nonce + 2*GET_MORE_OPs + 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - - c.Log("Will wait for a result which will never come...") - - started := time.Now() - ok = iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, true) - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - - c.Log("Will now reuse the timed out tail cursor...") - - coll.Insert(M{"n": 48}) - ok = iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Close(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 48) -} - -// Test tailable cursors in a situation where Next never gets to sleep once -// to respect the timeout requested on Tail. -func (s *S) TestFindTailTimeoutNoSleep(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - cresult := struct{ ErrMsg string }{} - - db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) - c.Assert(err, IsNil) - c.Assert(cresult.ErrMsg, Equals, "") - coll := db.C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - timeout := 1 * time.Second - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - iter := query.Tail(timeout) - - n := len(ns) - result := struct{ N int }{} - for i := 2; i != n; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { // The batch boundary. - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - mgo.ResetStats() - - // The following call to Next will block. - go func() { - // The internal AwaitData timing of MongoDB is around 2 seconds, - // so this item should arrive within the AwaitData threshold. - time.Sleep(5e8) - session := session.New() - defer session.Close() - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 47}) - }() - - c.Log("Will wait for Next with N=47...") - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 47) - c.Log("Got Next with N=47!") - - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. 
If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 4) - c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - - c.Log("Will wait for a result which will never come...") - - started := time.Now() - ok = iter.Next(&result) - c.Assert(ok, Equals, false) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, true) - c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) - - c.Log("Will now reuse the timed out tail cursor...") - - coll.Insert(M{"n": 48}) - ok = iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Close(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 48) -} - -// Test tailable cursors in a situation where Next never gets to sleep once -// to respect the timeout requested on Tail. -func (s *S) TestFindTailNoTimeout(c *C) { - if *fast { - c.Skip("-fast") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - cresult := struct{ ErrMsg string }{} - - db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) - c.Assert(err, IsNil) - c.Assert(cresult.ErrMsg, Equals, "") - coll := db.C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - iter := query.Tail(-1) - c.Assert(err, IsNil) - - n := len(ns) - result := struct{ N int }{} - for i := 2; i != n; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { // The batch boundary. - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - } - - mgo.ResetStats() - - // The following call to Next will block. - go func() { - time.Sleep(5e8) - session := session.New() - defer session.Close() - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 47}) - }() - - c.Log("Will wait for Next with N=47...") - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(iter.Err(), IsNil) - c.Assert(iter.Timeout(), Equals, false) - c.Assert(result.N, Equals, 47) - c.Log("Got Next with N=47!") - - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 4) - c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - - c.Log("Will wait for a result which will never come...") - - gotNext := make(chan bool) - go func() { - ok := iter.Next(&result) - gotNext <- ok - }() - - select { - case ok := <-gotNext: - c.Fatalf("Next returned: %v", ok) - case <-time.After(3e9): - // Good. Should still be sleeping at that point. - } - - // Closing the session should cause Next to return. 
- session.Close() - - select { - case ok := <-gotNext: - c.Assert(ok, Equals, false) - c.Assert(iter.Err(), ErrorMatches, "Closed explicitly") - c.Assert(iter.Timeout(), Equals, false) - case <-time.After(1e9): - c.Fatal("Closing the session did not unblock Next") - } -} - -func (s *S) TestIterNextResetsResult(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{1, 2, 3} - for _, n := range ns { - coll.Insert(M{"n" + strconv.Itoa(n): n}) - } - - query := coll.Find(nil).Sort("$natural") - - i := 0 - var sresult *struct{ N1, N2, N3 int } - iter := query.Iter() - for iter.Next(&sresult) { - switch i { - case 0: - c.Assert(sresult.N1, Equals, 1) - c.Assert(sresult.N2+sresult.N3, Equals, 0) - case 1: - c.Assert(sresult.N2, Equals, 2) - c.Assert(sresult.N1+sresult.N3, Equals, 0) - case 2: - c.Assert(sresult.N3, Equals, 3) - c.Assert(sresult.N1+sresult.N2, Equals, 0) - } - i++ - } - c.Assert(iter.Close(), IsNil) - - i = 0 - var mresult M - iter = query.Iter() - for iter.Next(&mresult) { - delete(mresult, "_id") - switch i { - case 0: - c.Assert(mresult, DeepEquals, M{"n1": 1}) - case 1: - c.Assert(mresult, DeepEquals, M{"n2": 2}) - case 2: - c.Assert(mresult, DeepEquals, M{"n3": 3}) - } - i++ - } - c.Assert(iter.Close(), IsNil) - - i = 0 - var iresult interface{} - iter = query.Iter() - for iter.Next(&iresult) { - mresult, ok := iresult.(bson.M) - c.Assert(ok, Equals, true, Commentf("%#v", iresult)) - delete(mresult, "_id") - switch i { - case 0: - c.Assert(mresult, DeepEquals, bson.M{"n1": 1}) - case 1: - c.Assert(mresult, DeepEquals, bson.M{"n2": 2}) - case 2: - c.Assert(mresult, DeepEquals, bson.M{"n3": 3}) - } - i++ - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestFindForOnIter(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - iter := query.Iter() - - i := 2 - var result *struct{ N int } - err = iter.For(&result, func() error { - c.Assert(i < 7, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 1 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - i++ - return nil - }) - c.Assert(err, IsNil) - - session.Refresh() // Release socket. - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP - c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. - c.Assert(stats.ReceivedDocs, Equals, 5) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestFindFor(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - session.Refresh() // Release socket. - - mgo.ResetStats() - - query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) - - i := 2 - var result *struct{ N int } - err = query.For(&result, func() error { - c.Assert(i < 7, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 1 { - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) - } - i++ - return nil - }) - c.Assert(err, IsNil) - - session.Refresh() // Release socket. 
- - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP - c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. - c.Assert(stats.ReceivedDocs, Equals, 5) - c.Assert(stats.SocketsInUse, Equals, 0) -} - -func (s *S) TestFindForStopOnError(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - query := coll.Find(M{"n": M{"$gte": 42}}) - i := 2 - var result *struct{ N int } - err = query.For(&result, func() error { - c.Assert(i < 4, Equals, true) - c.Assert(result.N, Equals, ns[i]) - if i == 3 { - return fmt.Errorf("stop!") - } - i++ - return nil - }) - c.Assert(err, ErrorMatches, "stop!") -} - -func (s *S) TestFindForResetsResult(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{1, 2, 3} - for _, n := range ns { - coll.Insert(M{"n" + strconv.Itoa(n): n}) - } - - query := coll.Find(nil).Sort("$natural") - - i := 0 - var sresult *struct{ N1, N2, N3 int } - err = query.For(&sresult, func() error { - switch i { - case 0: - c.Assert(sresult.N1, Equals, 1) - c.Assert(sresult.N2+sresult.N3, Equals, 0) - case 1: - c.Assert(sresult.N2, Equals, 2) - c.Assert(sresult.N1+sresult.N3, Equals, 0) - case 2: - c.Assert(sresult.N3, Equals, 3) - c.Assert(sresult.N1+sresult.N2, Equals, 0) - } - i++ - return nil - }) - c.Assert(err, IsNil) - - i = 0 - var mresult M - err = query.For(&mresult, func() error { - delete(mresult, "_id") - switch i { - case 0: - c.Assert(mresult, DeepEquals, M{"n1": 1}) - case 1: - c.Assert(mresult, DeepEquals, M{"n2": 2}) - case 2: - c.Assert(mresult, DeepEquals, M{"n3": 3}) - } - i++ - return nil - }) - c.Assert(err, IsNil) - - i = 0 - var iresult interface{} - err = query.For(&iresult, func() error { - mresult, ok := iresult.(bson.M) - c.Assert(ok, Equals, true, Commentf("%#v", iresult)) - delete(mresult, "_id") - switch i { - case 0: - c.Assert(mresult, DeepEquals, bson.M{"n1": 1}) - case 1: - c.Assert(mresult, DeepEquals, bson.M{"n2": 2}) - case 2: - c.Assert(mresult, DeepEquals, bson.M{"n3": 3}) - } - i++ - return nil - }) - c.Assert(err, IsNil) -} - -func (s *S) TestFindIterSnapshot(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Insane amounts of logging otherwise due to the - // amount of data being shuffled. - mgo.SetDebug(false) - defer mgo.SetDebug(true) - - coll := session.DB("mydb").C("mycoll") - - var a [1024000]byte - - for n := 0; n < 10; n++ { - err := coll.Insert(M{"_id": n, "n": n, "a1": &a}) - c.Assert(err, IsNil) - } - - query := coll.Find(M{"n": M{"$gt": -1}}).Batch(2).Prefetch(0) - query.Snapshot() - iter := query.Iter() - - seen := map[int]bool{} - result := struct { - Id int "_id" - }{} - for iter.Next(&result) { - if len(seen) == 2 { - // Grow all entries so that they have to move. - // Backwards so that the order is inverted. 
- for n := 10; n >= 0; n-- { - _, err := coll.Upsert(M{"_id": n}, M{"$set": M{"a2": &a}}) - c.Assert(err, IsNil) - } - } - if seen[result.Id] { - c.Fatalf("seen duplicated key: %d", result.Id) - } - seen[result.Id] = true - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestSort(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - coll.Insert(M{"a": 1, "b": 1}) - coll.Insert(M{"a": 2, "b": 2}) - coll.Insert(M{"a": 2, "b": 1}) - coll.Insert(M{"a": 0, "b": 1}) - coll.Insert(M{"a": 2, "b": 0}) - coll.Insert(M{"a": 0, "b": 2}) - coll.Insert(M{"a": 1, "b": 2}) - coll.Insert(M{"a": 0, "b": 0}) - coll.Insert(M{"a": 1, "b": 0}) - - query := coll.Find(M{}) - query.Sort("-a") // Should be ignored. - query.Sort("-b", "a") - iter := query.Iter() - - l := make([]int, 18) - r := struct{ A, B int }{} - for i := 0; i != len(l); i += 2 { - ok := iter.Next(&r) - c.Assert(ok, Equals, true) - c.Assert(err, IsNil) - l[i] = r.A - l[i+1] = r.B - } - - c.Assert(l, DeepEquals, []int{0, 2, 1, 2, 2, 2, 0, 1, 1, 1, 2, 1, 0, 0, 1, 0, 2, 0}) -} - -func (s *S) TestSortWithBadArgs(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - f1 := func() { coll.Find(nil).Sort("") } - f2 := func() { coll.Find(nil).Sort("+") } - f3 := func() { coll.Find(nil).Sort("foo", "-") } - - for _, f := range []func(){f1, f2, f3} { - c.Assert(f, PanicMatches, "Sort: empty field name") - } -} - -func (s *S) TestPrefetching(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - mgo.SetDebug(false) - docs := make([]interface{}, 800) - for i := 0; i != 600; i++ { - docs[i] = bson.D{{"n", i}} - } - coll.Insert(docs...) - - for testi := 0; testi < 5; testi++ { - mgo.ResetStats() - - var iter *mgo.Iter - var beforeMore int - - switch testi { - case 0: // The default session value. - session.SetBatch(100) - iter = coll.Find(M{}).Iter() - beforeMore = 75 - - case 2: // Changing the session value. - session.SetBatch(100) - session.SetPrefetch(0.27) - iter = coll.Find(M{}).Iter() - beforeMore = 73 - - case 1: // Changing via query methods. - iter = coll.Find(M{}).Prefetch(0.27).Batch(100).Iter() - beforeMore = 73 - - case 3: // With prefetch on first document. - iter = coll.Find(M{}).Prefetch(1.0).Batch(100).Iter() - beforeMore = 0 - - case 4: // Without prefetch. - iter = coll.Find(M{}).Prefetch(0).Batch(100).Iter() - beforeMore = 100 - } - - pings := 0 - for batchi := 0; batchi < len(docs)/100-1; batchi++ { - c.Logf("Iterating over %d documents on batch %d", beforeMore, batchi) - var result struct{ N int } - for i := 0; i < beforeMore; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err())) - } - beforeMore = 99 - c.Logf("Done iterating.") - - session.Run("ping", nil) // Roundtrip to settle down. - pings++ - - stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, (batchi+1)*100+pings) - - c.Logf("Iterating over one more document on batch %d", batchi) - ok := iter.Next(&result) - c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err())) - c.Logf("Done iterating.") - - session.Run("ping", nil) // Roundtrip to settle down. 
- pings++ - - stats = mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, (batchi+2)*100+pings) - } - } -} - -func (s *S) TestSafeSetting(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Check the default - safe := session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 0) - c.Assert(safe.FSync, Equals, false) - c.Assert(safe.J, Equals, false) - - // Tweak it - session.SetSafe(&mgo.Safe{W: 1, WTimeout: 2, FSync: true}) - safe = session.Safe() - c.Assert(safe.W, Equals, 1) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 2) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // Reset it again. - session.SetSafe(&mgo.Safe{}) - safe = session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 0) - c.Assert(safe.FSync, Equals, false) - c.Assert(safe.J, Equals, false) - - // Ensure safety to something more conservative. - session.SetSafe(&mgo.Safe{W: 5, WTimeout: 6, J: true}) - safe = session.Safe() - c.Assert(safe.W, Equals, 5) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 6) - c.Assert(safe.FSync, Equals, false) - c.Assert(safe.J, Equals, true) - - // Ensure safety to something less conservative won't change it. - session.EnsureSafe(&mgo.Safe{W: 4, WTimeout: 7}) - safe = session.Safe() - c.Assert(safe.W, Equals, 5) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 6) - c.Assert(safe.FSync, Equals, false) - c.Assert(safe.J, Equals, true) - - // But to something more conservative will. - session.EnsureSafe(&mgo.Safe{W: 6, WTimeout: 4, FSync: true}) - safe = session.Safe() - c.Assert(safe.W, Equals, 6) - c.Assert(safe.WMode, Equals, "") - c.Assert(safe.WTimeout, Equals, 4) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // Even more conservative. - session.EnsureSafe(&mgo.Safe{WMode: "majority", WTimeout: 2}) - safe = session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "majority") - c.Assert(safe.WTimeout, Equals, 2) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // WMode always overrides, whatever it is, but J doesn't. - session.EnsureSafe(&mgo.Safe{WMode: "something", J: true}) - safe = session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "something") - c.Assert(safe.WTimeout, Equals, 2) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // EnsureSafe with nil does nothing. - session.EnsureSafe(nil) - safe = session.Safe() - c.Assert(safe.W, Equals, 0) - c.Assert(safe.WMode, Equals, "something") - c.Assert(safe.WTimeout, Equals, 2) - c.Assert(safe.FSync, Equals, true) - c.Assert(safe.J, Equals, false) - - // Changing the safety of a cloned session doesn't touch the original. - clone := session.Clone() - defer clone.Close() - clone.EnsureSafe(&mgo.Safe{WMode: "foo"}) - safe = session.Safe() - c.Assert(safe.WMode, Equals, "something") -} - -func (s *S) TestSafeInsert(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - // Insert an element with a predefined key. - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - mgo.ResetStats() - - // Session should be safe by default, so inserting it again must fail. 
- err = coll.Insert(M{"_id": 1}) - c.Assert(err, ErrorMatches, ".*E11000 duplicate.*") - c.Assert(err.(*mgo.LastError).Code, Equals, 11000) - - // It must have sent two operations (INSERT_OP + getLastError QUERY_OP) - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) - - mgo.ResetStats() - - // If we disable safety, though, it won't complain. - session.SetSafe(nil) - err = coll.Insert(M{"_id": 1}) - c.Assert(err, IsNil) - - // Must have sent a single operation this time (just the INSERT_OP) - stats = mgo.GetStats() - c.Assert(stats.SentOps, Equals, 1) -} - -func (s *S) TestSafeParameters(c *C) { - session, err := mgo.Dial("localhost:40011") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - // Tweak the safety parameters to something unachievable. - session.SetSafe(&mgo.Safe{W: 4, WTimeout: 100}) - err = coll.Insert(M{"_id": 1}) - c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves") - if !s.versionAtLeast(2, 6) { - // 2.6 turned it into a query error. - c.Assert(err.(*mgo.LastError).WTimeout, Equals, true) - } -} - -func (s *S) TestQueryErrorOne(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - result := struct { - Err string "$err" - }{} - - err = coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).One(&result) - c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") - c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") - if s.versionAtLeast(2, 6) { - // Oh, the dance of error codes. :-( - c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) - } else { - c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) - } - - // The result should be properly unmarshalled with QueryError - c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") -} - -func (s *S) TestQueryErrorNext(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - result := struct { - Err string "$err" - }{} - - iter := coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).Iter() - - ok := iter.Next(&result) - c.Assert(ok, Equals, false) - - err = iter.Close() - c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") - c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") - if s.versionAtLeast(2, 6) { - // Oh, the dance of error codes. 
:-( - c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) - } else { - c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) - } - c.Assert(iter.Err(), Equals, err) - - // The result should be properly unmarshalled with QueryError - c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") -} - -func (s *S) TestEnsureIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - index1 := mgo.Index{ - Key: []string{"a"}, - Background: true, - } - - index2 := mgo.Index{ - Key: []string{"a", "-b"}, - Unique: true, - DropDups: true, - } - - // Obsolete: - index3 := mgo.Index{ - Key: []string{"@loc_old"}, - Min: -500, - Max: 500, - Bits: 32, - } - - index4 := mgo.Index{ - Key: []string{"$2d:loc"}, - Min: -500, - Max: 500, - Bits: 32, - } - - coll := session.DB("mydb").C("mycoll") - - for _, index := range []mgo.Index{index1, index2, index3, index4} { - err = coll.EnsureIndex(index) - c.Assert(err, IsNil) - } - - sysidx := session.DB("mydb").C("system.indexes") - - result1 := M{} - err = sysidx.Find(M{"name": "a_1"}).One(result1) - c.Assert(err, IsNil) - - result2 := M{} - err = sysidx.Find(M{"name": "a_1_b_-1"}).One(result2) - c.Assert(err, IsNil) - - result3 := M{} - err = sysidx.Find(M{"name": "loc_old_2d"}).One(result3) - c.Assert(err, IsNil) - - result4 := M{} - err = sysidx.Find(M{"name": "loc_2d"}).One(result4) - c.Assert(err, IsNil) - - delete(result1, "v") - expected1 := M{ - "name": "a_1", - "key": M{"a": 1}, - "ns": "mydb.mycoll", - "background": true, - } - c.Assert(result1, DeepEquals, expected1) - - delete(result2, "v") - expected2 := M{ - "name": "a_1_b_-1", - "key": M{"a": 1, "b": -1}, - "ns": "mydb.mycoll", - "unique": true, - "dropDups": true, - } - c.Assert(result2, DeepEquals, expected2) - - delete(result3, "v") - expected3 := M{ - "name": "loc_old_2d", - "key": M{"loc_old": "2d"}, - "ns": "mydb.mycoll", - "min": -500, - "max": 500, - "bits": 32, - } - c.Assert(result3, DeepEquals, expected3) - - delete(result4, "v") - expected4 := M{ - "name": "loc_2d", - "key": M{"loc": "2d"}, - "ns": "mydb.mycoll", - "min": -500, - "max": 500, - "bits": 32, - } - c.Assert(result4, DeepEquals, expected4) - - // Ensure the index actually works for real. - err = coll.Insert(M{"a": 1, "b": 1}) - c.Assert(err, IsNil) - err = coll.Insert(M{"a": 1, "b": 1}) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") - c.Assert(mgo.IsDup(err), Equals, true) -} - -func (s *S) TestEnsureIndexWithBadInfo(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndex(mgo.Index{}) - c.Assert(err, ErrorMatches, "invalid index key:.*") - - err = coll.EnsureIndex(mgo.Index{Key: []string{""}}) - c.Assert(err, ErrorMatches, "invalid index key:.*") -} - -func (s *S) TestEnsureIndexWithUnsafeSession(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - session.SetSafe(nil) - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - err = coll.Insert(M{"a": 1}) - c.Assert(err, IsNil) - - // Should fail since there are duplicated entries. 
- index := mgo.Index{ - Key: []string{"a"}, - Unique: true, - } - - err = coll.EnsureIndex(index) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") -} - -func (s *S) TestEnsureIndexKey(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - err = coll.EnsureIndexKey("a", "-b") - c.Assert(err, IsNil) - - sysidx := session.DB("mydb").C("system.indexes") - - result1 := M{} - err = sysidx.Find(M{"name": "a_1"}).One(result1) - c.Assert(err, IsNil) - - result2 := M{} - err = sysidx.Find(M{"name": "a_1_b_-1"}).One(result2) - c.Assert(err, IsNil) - - delete(result1, "v") - expected1 := M{ - "name": "a_1", - "key": M{"a": 1}, - "ns": "mydb.mycoll", - } - c.Assert(result1, DeepEquals, expected1) - - delete(result2, "v") - expected2 := M{ - "name": "a_1_b_-1", - "key": M{"a": 1, "b": -1}, - "ns": "mydb.mycoll", - } - c.Assert(result2, DeepEquals, expected2) -} - -func (s *S) TestEnsureIndexDropIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - err = coll.EnsureIndexKey("-b") - c.Assert(err, IsNil) - - err = coll.DropIndex("-b") - c.Assert(err, IsNil) - - sysidx := session.DB("mydb").C("system.indexes") - dummy := &struct{}{} - - err = sysidx.Find(M{"name": "a_1"}).One(dummy) - c.Assert(err, IsNil) - - err = sysidx.Find(M{"name": "b_1"}).One(dummy) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.DropIndex("a") - c.Assert(err, IsNil) - - err = sysidx.Find(M{"name": "a_1"}).One(dummy) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.DropIndex("a") - c.Assert(err, ErrorMatches, "index not found.*") -} - -func (s *S) TestEnsureIndexCaching(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - mgo.ResetStats() - - // Second EnsureIndex should be cached and do nothing. - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 0) - - // Resetting the cache should make it contact the server again. - session.ResetIndexCache() - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - stats = mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) - - // Dropping the index should also drop the cached index key. - err = coll.DropIndex("a") - c.Assert(err, IsNil) - - mgo.ResetStats() - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - stats = mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) -} - -func (s *S) TestEnsureIndexGetIndexes(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = coll.EnsureIndexKey("-b") - c.Assert(err, IsNil) - - err = coll.EnsureIndexKey("a") - c.Assert(err, IsNil) - - // Obsolete. 
- err = coll.EnsureIndexKey("@c") - c.Assert(err, IsNil) - - err = coll.EnsureIndexKey("$2d:d") - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(err, IsNil) - - c.Assert(indexes[0].Name, Equals, "_id_") - c.Assert(indexes[1].Name, Equals, "a_1") - c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) - c.Assert(indexes[2].Name, Equals, "b_-1") - c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) - c.Assert(indexes[3].Name, Equals, "c_2d") - c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"}) - c.Assert(indexes[4].Name, Equals, "d_2d") - c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) -} - -func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({b: -1})"}}, nil) - c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({a: 1})"}}, nil) - c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({c: '2d'})"}}, nil) - c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({d: -1, e: 1})"}}, nil) - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(err, IsNil) - - c.Assert(indexes[0].Name, Equals, "_id_") - c.Assert(indexes[1].Name, Equals, "a_1") - c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) - c.Assert(indexes[2].Name, Equals, "b_-1") - c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) - c.Assert(indexes[3].Name, Equals, "c_2d") - c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"}) - c.Assert(indexes[4].Name, Equals, "d_-1_e_1") - c.Assert(indexes[4].Key, DeepEquals, []string{"-d", "e"}) -} - -var testTTL = flag.Bool("test-ttl", false, "test TTL collections (may take 1 minute)") - -func (s *S) TestEnsureIndexExpireAfter(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - session.SetSafe(nil) - - coll := session.DB("mydb").C("mycoll") - - err = coll.Insert(M{"n": 1, "t": time.Now().Add(-120 * time.Second)}) - c.Assert(err, IsNil) - err = coll.Insert(M{"n": 2, "t": time.Now()}) - c.Assert(err, IsNil) - - // Should fail since there are duplicated entries. 
- index := mgo.Index{ - Key: []string{"t"}, - ExpireAfter: 1 * time.Minute, - } - - err = coll.EnsureIndex(index) - c.Assert(err, IsNil) - - indexes, err := coll.Indexes() - c.Assert(err, IsNil) - c.Assert(indexes[1].Name, Equals, "t_1") - c.Assert(indexes[1].ExpireAfter, Equals, 1*time.Minute) - - if *testTTL { - worked := false - stop := time.Now().Add(70 * time.Second) - for time.Now().Before(stop) { - n, err := coll.Count() - c.Assert(err, IsNil) - if n == 1 { - worked = true - break - } - c.Assert(n, Equals, 2) - c.Logf("Still has 2 entries...") - time.Sleep(1 * time.Second) - } - if !worked { - c.Fatalf("TTL index didn't work") - } - } -} - -func (s *S) TestDistinct(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - var result []int - err = coll.Find(M{"n": M{"$gt": 2}}).Sort("n").Distinct("n", &result) - - sort.IntSlice(result).Sort() - c.Assert(result, DeepEquals, []int{3, 4, 6}) -} - -func (s *S) TestMapReduce(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - } - var result []struct { - Id int "_id" - Value int - } - - info, err := coll.Find(M{"n": M{"$gt": 2}}).MapReduce(job, &result) - c.Assert(err, IsNil) - c.Assert(info.InputCount, Equals, 4) - c.Assert(info.EmitCount, Equals, 4) - c.Assert(info.OutputCount, Equals, 3) - c.Assert(info.VerboseTime, IsNil) - - expected := map[int]int{3: 1, 4: 2, 6: 1} - for _, item := range result { - c.Logf("Item: %#v", &item) - c.Assert(item.Value, Equals, expected[item.Id]) - expected[item.Id] = -1 - } - - // Weak attempt of testing that Sort gets delivered. 
- _, err = coll.Find(nil).Sort("-n").MapReduce(job, &result) - _, isQueryError := err.(*mgo.QueryError) - c.Assert(isQueryError, Equals, true) -} - -func (s *S) TestMapReduceFinalize(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1) }", - Reduce: "function(key, values) { return Array.sum(values) }", - Finalize: "function(key, count) { return {count: count} }", - } - var result []struct { - Id int "_id" - Value struct{ Count int } - } - _, err = coll.Find(nil).MapReduce(job, &result) - c.Assert(err, IsNil) - - expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} - for _, item := range result { - c.Logf("Item: %#v", &item) - c.Assert(item.Value.Count, Equals, expected[item.Id]) - expected[item.Id] = -1 - } -} - -func (s *S) TestMapReduceToCollection(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Out: "mr", - } - - info, err := coll.Find(nil).MapReduce(job, nil) - c.Assert(err, IsNil) - c.Assert(info.InputCount, Equals, 7) - c.Assert(info.EmitCount, Equals, 7) - c.Assert(info.OutputCount, Equals, 5) - c.Assert(info.Collection, Equals, "mr") - c.Assert(info.Database, Equals, "mydb") - - expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} - var item *struct { - Id int "_id" - Value int - } - mr := session.DB("mydb").C("mr") - iter := mr.Find(nil).Iter() - for iter.Next(&item) { - c.Logf("Item: %#v", &item) - c.Assert(item.Value, Equals, expected[item.Id]) - expected[item.Id] = -1 - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestMapReduceToOtherDb(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Out: bson.D{{"replace", "mr"}, {"db", "otherdb"}}, - } - - info, err := coll.Find(nil).MapReduce(job, nil) - c.Assert(err, IsNil) - c.Assert(info.InputCount, Equals, 7) - c.Assert(info.EmitCount, Equals, 7) - c.Assert(info.OutputCount, Equals, 5) - c.Assert(info.Collection, Equals, "mr") - c.Assert(info.Database, Equals, "otherdb") - - expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} - var item *struct { - Id int "_id" - Value int - } - mr := session.DB("otherdb").C("mr") - iter := mr.Find(nil).Iter() - for iter.Next(&item) { - c.Logf("Item: %#v", &item) - c.Assert(item.Value, Equals, expected[item.Id]) - expected[item.Id] = -1 - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestMapReduceOutOfOrder(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Out: bson.M{"a": "a", "z": "z", "replace": "mr", "db": "otherdb", "b": "b", "y": "y"}, 
- } - - info, err := coll.Find(nil).MapReduce(job, nil) - c.Assert(err, IsNil) - c.Assert(info.Collection, Equals, "mr") - c.Assert(info.Database, Equals, "otherdb") -} - -func (s *S) TestMapReduceScope(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - coll.Insert(M{"n": 1}) - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, x); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Scope: M{"x": 42}, - } - - var result []bson.M - _, err = coll.Find(nil).MapReduce(job, &result) - c.Assert(len(result), Equals, 1) - c.Assert(result[0]["value"], Equals, 42.0) -} - -func (s *S) TestMapReduceVerbose(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for i := 0; i < 100; i++ { - err = coll.Insert(M{"n": i}) - c.Assert(err, IsNil) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - Verbose: true, - } - - info, err := coll.Find(nil).MapReduce(job, nil) - c.Assert(err, IsNil) - c.Assert(info.VerboseTime, NotNil) -} - -func (s *S) TestMapReduceLimit(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { - coll.Insert(M{"n": i}) - } - - job := &mgo.MapReduce{ - Map: "function() { emit(this.n, 1); }", - Reduce: "function(key, values) { return Array.sum(values); }", - } - - var result []bson.M - _, err = coll.Find(nil).Limit(3).MapReduce(job, &result) - c.Assert(err, IsNil) - c.Assert(len(result), Equals, 3) -} - -func (s *S) TestBuildInfo(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - info, err := session.BuildInfo() - c.Assert(err, IsNil) - - var v []int - for i, a := range strings.Split(info.Version, ".") { - for _, token := range []string{"-rc", "-pre"} { - if i == 2 && strings.Contains(a, token) { - a = a[:strings.Index(a, token)] - info.VersionArray[len(info.VersionArray)-1] = 0 - } - } - n, err := strconv.Atoi(a) - c.Assert(err, IsNil) - v = append(v, n) - } - for len(v) < 4 { - v = append(v, 0) - } - - c.Assert(info.VersionArray, DeepEquals, v) - c.Assert(info.GitVersion, Matches, "[a-z0-9]+") - c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*") - if info.Bits != 32 && info.Bits != 64 { - c.Fatalf("info.Bits is %d", info.Bits) - } - if info.MaxObjectSize < 8192 { - c.Fatalf("info.MaxObjectSize seems too small: %d", info.MaxObjectSize) - } -} - -func (s *S) TestZeroTimeRoundtrip(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - var d struct{ T time.Time } - conn := session.DB("mydb").C("mycoll") - err = conn.Insert(d) - c.Assert(err, IsNil) - - var result bson.M - err = conn.Find(nil).One(&result) - c.Assert(err, IsNil) - t, isTime := result["t"].(time.Time) - c.Assert(isTime, Equals, true) - c.Assert(t, Equals, time.Time{}) -} - -func (s *S) TestFsyncLock(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - clone := session.Clone() - defer clone.Close() - - err = session.FsyncLock() - c.Assert(err, IsNil) - - done := make(chan time.Time) - go func() { - time.Sleep(3e9) - now := time.Now() - err := session.FsyncUnlock() - c.Check(err, IsNil) - done <- now - }() - - err = 
clone.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) - unlocked := time.Now() - unlocking := <-done - c.Assert(err, IsNil) - - c.Assert(unlocked.After(unlocking), Equals, true) - c.Assert(unlocked.Sub(unlocking) < 1e9, Equals, true) -} - -func (s *S) TestFsync(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - // Not much to do here. Just a smoke check. - err = session.Fsync(false) - c.Assert(err, IsNil) - err = session.Fsync(true) - c.Assert(err, IsNil) -} - -func (s *S) TestPipeIter(c *C) { - if !s.versionAtLeast(2, 1) { - c.Skip("Pipe only works on 2.1+") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - coll.Insert(M{"n": n}) - } - - iter := coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}).Iter() - result := struct{ N int }{} - for i := 2; i < 7; i++ { - ok := iter.Next(&result) - c.Assert(ok, Equals, true) - c.Assert(result.N, Equals, ns[i]) - } - - c.Assert(iter.Next(&result), Equals, false) - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestPipeAll(c *C) { - if !s.versionAtLeast(2, 1) { - c.Skip("Pipe only works on 2.1+") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err := coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - var result []struct{ N int } - err = coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}).All(&result) - c.Assert(err, IsNil) - for i := 2; i < 7; i++ { - c.Assert(result[i-2].N, Equals, ns[i]) - } -} - -func (s *S) TestPipeOne(c *C) { - if !s.versionAtLeast(2, 1) { - c.Skip("Pipe only works on 2.1+") - } - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - result := struct{ A, B int }{} - - pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}}) - err = pipe.One(&result) - c.Assert(err, IsNil) - c.Assert(result.A, Equals, 1) - c.Assert(result.B, Equals, 3) - - pipe = coll.Pipe([]M{{"$match": M{"a": 2}}}) - err = pipe.One(&result) - c.Assert(err, Equals, mgo.ErrNotFound) -} - -func (s *S) TestBatch1Bug(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for i := 0; i < 3; i++ { - err := coll.Insert(M{"n": i}) - c.Assert(err, IsNil) - } - - var ns []struct{ N int } - err = coll.Find(nil).Batch(1).All(&ns) - c.Assert(err, IsNil) - c.Assert(len(ns), Equals, 3) - - session.SetBatch(1) - err = coll.Find(nil).All(&ns) - c.Assert(err, IsNil) - c.Assert(len(ns), Equals, 3) -} - -func (s *S) TestInterfaceIterBug(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - - for i := 0; i < 3; i++ { - err := coll.Insert(M{"n": i}) - c.Assert(err, IsNil) - } - - var result interface{} - - i := 0 - iter := coll.Find(nil).Sort("n").Iter() - for iter.Next(&result) { - c.Assert(result.(bson.M)["n"], Equals, i) - i++ - } - c.Assert(iter.Close(), IsNil) -} - -func (s *S) TestFindIterCloseKillsCursor(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - cursors := serverCursorsOpen(session) - - coll := 
session.DB("mydb").C("mycoll") - ns := []int{40, 41, 42, 43, 44, 45, 46} - for _, n := range ns { - err = coll.Insert(M{"n": n}) - c.Assert(err, IsNil) - } - - iter := coll.Find(nil).Batch(2).Iter() - c.Assert(iter.Next(bson.M{}), Equals, true) - - c.Assert(iter.Close(), IsNil) - c.Assert(serverCursorsOpen(session), Equals, cursors) -} - -func (s *S) TestLogReplay(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - for i := 0; i < 5; i++ { - err = coll.Insert(M{"ts": time.Now()}) - c.Assert(err, IsNil) - } - - iter := coll.Find(nil).LogReplay().Iter() - if s.versionAtLeast(2, 6) { - // This used to fail in 2.4. Now it's just a smoke test. - c.Assert(iter.Err(), IsNil) - } else { - c.Assert(iter.Next(bson.M{}), Equals, false) - c.Assert(iter.Err(), ErrorMatches, "no ts field in query") - } -} - -func (s *S) TestSetCursorTimeout(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - err = coll.Insert(M{"n": 42}) - - // This is just a smoke test. Won't wait 10 minutes for an actual timeout. - - session.SetCursorTimeout(0) - - var result struct{ N int } - iter := coll.Find(nil).Iter() - c.Assert(iter.Next(&result), Equals, true) - c.Assert(result.N, Equals, 42) - c.Assert(iter.Next(&result), Equals, false) -} diff --git a/vendor/labix.org/v2/mgo/suite_test.go b/vendor/labix.org/v2/mgo/suite_test.go deleted file mode 100644 index a846c5134..000000000 --- a/vendor/labix.org/v2/mgo/suite_test.go +++ /dev/null @@ -1,240 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - "errors" - "flag" - "fmt" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - . 
"launchpad.net/gocheck" - "net" - "os/exec" - "strconv" - "syscall" - - "testing" - "time" -) - -var fast = flag.Bool("fast", false, "Skip slow tests") - -type M bson.M - -type cLogger C - -func (c *cLogger) Output(calldepth int, s string) error { - ns := time.Now().UnixNano() - t := float64(ns%100e9) / 1e9 - ((*C)(c)).Logf("[LOG] %.05f %s", t, s) - return nil -} - -func TestAll(t *testing.T) { - TestingT(t) -} - -type S struct { - session *mgo.Session - stopped bool - build mgo.BuildInfo - frozen []string -} - -func (s *S) versionAtLeast(v ...int) bool { - for i := range v { - if i == len(s.build.VersionArray) { - return false - } - if s.build.VersionArray[i] < v[i] { - return false - } - } - return true -} - -var _ = Suite(&S{}) - -func (s *S) SetUpSuite(c *C) { - mgo.SetDebug(true) - mgo.SetStats(true) - s.StartAll() - - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - s.build, err = session.BuildInfo() - c.Check(err, IsNil) - session.Close() -} - -func (s *S) SetUpTest(c *C) { - err := run("mongo --nodb testdb/dropall.js") - if err != nil { - panic(err.Error()) - } - mgo.SetLogger((*cLogger)(c)) - mgo.ResetStats() -} - -func (s *S) TearDownTest(c *C) { - if s.stopped { - s.StartAll() - } - for _, host := range s.frozen { - if host != "" { - s.Thaw(host) - } - } - var stats mgo.Stats - for i := 0; ; i++ { - stats = mgo.GetStats() - if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { - break - } - if i == 20 { - c.Fatal("Test left sockets in a dirty state") - } - c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive) - time.Sleep(500 * time.Millisecond) - } - for i := 0; ; i++ { - stats = mgo.GetStats() - if stats.Clusters == 0 { - break - } - if i == 60 { - c.Fatal("Test left clusters alive") - } - c.Logf("Waiting for clusters to die: %d alive", stats.Clusters) - time.Sleep(1 * time.Second) - } -} - -func (s *S) Stop(host string) { - // Give a moment for slaves to sync and avoid getting rollback issues. - time.Sleep(2 * time.Second) - err := run("cd _testdb && supervisorctl stop " + supvName(host)) - if err != nil { - panic(err) - } - s.stopped = true -} - -func (s *S) pid(host string) int { - output, err := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fp").CombinedOutput() - if err != nil { - panic(err) - } - pidstr := string(output[1 : len(output)-1]) - pid, err := strconv.Atoi(pidstr) - if err != nil { - panic("cannot convert pid to int: " + pidstr) - } - return pid -} - -func (s *S) Freeze(host string) { - err := syscall.Kill(s.pid(host), syscall.SIGSTOP) - if err != nil { - panic(err) - } - s.frozen = append(s.frozen, host) -} - -func (s *S) Thaw(host string) { - err := syscall.Kill(s.pid(host), syscall.SIGCONT) - if err != nil { - panic(err) - } - for i, frozen := range s.frozen { - if frozen == host { - s.frozen[i] = "" - } - } -} - -func (s *S) StartAll() { - // Restart any stopped nodes. 
- run("cd _testdb && supervisorctl start all") - err := run("cd testdb && mongo --nodb wait.js") - if err != nil { - panic(err) - } - s.stopped = false -} - -func run(command string) error { - output, err := exec.Command("/bin/sh", "-c", command).CombinedOutput() - if err != nil { - msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output)) - return errors.New(msg) - } - return nil -} - -var supvNames = map[string]string{ - "40001": "db1", - "40002": "db2", - "40011": "rs1a", - "40012": "rs1b", - "40013": "rs1c", - "40021": "rs2a", - "40022": "rs2b", - "40023": "rs2c", - "40031": "rs3a", - "40032": "rs3b", - "40033": "rs3c", - "40041": "rs4a", - "40101": "cfg1", - "40102": "cfg2", - "40103": "cfg3", - "40201": "s1", - "40202": "s2", - "40203": "s3", -} - -// supvName returns the supervisord name for the given host address. -func supvName(host string) string { - host, port, err := net.SplitHostPort(host) - if err != nil { - panic(err) - } - name, ok := supvNames[port] - if !ok { - panic("Unknown host: " + host) - } - return name -} - -func hostPort(host string) string { - _, port, err := net.SplitHostPort(host) - if err != nil { - panic(err) - } - return port -} diff --git a/vendor/labix.org/v2/mgo/testdb/dropall.js b/vendor/labix.org/v2/mgo/testdb/dropall.js deleted file mode 100644 index ca1289263..000000000 --- a/vendor/labix.org/v2/mgo/testdb/dropall.js +++ /dev/null @@ -1,47 +0,0 @@ - -var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203] -var auth = [40002, 40103, 40203, 40031] - -for (var i in ports) { - var port = ports[i] - var server = "localhost:" + port - var mongo = new Mongo("localhost:" + port) - var admin = mongo.getDB("admin") - - for (var j in auth) { - if (auth[j] == port) { - admin.auth("root", "rapadura") - admin.system.users.find().forEach(function(u) { - if (u.user == "root" || u.user == "reader") { - return; - } - if (typeof admin.dropUser == "function") { - mongo.getDB(u.db).dropUser(u.user); - } else { - admin.removeUser(u.user); - } - }) - break - } - } - var result = admin.runCommand({"listDatabases": 1}) - // Why is the command returning undefined!? - while (typeof result.databases == "undefined") { - print("dropall.js: listing databases of :" + port + " got:", result) - result = admin.runCommand({"listDatabases": 1}) - } - var dbs = result.databases - for (var j = 0; j != dbs.length; j++) { - var db = dbs[j] - switch (db.name) { - case "admin": - case "local": - case "config": - break - default: - mongo.getDB(db.name).dropDatabase() - } - } -} - -// vim:ts=4:sw=4:et diff --git a/vendor/labix.org/v2/mgo/testdb/init.js b/vendor/labix.org/v2/mgo/testdb/init.js deleted file mode 100644 index 02a6c61c8..000000000 --- a/vendor/labix.org/v2/mgo/testdb/init.js +++ /dev/null @@ -1,103 +0,0 @@ -//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5} -var settings = {}; - -// We know the master of the first set (pri=1), but not of the second. 
-var rs1cfg = {_id: "rs1", - members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}}, - {_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}}, - {_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}], - settings: settings} -var rs2cfg = {_id: "rs2", - members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}, - {_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}}, - {_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}], - settings: settings} -var rs3cfg = {_id: "rs3", - members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}}, - {_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}}, - {_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}], - settings: settings} - -for (var i = 0; i != 60; i++) { - try { - db1 = new Mongo("127.0.0.1:40001").getDB("admin") - db2 = new Mongo("127.0.0.1:40002").getDB("admin") - rs1a = new Mongo("127.0.0.1:40011").getDB("admin") - rs2a = new Mongo("127.0.0.1:40021").getDB("admin") - rs3a = new Mongo("127.0.0.1:40031").getDB("admin") - break - } catch(err) { - print("Can't connect yet...") - } - sleep(1000) -} - -rs1a.runCommand({replSetInitiate: rs1cfg}) -rs2a.runCommand({replSetInitiate: rs2cfg}) -rs3a.runCommand({replSetInitiate: rs3cfg}) - -function configShards() { - cfg1 = new Mongo("127.0.0.1:40201").getDB("admin") - cfg1.runCommand({addshard: "127.0.0.1:40001"}) - cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"}) - - cfg2 = new Mongo("127.0.0.1:40202").getDB("admin") - cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"}) - - cfg3 = new Mongo("127.0.0.1:40203").getDB("admin") - cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"}) -} - -function configAuth() { - var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"] - for (var i in addrs) { - var db = new Mongo(addrs[i]).getDB("admin") - var v = db.serverBuildInfo().versionArray - if (v < [2, 5]) { - db.addUser("root", "rapadura") - } else { - db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) - } - db.auth("root", "rapadura") - if (v >= [2, 6]) { - db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) - } else if (v >= [2, 4]) { - db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) - } else { - db.addUser("reader", "rapadura", true) - } - } -} - -function countHealthy(rs) { - var status = rs.runCommand({replSetGetStatus: 1}) - var count = 0 - if (typeof status.members != "undefined") { - for (var i = 0; i != status.members.length; i++) { - var m = status.members[i] - if (m.health == 1 && (m.state == 1 || m.state == 2)) { - count += 1 - } - } - } - return count -} - -var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length - -for (var i = 0; i != 60; i++) { - var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) - print("Replica sets have", count, "healthy nodes.") - if (count == totalRSMembers) { - sleep(2000) - configShards() - configAuth() - quit(0) - } - sleep(1000) -} - -print("Replica sets didn't sync up properly.") -quit(12) - -// vim:ts=4:sw=4:et diff --git a/vendor/labix.org/v2/mgo/testdb/setup.sh b/vendor/labix.org/v2/mgo/testdb/setup.sh deleted file mode 100755 index ab841dadb..000000000 --- a/vendor/labix.org/v2/mgo/testdb/setup.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/sh -e - -start() { - mkdir _testdb - cd _testdb - mkdir db1 db2 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 - ln -s ../testdb/supervisord.conf supervisord.conf - echo 
keyfile > keyfile - chmod 600 keyfile - echo "Running supervisord..." - supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) - COUNT=$(grep '^\[program' supervisord.conf | wc -l) - echo "Supervisord is up, starting $COUNT processes..." - for i in $(seq 10); do - RUNNING=$(supervisorctl status | grep RUNNING | wc -l) - echo "$RUNNING processes running..." - if [ x$COUNT = x$RUNNING ]; then - echo "Running setup.js with mongo..." - mongo --nodb ../testdb/init.js - exit 0 - fi - sleep 1 - done - echo "Failed to start all processes. Check out what's up at $PWD now!" - exit 1 -} - -stop() { - if [ -d _testdb ]; then - echo "Shutting down test cluster..." - (cd _testdb && supervisorctl shutdown) - rm -rf _testdb - fi -} - - -if [ ! -f suite_test.go ]; then - echo "This script must be run from within the source directory." - exit 1 -fi - -case "$1" in - - start) - start - ;; - - stop) - stop - ;; - -esac - -# vim:ts=4:sw=4:et diff --git a/vendor/labix.org/v2/mgo/testdb/supervisord.conf b/vendor/labix.org/v2/mgo/testdb/supervisord.conf deleted file mode 100644 index b0aca01a9..000000000 --- a/vendor/labix.org/v2/mgo/testdb/supervisord.conf +++ /dev/null @@ -1,62 +0,0 @@ -[supervisord] -logfile = %(here)s/supervisord.log -pidfile = %(here)s/supervisord.pid -directory = %(here)s -#nodaemon = true - -[inet_http_server] -port = 127.0.0.1:9001 - -[supervisorctl] -serverurl = http://127.0.0.1:9001 - -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[program:db1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1 --port 40001 - -[program:db2] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth - -[program:rs1a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011 -[program:rs1b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1b --bind_ip=127.0.0.1 --port 40012 -[program:rs1c] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1c --bind_ip=127.0.0.1 --port 40013 - -[program:rs2a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2a --bind_ip=127.0.0.1 --port 40021 -[program:rs2b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2b --bind_ip=127.0.0.1 --port 40022 -[program:rs2c] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2c --bind_ip=127.0.0.1 --port 40023 - -[program:rs3a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3a --bind_ip=127.0.0.1 --port 40031 --auth --keyFile=%(here)s/keyfile -[program:rs3b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3b --bind_ip=127.0.0.1 --port 40032 --auth --keyFile=%(here)s/keyfile 
-[program:rs3c] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3c --bind_ip=127.0.0.1 --port 40033 --auth --keyFile=%(here)s/keyfile - -[program:rs4a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(here)s/rs4a --bind_ip=127.0.0.1 --port 40041 - -[program:cfg1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101 - -[program:cfg2] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102 - -[program:cfg3] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile - -[program:s1] -command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1 - -[program:s2] -command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1 - -[program:s3] -command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile diff --git a/vendor/labix.org/v2/mgo/testdb/wait.js b/vendor/labix.org/v2/mgo/testdb/wait.js deleted file mode 100644 index de0d66075..000000000 --- a/vendor/labix.org/v2/mgo/testdb/wait.js +++ /dev/null @@ -1,58 +0,0 @@ -// We know the master of the first set (pri=1), but not of the second. -var settings = {} -var rs1cfg = {_id: "rs1", - members: [{_id: 1, host: "127.0.0.1:40011", priority: 1}, - {_id: 2, host: "127.0.0.1:40012", priority: 0}, - {_id: 3, host: "127.0.0.1:40013", priority: 0}]} -var rs2cfg = {_id: "rs2", - members: [{_id: 1, host: "127.0.0.1:40021", priority: 1}, - {_id: 2, host: "127.0.0.1:40022", priority: 1}, - {_id: 3, host: "127.0.0.1:40023", priority: 0}]} -var rs3cfg = {_id: "rs3", - members: [{_id: 1, host: "127.0.0.1:40031", priority: 1}, - {_id: 2, host: "127.0.0.1:40032", priority: 1}, - {_id: 3, host: "127.0.0.1:40033", priority: 1}], - settings: settings} - -for (var i = 0; i != 60; i++) { - try { - rs1a = new Mongo("127.0.0.1:40011").getDB("admin") - rs2a = new Mongo("127.0.0.1:40021").getDB("admin") - rs3a = new Mongo("127.0.0.1:40031").getDB("admin") - rs3a.auth("root", "rapadura") - db1 = new Mongo("127.0.0.1:40001").getDB("admin") - db2 = new Mongo("127.0.0.1:40002").getDB("admin") - break - } catch(err) { - print("Can't connect yet...") - } - sleep(1000) -} - -function countHealthy(rs) { - var status = rs.runCommand({replSetGetStatus: 1}) - var count = 0 - if (typeof status.members != "undefined") { - for (var i = 0; i != status.members.length; i++) { - var m = status.members[i] - if (m.health == 1 && (m.state == 1 || m.state == 2)) { - count += 1 - } - } - } - return count -} - -var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length - -for (var i = 0; i != 60; i++) { - var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) - print("Replica sets have", count, "healthy nodes.") - if (count == totalRSMembers) { - quit(0) - } - sleep(1000) -} - -print("Replica sets didn't sync up properly.") -quit(12) diff --git a/vendor/labix.org/v2/mgo/txn/chaos.go b/vendor/labix.org/v2/mgo/txn/chaos.go deleted file mode 100644 index a9258faec..000000000 --- 
a/vendor/labix.org/v2/mgo/txn/chaos.go +++ /dev/null @@ -1,68 +0,0 @@ -package txn - -import ( - mrand "math/rand" - "time" -) - -var chaosEnabled = false -var chaosSetting Chaos - -// Chaos holds parameters for the failure injection mechanism. -type Chaos struct { - // KillChance is the 0.0 to 1.0 chance that a given checkpoint - // within the algorithm will raise an interruption that will - // stop the procedure. - KillChance float64 - - // SlowdownChance is the 0.0 to 1.0 chance that a given checkpoint - // within the algorithm will be delayed by Slowdown before - // continuing. - SlowdownChance float64 - Slowdown time.Duration - - // If Breakpoint is set, the above settings will only affect the - // named breakpoint. - Breakpoint string -} - -// SetChaos sets the failure injection parameters to c. -func SetChaos(c Chaos) { - chaosSetting = c - chaosEnabled = c.KillChance > 0 || c.SlowdownChance > 0 -} - -func chaos(bpname string) { - if !chaosEnabled { - return - } - switch chaosSetting.Breakpoint { - case "", bpname: - kc := chaosSetting.KillChance - if kc > 0 && mrand.Intn(1000) < int(kc*1000) { - panic(chaosError{}) - } - if bpname == "insert" { - return - } - sc := chaosSetting.SlowdownChance - if sc > 0 && mrand.Intn(1000) < int(kc*1000) { - time.Sleep(chaosSetting.Slowdown) - } - } -} - -type chaosError struct{} - -func (f *flusher) handleChaos(err *error) { - v := recover() - if v == nil { - return - } - if _, ok := v.(chaosError); ok { - f.debugf("Killed by chaos!") - *err = ErrChaos - return - } - panic(v) -} diff --git a/vendor/labix.org/v2/mgo/txn/debug.go b/vendor/labix.org/v2/mgo/txn/debug.go deleted file mode 100644 index 7f67f4e3f..000000000 --- a/vendor/labix.org/v2/mgo/txn/debug.go +++ /dev/null @@ -1,108 +0,0 @@ -package txn - -import ( - "bytes" - "fmt" - "labix.org/v2/mgo/bson" - "sort" - "sync/atomic" -) - -var ( - debugEnabled bool - logger log_Logger -) - -type log_Logger interface { - Output(calldepth int, s string) error -} - -// Specify the *log.Logger where logged messages should be sent to. -func SetLogger(l log_Logger) { - logger = l -} - -// SetDebug enables or disables debugging. 
-func SetDebug(debug bool) { - debugEnabled = debug -} - -var ErrChaos = fmt.Errorf("interrupted by chaos") - -var debugId uint32 - -func debugPrefix() string { - d := atomic.AddUint32(&debugId, 1) - 1 - s := make([]byte, 0, 10) - for i := uint(0); i < 8; i++ { - s = append(s, "abcdefghijklmnop"[(d>>(4*i))&0xf]) - if d>>(4*(i+1)) == 0 { - break - } - } - s = append(s, ')', ' ') - return string(s) -} - -func logf(format string, args ...interface{}) { - if logger != nil { - logger.Output(2, fmt.Sprintf(format, argsForLog(args)...)) - } -} - -func debugf(format string, args ...interface{}) { - if debugEnabled && logger != nil { - logger.Output(2, fmt.Sprintf(format, argsForLog(args)...)) - } -} - -func argsForLog(args []interface{}) []interface{} { - for i, arg := range args { - switch v := arg.(type) { - case bson.ObjectId: - args[i] = v.Hex() - case []bson.ObjectId: - lst := make([]string, len(v)) - for j, id := range v { - lst[j] = id.Hex() - } - args[i] = lst - case map[docKey][]bson.ObjectId: - buf := &bytes.Buffer{} - var dkeys docKeys - for dkey := range v { - dkeys = append(dkeys, dkey) - } - sort.Sort(dkeys) - for i, dkey := range dkeys { - if i > 0 { - buf.WriteByte(' ') - } - buf.WriteString(fmt.Sprintf("%v: {", dkey)) - for j, id := range v[dkey] { - if j > 0 { - buf.WriteByte(' ') - } - buf.WriteString(id.Hex()) - } - buf.WriteByte('}') - } - args[i] = buf.String() - case map[docKey][]int64: - buf := &bytes.Buffer{} - var dkeys docKeys - for dkey := range v { - dkeys = append(dkeys, dkey) - } - sort.Sort(dkeys) - for i, dkey := range dkeys { - if i > 0 { - buf.WriteByte(' ') - } - buf.WriteString(fmt.Sprintf("%v: %v", dkey, v[dkey])) - } - args[i] = buf.String() - } - } - return args -} diff --git a/vendor/labix.org/v2/mgo/txn/flusher.go b/vendor/labix.org/v2/mgo/txn/flusher.go deleted file mode 100644 index 846eefec1..000000000 --- a/vendor/labix.org/v2/mgo/txn/flusher.go +++ /dev/null @@ -1,996 +0,0 @@ -package txn - -import ( - "fmt" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - "sort" -) - -func flush(r *Runner, t *transaction) error { - f := &flusher{ - Runner: r, - goal: t, - goalKeys: make(map[docKey]bool), - queue: make(map[docKey][]token), - debugId: debugPrefix(), - } - for _, dkey := range f.goal.docKeys() { - f.goalKeys[dkey] = true - } - return f.run() -} - -type flusher struct { - *Runner - goal *transaction - goalKeys map[docKey]bool - queue map[docKey][]token - debugId string -} - -func (f *flusher) run() (err error) { - if chaosEnabled { - defer f.handleChaos(&err) - } - - f.debugf("Processing %s", f.goal) - seen := make(map[bson.ObjectId]*transaction) - if err := f.recurse(f.goal, seen); err != nil { - return err - } - if f.goal.done() { - return nil - } - - // Sparse workloads will generally be managed entirely by recurse. - // Getting here means one or more transactions have dependencies - // and perhaps cycles. - - // Build successors data for Tarjan's sort. Must consider - // that entries in txn-queue are not necessarily valid. 
- successors := make(map[bson.ObjectId][]bson.ObjectId) - ready := true - for _, dqueue := range f.queue { - NextPair: - for i := 0; i < len(dqueue); i++ { - pred := dqueue[i] - predid := pred.id() - predt := seen[predid] - if predt == nil || predt.Nonce != pred.nonce() { - continue - } - predsuccids, ok := successors[predid] - if !ok { - successors[predid] = nil - } - - for j := i + 1; j < len(dqueue); j++ { - succ := dqueue[j] - succid := succ.id() - succt := seen[succid] - if succt == nil || succt.Nonce != succ.nonce() { - continue - } - if _, ok := successors[succid]; !ok { - successors[succid] = nil - } - - // Found a valid pred/succ pair. - i = j - 1 - for _, predsuccid := range predsuccids { - if predsuccid == succid { - continue NextPair - } - } - successors[predid] = append(predsuccids, succid) - if succid == f.goal.Id { - // There are still pre-requisites to handle. - ready = false - } - continue NextPair - } - } - } - f.debugf("Queues: %v", f.queue) - f.debugf("Successors: %v", successors) - if ready { - f.debugf("Goal %s has no real pre-requisites", f.goal) - return f.advance(f.goal, nil, true) - } - - // Robert Tarjan's algorithm for detecting strongly-connected - // components is used for topological sorting and detecting - // cycles at once. The order in which transactions are applied - // in commonly affected documents must be a global agreement. - sorted := tarjanSort(successors) - if debugEnabled { - f.debugf("Tarjan output: %v", sorted) - } - pull := make(map[bson.ObjectId]*transaction) - for i := len(sorted) - 1; i >= 0; i-- { - scc := sorted[i] - f.debugf("Flushing %v", scc) - if len(scc) == 1 { - pull[scc[0]] = seen[scc[0]] - } - for _, id := range scc { - if err := f.advance(seen[id], pull, true); err != nil { - return err - } - } - if len(scc) > 1 { - for _, id := range scc { - pull[id] = seen[id] - } - } - } - return nil -} - -func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction) error { - seen[t.Id] = t - err := f.advance(t, nil, false) - if err != errPreReqs { - return err - } - for _, dkey := range t.docKeys() { - for _, dtt := range f.queue[dkey] { - id := dtt.id() - if seen[id] != nil { - continue - } - qt, err := f.load(id) - if err != nil { - return err - } - err = f.recurse(qt, seen) - if err != nil { - return err - } - } - } - return nil -} - -func (f *flusher) advance(t *transaction, pull map[bson.ObjectId]*transaction, force bool) error { - for { - switch t.State { - case tpreparing, tprepared: - revnos, err := f.prepare(t, force) - if err != nil { - return err - } - if t.State != tprepared { - continue - } - if err = f.assert(t, revnos, pull); err != nil { - return err - } - if t.State != tprepared { - continue - } - if err = f.checkpoint(t, revnos); err != nil { - return err - } - case tapplying: - return f.apply(t, pull) - case taborting: - return f.abortOrReload(t, nil, pull) - case tapplied, taborted: - return nil - default: - panic(fmt.Errorf("transaction in unknown state: %q", t.State)) - } - } - panic("unreachable") -} - -type stash string - -const ( - stashStable stash = "" - stashInsert stash = "insert" - stashRemove stash = "remove" -) - -type txnInfo struct { - Queue []token `bson:"txn-queue"` - Revno int64 `bson:"txn-revno,omitempty"` - Insert bson.ObjectId `bson:"txn-insert,omitempty"` - Remove bson.ObjectId `bson:"txn-remove,omitempty"` -} - -type stashState string - -const ( - stashNew stashState = "" - stashInserting stashState = "inserting" -) - -var txnFields = bson.D{{"txn-queue", 1}, {"txn-revno", 1}, 
{"txn-remove", 1}, {"txn-insert", 1}} - -var errPreReqs = fmt.Errorf("transaction has pre-requisites and force is false") - -// prepare injects t's id onto txn-queue for all affected documents -// and collects the current txn-queue and txn-revno values during -// the process. If the prepared txn-queue indicates that there are -// pre-requisite transactions to be applied and the force parameter -// is false, errPreReqs will be returned. Otherwise, the current -// tip revision numbers for all the documents are returned. -func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error) { - if t.State != tpreparing { - return f.rescan(t, force) - } - f.debugf("Preparing %s", t) - - // Iterate in a stable way across all runners. This isn't - // strictly required, but reduces the chances of cycles. - dkeys := t.docKeys() - sort.Sort(dkeys) - - revno := make(map[docKey]int64) - info := txnInfo{} - tt := tokenFor(t) -NextDoc: - for _, dkey := range dkeys { - change := mgo.Change{ - Update: bson.D{{"$addToSet", bson.D{{"txn-queue", tt}}}}, - ReturnNew: true, - } - c := f.tc.Database.C(dkey.C) - cquery := c.FindId(dkey.Id).Select(txnFields) - - RetryDoc: - change.Upsert = false - chaos("") - if _, err := cquery.Apply(change, &info); err == nil { - if info.Remove == "" { - // Fast path, unless workload is insert/remove heavy. - revno[dkey] = info.Revno - f.queue[dkey] = info.Queue - f.debugf("[A] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue) - continue NextDoc - } else { - // Handle remove in progress before preparing it. - if err := f.loadAndApply(info.Remove); err != nil { - return nil, err - } - goto RetryDoc - } - } else if err != mgo.ErrNotFound { - return nil, err - } - - // Document missing. Use stash collection. - change.Upsert = true - chaos("") - _, err := f.sc.FindId(dkey).Apply(change, &info) - if err != nil { - return nil, err - } - if info.Insert != "" { - // Handle insert in progress before preparing it. - if err := f.loadAndApply(info.Insert); err != nil { - return nil, err - } - goto RetryDoc - } - - // Must confirm stash is still in use and is the same one - // prepared, since applying a remove overwrites the stash. - docFound := false - stashFound := false - if err = c.FindId(dkey.Id).Select(txnFields).One(&info); err == nil { - docFound = true - } else if err != mgo.ErrNotFound { - return nil, err - } else if err = f.sc.FindId(dkey).One(&info); err == nil { - stashFound = true - if info.Revno == 0 { - // Missing revno in the stash only happens when it - // has been upserted, in which case it defaults to -1. - // Txn-inserted documents get revno -1 while in the stash - // for the first time, and -revno-1 == 2 when they go live. - info.Revno = -1 - } - } else if err != mgo.ErrNotFound { - return nil, err - } - - if docFound && info.Remove == "" || stashFound && info.Insert == "" { - for _, dtt := range info.Queue { - if dtt != tt { - continue - } - // Found tt properly prepared. - if stashFound { - f.debugf("[B] Prepared document %v on stash with revno %d and queue: %v", dkey, info.Revno, info.Queue) - } else { - f.debugf("[B] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue) - } - revno[dkey] = info.Revno - f.queue[dkey] = info.Queue - continue NextDoc - } - } - - // The stash wasn't valid and tt got overwriten. Try again. - f.unstashToken(tt, dkey) - goto RetryDoc - } - - // Save the prepared nonce onto t. 
- nonce := tt.nonce() - qdoc := bson.D{{"_id", t.Id}, {"s", tpreparing}} - udoc := bson.D{{"$set", bson.D{{"s", tprepared}, {"n", nonce}}}} - chaos("set-prepared") - err = f.tc.Update(qdoc, udoc) - if err == nil { - t.State = tprepared - t.Nonce = nonce - } else if err == mgo.ErrNotFound { - f.debugf("Can't save nonce of %s: LOST RACE", tt) - if err := f.reload(t); err != nil { - return nil, err - } else if t.State == tpreparing { - panic("can't save nonce yet transaction is still preparing") - } else if t.State != tprepared { - return t.Revnos, nil - } - tt = t.token() - } else if err != nil { - return nil, err - } - - prereqs, found := f.hasPreReqs(tt, dkeys) - if !found { - // Must only happen when reloading above. - return f.rescan(t, force) - } else if prereqs && !force { - f.debugf("Prepared queue with %s [has prereqs & not forced].", tt) - return nil, errPreReqs - } - for _, op := range t.Ops { - dkey := op.docKey() - revnos = append(revnos, revno[dkey]) - drevno := revno[dkey] - switch { - case op.Insert != nil && drevno < 0: - revno[dkey] = -drevno+1 - case op.Update != nil && drevno >= 0: - revno[dkey] = drevno+1 - case op.Remove && drevno >= 0: - revno[dkey] = -drevno-1 - } - } - if !prereqs { - f.debugf("Prepared queue with %s [no prereqs]. Revnos: %v", tt, revnos) - } else { - f.debugf("Prepared queue with %s [forced] Revnos: %v", tt, revnos) - } - return revnos, nil -} - -func (f *flusher) unstashToken(tt token, dkey docKey) error { - qdoc := bson.D{{"_id", dkey}, {"txn-queue", tt}} - udoc := bson.D{{"$pull", bson.D{{"txn-queue", tt}}}} - chaos("") - if err := f.sc.Update(qdoc, udoc); err == nil { - chaos("") - err = f.sc.Remove(bson.D{{"_id", dkey}, {"txn-queue", bson.D{}}}) - } else if err != mgo.ErrNotFound { - return err - } - return nil -} - -func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) { - f.debugf("Rescanning %s", t) - if t.State != tprepared { - panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State)) - } - - // Iterate in a stable way across all runners. This isn't - // strictly required, but reduces the chances of cycles. - dkeys := t.docKeys() - sort.Sort(dkeys) - - tt := t.token() - if !force { - prereqs, found := f.hasPreReqs(tt, dkeys) - if found && prereqs { - // Its state is already known. - return nil, errPreReqs - } - } - - revno := make(map[docKey]int64) - info := txnInfo{} - for _, dkey := range dkeys { - retry := 0 - - RetryDoc: - c := f.tc.Database.C(dkey.C) - if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound { - // Document is missing. Look in stash. - if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound { - // Stash also doesn't exist. Maybe someone applied it. - if err := f.reload(t); err != nil { - return nil, err - } else if t.State != tprepared { - return t.Revnos, err - } - // Not applying either. - retry++ - if retry < 3 { - // Retry since there might be an insert/remove race. - goto RetryDoc - } - // Neither the doc nor the stash seem to exist. - return nil, fmt.Errorf("cannot find document %v for applying transaction %s", dkey, t) - } else if err != nil { - return nil, err - } - // Stash found. - if info.Insert != "" { - // Handle insert in progress before assuming ordering is good. - if err := f.loadAndApply(info.Insert); err != nil { - return nil, err - } - goto RetryDoc - } - if info.Revno == 0 { - // Missing revno in the stash means -1. 
- info.Revno = -1 - } - } else if err != nil { - return nil, err - } else if info.Remove != "" { - // Handle remove in progress before assuming ordering is good. - if err := f.loadAndApply(info.Remove); err != nil { - return nil, err - } - goto RetryDoc - } - revno[dkey] = info.Revno - - found := false - for _, id := range info.Queue { - if id == tt { - found = true - break - } - } - f.queue[dkey] = info.Queue - if !found { - // Previously set txn-queue was popped by someone. - // Transaction is being/has been applied elsewhere. - f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue) - err := f.reload(t) - if t.State == tpreparing || t.State == tprepared { - panic("rescanned document misses transaction in queue") - } - return t.Revnos, err - } - } - - prereqs, found := f.hasPreReqs(tt, dkeys) - if !found { - panic("rescanning loop guarantees that this can't happen") - } else if prereqs && !force { - f.debugf("Rescanned queue with %s: has prereqs, not forced", tt) - return nil, errPreReqs - } - for _, op := range t.Ops { - dkey := op.docKey() - revnos = append(revnos, revno[dkey]) - if op.isChange() { - revno[dkey] += 1 - } - } - if !prereqs { - f.debugf("Rescanned queue with %s: no prereqs, revnos: %v", tt, revnos) - } else { - f.debugf("Rescanned queue with %s: has prereqs, forced, revnos: %v", tt, revnos) - } - return revnos, nil -} - -func (f *flusher) hasPreReqs(tt token, dkeys docKeys) (prereqs, found bool) { - found = true -NextDoc: - for _, dkey := range dkeys { - for _, dtt := range f.queue[dkey] { - if dtt == tt { - continue NextDoc - } else if dtt.id() != tt.id() { - prereqs = true - } - } - found = false - } - return -} - -func (f *flusher) reload(t *transaction) error { - var newt transaction - query := f.tc.FindId(t.Id) - query.Select(bson.D{{"s", 1}, {"n", 1}, {"r", 1}}) - if err := query.One(&newt); err != nil { - return fmt.Errorf("failed to reload transaction: %v", err) - } - t.State = newt.State - t.Nonce = newt.Nonce - t.Revnos = newt.Revnos - f.debugf("Reloaded %s: %q", t, t.State) - return nil -} - -func (f *flusher) loadAndApply(id bson.ObjectId) error { - t, err := f.load(id) - if err != nil { - return err - } - return f.advance(t, nil, true) -} - -// assert verifies that all assertions in t match the content that t -// will be applied upon. If an assertion fails, the transaction state -// is changed to aborted. -func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) error { - f.debugf("Asserting %s with revnos %v", t, revnos) - if t.State != tprepared { - panic(fmt.Errorf("asserting transaction in invalid state: %q", t.State)) - } - qdoc := make(bson.D, 3) - revno := make(map[docKey]int64) - for i, op := range t.Ops { - dkey := op.docKey() - if _, ok := revno[dkey]; !ok { - revno[dkey] = revnos[i] - } - if op.Assert == nil { - continue - } - if op.Assert == DocMissing { - if revnos[i] >= 0 { - return f.abortOrReload(t, revnos, pull) - } - continue - } - if op.Insert != nil { - return fmt.Errorf("Insert can only Assert txn.DocMissing", op.Assert) - } - // if revnos[i] < 0 { abort }? - - qdoc = append(qdoc[:0], bson.DocElem{"_id", op.Id}) - if op.Assert != DocMissing { - var revnoq interface{} - if n := revno[dkey]; n == 0 { - revnoq = bson.D{{"$exists", false}} - } else { - revnoq = n - } - // XXX Add tt to the query here, once we're sure it's all working. - // Not having it increases the chances of breaking on bad logic. 
- qdoc = append(qdoc, bson.DocElem{"txn-revno", revnoq}) - if op.Assert != DocExists { - qdoc = append(qdoc, bson.DocElem{"$or", []interface{}{op.Assert}}) - } - } - - c := f.tc.Database.C(op.C) - if err := c.Find(qdoc).Select(bson.D{{"_id", 1}}).One(nil); err == mgo.ErrNotFound { - // Assertion failed or someone else started applying. - return f.abortOrReload(t, revnos, pull) - } else if err != nil { - return err - } - } - f.debugf("Asserting %s succeeded", t) - return nil -} - -func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) (err error) { - f.debugf("Aborting or reloading %s (was %q)", t, t.State) - if t.State == tprepared { - qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}} - udoc := bson.D{{"$set", bson.D{{"s", taborting}}}} - chaos("set-aborting") - if err = f.tc.Update(qdoc, udoc); err == nil { - t.State = taborting - } else if err == mgo.ErrNotFound { - if err = f.reload(t); err != nil || t.State != taborting { - f.debugf("Won't abort %s. Reloaded state: %q", t, t.State) - return err - } - } else { - return err - } - } else if t.State != taborting { - panic(fmt.Errorf("aborting transaction in invalid state: %q", t.State)) - } - - if len(revnos) > 0 { - if pull == nil { - pull = map[bson.ObjectId]*transaction{t.Id: t} - } - seen := make(map[docKey]bool) - for i, op := range t.Ops { - dkey := op.docKey() - if seen[op.docKey()] { - continue - } - seen[dkey] = true - - pullAll := tokensToPull(f.queue[dkey], pull, "") - if len(pullAll) == 0 { - continue - } - udoc := bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}} - chaos("") - if revnos[i] < 0 { - err = f.sc.UpdateId(dkey, udoc) - } else { - c := f.tc.Database.C(dkey.C) - err = c.UpdateId(dkey.Id, udoc) - } - if err != nil && err != mgo.ErrNotFound { - return err - } - } - } - udoc := bson.D{{"$set", bson.D{{"s", taborted}}}} - chaos("set-aborted") - if err := f.tc.UpdateId(t.Id, udoc); err != nil && err != mgo.ErrNotFound { - return err - } - t.State = taborted - f.debugf("Aborted %s", t) - return nil -} - -func (f *flusher) checkpoint(t *transaction, revnos []int64) error { - var debugRevnos map[docKey][]int64 - if debugEnabled { - debugRevnos = make(map[docKey][]int64) - for i, op := range t.Ops { - dkey := op.docKey() - debugRevnos[dkey] = append(debugRevnos[dkey], revnos[i]) - } - f.debugf("Ready to apply %s. Saving revnos %v", t, debugRevnos) - } - - // Save in t the txn-revno values the transaction must run on. - qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}} - udoc := bson.D{{"$set", bson.D{{"s", tapplying}, {"r", revnos}}}} - chaos("set-applying") - err := f.tc.Update(qdoc, udoc) - if err == nil { - t.State = tapplying - t.Revnos = revnos - f.debugf("Ready to apply %s. Saving revnos %v: DONE", t, debugRevnos) - } else if err == mgo.ErrNotFound { - f.debugf("Ready to apply %s. Saving revnos %v: LOST RACE", t, debugRevnos) - return f.reload(t) - } - return nil -} - -func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) error { - f.debugf("Applying transaction %s", t) - if t.State != tapplying { - panic(fmt.Errorf("applying transaction in invalid state: %q", t.State)) - } - if pull == nil { - pull = map[bson.ObjectId]*transaction{t.Id: t} - } - - // Compute the operation in which t's id may be pulled - // out of txn-queue. That's on its last change, or the - // first assertion. 
- pullOp := make(map[docKey]int) - for i := range t.Ops { - op := &t.Ops[i] - dkey := op.docKey() - if _, ok := pullOp[dkey]; !ok || op.isChange() { - pullOp[dkey] = i - } - } - - logRevnos := append([]int64(nil), t.Revnos...) - logDoc := bson.D{{"_id", t.Id}} - - tt := tokenFor(t) - for i := range t.Ops { - op := &t.Ops[i] - dkey := op.docKey() - dqueue := f.queue[dkey] - revno := t.Revnos[i] - - var opName string - if debugEnabled { - opName = op.name() - f.debugf("Applying %s op %d (%s) on %v with txn-revno %d", t, i, opName, dkey, revno) - } - - c := f.tc.Database.C(op.C) - - qdoc := bson.D{{"_id", dkey.Id}, {"txn-revno", revno}, {"txn-queue", tt}} - if op.Insert != nil { - qdoc[0].Value = dkey - if revno == -1 { - qdoc[1].Value = bson.D{{"$exists", false}} - } - } else if revno == 0 { - // There's no document with revno 0. The only way to see it is - // when an existent document participates in a transaction the - // first time. Txn-inserted documents get revno -1 while in the - // stash for the first time, and -revno-1 == 2 when they go live. - qdoc[1].Value = bson.D{{"$exists", false}} - } - - dontPull := tt - isPullOp := pullOp[dkey] == i - if isPullOp { - dontPull = "" - } - pullAll := tokensToPull(dqueue, pull, dontPull) - - var d bson.D - var outcome string - var err error - switch { - case op.Update != nil: - if revno < 0 { - err = mgo.ErrNotFound - f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed"); - } else { - newRevno := revno + 1 - logRevnos[i] = newRevno - if d, err = objToDoc(op.Update); err != nil { - return err - } - if d, err = addToDoc(d, "$pullAll", bson.D{{"txn-queue", pullAll}}); err != nil { - return err - } - if d, err = addToDoc(d, "$set", bson.D{{"txn-revno", newRevno}}); err != nil { - return err - } - chaos("") - err = c.Update(qdoc, d) - } - case op.Remove: - if revno < 0 { - err = mgo.ErrNotFound - } else { - newRevno := -revno - 1 - logRevnos[i] = newRevno - nonce := newNonce() - stash := txnInfo{} - change := mgo.Change{ - Update: bson.D{{"$push", bson.D{{"n", nonce}}}}, - Upsert: true, - ReturnNew: true, - } - if _, err = f.sc.FindId(dkey).Apply(change, &stash); err != nil { - return err - } - change = mgo.Change{ - Update: bson.D{{"$set", bson.D{{"txn-remove", t.Id}}}}, - ReturnNew: true, - } - var info txnInfo - if _, err = c.Find(qdoc).Apply(change, &info); err == nil { - // The document still exists so the stash previously - // observed was either out of date or necessarily - // contained the token being applied. - f.debugf("Marked document %v to be removed on revno %d with queue: %v", dkey, info.Revno, info.Queue) - updated := false - if !hasToken(stash.Queue, tt) { - var set, unset bson.D - if revno == 0 { - // Missing revno in stash means -1. 
- set = bson.D{{"txn-queue", info.Queue}} - unset = bson.D{{"n", 1}, {"txn-revno", 1}} - } else { - set = bson.D{{"txn-queue", info.Queue}, {"txn-revno", newRevno}} - unset = bson.D{{"n", 1}} - } - qdoc := bson.D{{"_id", dkey}, {"n", nonce}} - udoc := bson.D{{"$set", set}, {"$unset", unset}} - if err = f.sc.Update(qdoc, udoc); err == nil { - updated = true - } else if err != mgo.ErrNotFound { - return err - } - } - if updated { - f.debugf("Updated stash for document %v with revno %d and queue: %v", dkey, newRevno, info.Queue) - } else { - f.debugf("Stash for document %v was up-to-date", dkey) - } - err = c.Remove(qdoc) - } - } - case op.Insert != nil: - if revno >= 0 { - err = mgo.ErrNotFound - } else { - newRevno := -revno + 1 - logRevnos[i] = newRevno - if d, err = objToDoc(op.Insert); err != nil { - return err - } - change := mgo.Change{ - Update: bson.D{{"$set", bson.D{{"txn-insert", t.Id}}}}, - ReturnNew: true, - } - chaos("") - var info txnInfo - if _, err = f.sc.Find(qdoc).Apply(change, &info); err == nil { - f.debugf("Stash for document %v has revno %d and queue: %v", dkey, info.Revno, info.Queue) - d = setInDoc(d, bson.D{{"_id", op.Id}, {"txn-revno", newRevno}, {"txn-queue", info.Queue}}) - // Unlikely yet unfortunate race in here if this gets seriously - // delayed. If someone inserts+removes meanwhile, this will - // reinsert, and there's no way to avoid that while keeping the - // collection clean or compromising sharding. applyOps can solve - // the former, but it can't shard (SERVER-1439). - chaos("insert") - err = c.Insert(d) - if err == nil || mgo.IsDup(err) { - if err == nil { - f.debugf("New document %v inserted with revno %d and queue: %v", dkey, info.Revno, info.Queue) - } else { - f.debugf("Document %v already existed", dkey) - } - chaos("") - if err = f.sc.Remove(qdoc); err == nil { - f.debugf("Stash for document %v removed", dkey) - } - } - if pullOp[dkey] == i && len(pullAll) > 0 { - _ = f.sc.UpdateId(dkey, bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}}) - } - } - } - case op.Assert != nil: - // TODO pullAll if pullOp[dkey] == i - } - if err == nil { - outcome = "DONE" - } else if err == mgo.ErrNotFound || mgo.IsDup(err) { - outcome = "MISS" - err = nil - } else { - outcome = err.Error() - } - if debugEnabled { - f.debugf("Applying %s op %d (%s) on %v with txn-revno %d: %s", t, i, opName, dkey, revno, outcome) - } - if err != nil { - return err - } - - if f.lc != nil && op.isChange() { - // Add change to the log document. - var dr bson.D - for li := range logDoc { - elem := &logDoc[li] - if elem.Name == op.C { - dr = elem.Value.(bson.D) - break - } - } - if dr == nil { - logDoc = append(logDoc, bson.DocElem{op.C, bson.D{{"d", []interface{}{}}, {"r", []int64{}}}}) - dr = logDoc[len(logDoc)-1].Value.(bson.D) - } - dr[0].Value = append(dr[0].Value.([]interface{}), op.Id) - dr[1].Value = append(dr[1].Value.([]int64), logRevnos[i]) - } - } - t.State = tapplied - - if f.lc != nil { - // Insert log document into the changelog collection. - f.debugf("Inserting %s into change log", t) - err := f.lc.Insert(logDoc) - if err != nil && !mgo.IsDup(err) { - return err - } - } - - // It's been applied, so errors are ignored here. It's fine for someone - // else to win the race and mark it as applied, and it's also fine for - // it to remain pending until a later point when someone will perceive - // it has been applied and mark it at such. 
- f.debugf("Marking %s as applied", t) - chaos("set-applied") - f.tc.Update(bson.D{{"_id", t.Id}, {"s", tapplying}}, bson.D{{"$set", bson.D{{"s", tapplied}}}}) - return nil -} - -func tokensToPull(dqueue []token, pull map[bson.ObjectId]*transaction, dontPull token) []token { - var result []token - for j := len(dqueue) - 1; j >= 0; j-- { - dtt := dqueue[j] - if dt, ok := pull[dtt.id()]; ok { - if dt.Nonce == dtt.nonce() { - // It's valid and is being pulled out, so everything - // preceding it must have been handled already. - if dtt == dontPull { - // Not time to pull this one out yet. - j-- - } - result = append(result, dqueue[:j+1]...) - break - } - // It was handled before and this is a leftover invalid - // nonce in the queue. Cherry-pick it out. - result = append(result, dtt) - } - } - return result -} - -func objToDoc(obj interface{}) (d bson.D, err error) { - data, err := bson.Marshal(obj) - if err != nil { - return nil, err - } - err = bson.Unmarshal(data, &d) - if err != nil { - return nil, err - } - return d, err -} - -func addToDoc(doc bson.D, key string, add bson.D) (bson.D, error) { - for i := range doc { - elem := &doc[i] - if elem.Name != key { - continue - } - if old, ok := elem.Value.(bson.D); ok { - elem.Value = append(old, add...) - return doc, nil - } else { - return nil, fmt.Errorf("invalid %q value in change document: %#v", key, elem.Value) - } - } - return append(doc, bson.DocElem{key, add}), nil -} - -func setInDoc(doc bson.D, set bson.D) bson.D { - dlen := len(doc) -NextS: - for s := range set { - sname := set[s].Name - for d := 0; d < dlen; d++ { - if doc[d].Name == sname { - doc[d].Value = set[s].Value - continue NextS - } - } - doc = append(doc, set[s]) - } - return doc -} - -func hasToken(tokens []token, tt token) bool { - for _, ttt := range tokens { - if ttt == tt { - return true - } - } - return false -} - -func (f *flusher) debugf(format string, args ...interface{}) { - if !debugEnabled { - return - } - debugf(f.debugId+format, args...) -} diff --git a/vendor/labix.org/v2/mgo/txn/mgo_test.go b/vendor/labix.org/v2/mgo/txn/mgo_test.go deleted file mode 100644 index ce5d9d0ec..000000000 --- a/vendor/labix.org/v2/mgo/txn/mgo_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package txn_test - -import ( - "bytes" - "labix.org/v2/mgo" - . "launchpad.net/gocheck" - "os/exec" - "time" -) - -// ---------------------------------------------------------------------------- -// The mgo test suite - -type MgoSuite struct { - output bytes.Buffer - server *exec.Cmd - session *mgo.Session -} - -var mgoaddr = "127.0.0.1:50017" - -func (s *MgoSuite) SetUpSuite(c *C) { - //mgo.SetDebug(true) - mgo.SetStats(true) - dbdir := c.MkDir() - args := []string{ - "--dbpath", dbdir, - "--bind_ip", "127.0.0.1", - "--port", "50017", - "--nssize", "1", - "--noprealloc", - "--smallfiles", - "--nojournal", - "-vvvvv", - } - s.server = exec.Command("mongod", args...) 
- s.server.Stdout = &s.output - s.server.Stderr = &s.output - err := s.server.Start() - if err != nil { - panic(err) - } -} - -func (s *MgoSuite) TearDownSuite(c *C) { - s.server.Process.Kill() - s.server.Process.Wait() -} - -func (s *MgoSuite) SetUpTest(c *C) { - err := DropAll(mgoaddr) - if err != nil { - panic(err) - } - mgo.SetLogger(c) - mgo.ResetStats() - - s.session, err = mgo.Dial(mgoaddr) - c.Assert(err, IsNil) -} - -func (s *MgoSuite) TearDownTest(c *C) { - if s.session != nil { - s.session.Close() - } - for i := 0; ; i++ { - stats := mgo.GetStats() - if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { - break - } - if i == 20 { - c.Fatal("Test left sockets in a dirty state") - } - c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive) - time.Sleep(500 * time.Millisecond) - } -} - -func DropAll(mongourl string) (err error) { - session, err := mgo.Dial(mongourl) - if err != nil { - return err - } - defer session.Close() - - names, err := session.DatabaseNames() - if err != nil { - return err - } - for _, name := range names { - switch name { - case "admin", "local", "config": - default: - err = session.DB(name).DropDatabase() - if err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/labix.org/v2/mgo/txn/sim_test.go b/vendor/labix.org/v2/mgo/txn/sim_test.go deleted file mode 100644 index 312eed999..000000000 --- a/vendor/labix.org/v2/mgo/txn/sim_test.go +++ /dev/null @@ -1,389 +0,0 @@ -package txn_test - -import ( - "flag" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - "labix.org/v2/mgo/txn" - . "launchpad.net/gocheck" - "math/rand" - "time" -) - -var ( - duration = flag.Duration("duration", 200*time.Millisecond, "duration for each simulation") - seed = flag.Int64("seed", 0, "seed for rand") -) - -type params struct { - killChance float64 - slowdownChance float64 - slowdown time.Duration - - unsafe bool - workers int - accounts int - changeHalf bool - reinsertCopy bool - reinsertZeroed bool - changelog bool - - changes int -} - -func (s *S) TestSim1Worker(c *C) { - simulate(c, params{ - workers: 1, - accounts: 4, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSim4WorkersDense(c *C) { - simulate(c, params{ - workers: 4, - accounts: 2, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSim4WorkersSparse(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimHalf1Worker(c *C) { - simulate(c, params{ - workers: 1, - accounts: 4, - changeHalf: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimHalf4WorkersDense(c *C) { - simulate(c, params{ - workers: 4, - accounts: 2, - changeHalf: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimHalf4WorkersSparse(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - changeHalf: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimReinsertCopy1Worker(c *C) { - simulate(c, params{ - workers: 1, - accounts: 10, - reinsertCopy: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimReinsertCopy4Workers(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - reinsertCopy: true, - killChance: 0.01, - 
slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimReinsertZeroed1Worker(c *C) { - simulate(c, params{ - workers: 1, - accounts: 10, - reinsertZeroed: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimReinsertZeroed4Workers(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - reinsertZeroed: true, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - }) -} - -func (s *S) TestSimChangeLog(c *C) { - simulate(c, params{ - workers: 4, - accounts: 10, - killChance: 0.01, - slowdownChance: 0.3, - slowdown: 100 * time.Millisecond, - changelog: true, - }) -} - - -type balanceChange struct { - id bson.ObjectId - origin int - target int - amount int -} - -func simulate(c *C, params params) { - seed := *seed - if seed == 0 { - seed = time.Now().UnixNano() - } - rand.Seed(seed) - c.Logf("Seed: %v", seed) - - txn.SetChaos(txn.Chaos{ - KillChance: params.killChance, - SlowdownChance: params.slowdownChance, - Slowdown: params.slowdown, - }) - defer txn.SetChaos(txn.Chaos{}) - - session, err := mgo.Dial(mgoaddr) - c.Assert(err, IsNil) - defer session.Close() - - db := session.DB("test") - tc := db.C("tc") - - runner := txn.NewRunner(tc) - - tclog := db.C("tc.log") - if params.changelog { - info := mgo.CollectionInfo{ - Capped: true, - MaxBytes: 1000000, - } - err := tclog.Create(&info) - c.Assert(err, IsNil) - runner.ChangeLog(tclog) - } - - accounts := db.C("accounts") - for i := 0; i < params.accounts; i++ { - err := accounts.Insert(M{"_id": i, "balance": 300}) - c.Assert(err, IsNil) - } - var stop time.Time - if params.changes <= 0 { - stop = time.Now().Add(*duration) - } - - max := params.accounts - if params.reinsertCopy || params.reinsertZeroed { - max = int(float64(params.accounts) * 1.5) - } - - changes := make(chan balanceChange, 1024) - - //session.SetMode(mgo.Eventual, true) - for i := 0; i < params.workers; i++ { - go func() { - n := 0 - for { - if n > 0 && n == params.changes { - break - } - if !stop.IsZero() && time.Now().After(stop) { - break - } - - change := balanceChange{ - id: bson.NewObjectId(), - origin: rand.Intn(max), - target: rand.Intn(max), - amount: 100, - } - - var old Account - var oldExists bool - if params.reinsertCopy || params.reinsertZeroed { - if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound { - c.Check(err, IsNil) - change.amount = old.Balance - oldExists = true - } - } - - var ops []txn.Op - switch { - case params.reinsertCopy && oldExists: - ops = []txn.Op{{ - C: "accounts", - Id: change.origin, - Assert: M{"balance": change.amount}, - Remove: true, - }, { - C: "accounts", - Id: change.target, - Assert: txn.DocMissing, - Insert: M{"balance": change.amount}, - }} - case params.reinsertZeroed && oldExists: - ops = []txn.Op{{ - C: "accounts", - Id: change.target, - Assert: txn.DocMissing, - Insert: M{"balance": 0}, - }, { - C: "accounts", - Id: change.origin, - Assert: M{"balance": change.amount}, - Remove: true, - }, { - C: "accounts", - Id: change.target, - Assert: txn.DocExists, - Update: M{"$inc": M{"balance": change.amount}}, - }} - case params.changeHalf: - ops = []txn.Op{{ - C: "accounts", - Id: change.origin, - Assert: M{"balance": M{"$gte": change.amount}}, - Update: M{"$inc": M{"balance": -change.amount / 2}}, - }, { - C: "accounts", - Id: change.target, - Assert: txn.DocExists, - Update: M{"$inc": M{"balance": change.amount / 2}}, - }, { - C: "accounts", - Id: change.origin, - Update: M{"$inc": 
M{"balance": -change.amount / 2}}, - }, { - C: "accounts", - Id: change.target, - Update: M{"$inc": M{"balance": change.amount / 2}}, - }} - default: - ops = []txn.Op{{ - C: "accounts", - Id: change.origin, - Assert: M{"balance": M{"$gte": change.amount}}, - Update: M{"$inc": M{"balance": -change.amount}}, - }, { - C: "accounts", - Id: change.target, - Assert: txn.DocExists, - Update: M{"$inc": M{"balance": change.amount}}, - }} - } - - err = runner.Run(ops, change.id, nil) - if err != nil && err != txn.ErrAborted && err != txn.ErrChaos { - c.Check(err, IsNil) - } - n++ - changes <- change - } - changes <- balanceChange{} - }() - } - - alive := params.workers - changeLog := make([]balanceChange, 0, 1024) - for alive > 0 { - change := <-changes - if change.id == "" { - alive-- - } else { - changeLog = append(changeLog, change) - } - } - c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted.")) - - txn.SetChaos(txn.Chaos{}) - err = runner.ResumeAll() - c.Assert(err, IsNil) - - n, err := accounts.Count() - c.Check(err, IsNil) - c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed.")) - - n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count() - c.Check(err, IsNil) - c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n)) - - globalBalance := 0 - iter := accounts.Find(nil).Iter() - account := Account{} - for iter.Next(&account) { - globalBalance += account.Balance - } - c.Check(iter.Close(), IsNil) - c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant.")) - - // Compute and verify the exact final state of all accounts. - balance := make(map[int]int) - for i := 0; i < params.accounts; i++ { - balance[i] += 300 - } - var applied, aborted int - for _, change := range changeLog { - err := runner.Resume(change.id) - if err == txn.ErrAborted { - aborted++ - continue - } else if err != nil { - c.Fatalf("resuming %s failed: %v", change.id, err) - } - balance[change.origin] -= change.amount - balance[change.target] += change.amount - applied++ - } - iter = accounts.Find(nil).Iter() - for iter.Next(&account) { - c.Assert(account.Balance, Equals, balance[account.Id]) - } - c.Check(iter.Close(), IsNil) - c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted) - - if params.changelog { - n, err := tclog.Count() - c.Assert(err, IsNil) - // Check if the capped collection is full. - dummy := make([]byte, 1024) - tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy}) - m, err := tclog.Count() - c.Assert(err, IsNil) - if m == n+1 { - // Wasn't full, so it must have seen it all. - c.Assert(err, IsNil) - c.Assert(n, Equals, applied) - } - } -} diff --git a/vendor/labix.org/v2/mgo/txn/tarjan.go b/vendor/labix.org/v2/mgo/txn/tarjan.go deleted file mode 100644 index a5ac0b02b..000000000 --- a/vendor/labix.org/v2/mgo/txn/tarjan.go +++ /dev/null @@ -1,96 +0,0 @@ -package txn - -import ( - "labix.org/v2/mgo/bson" - "sort" -) - -func tarjanSort(successors map[bson.ObjectId][]bson.ObjectId) [][]bson.ObjectId { - // http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm - data := &tarjanData{ - successors: successors, - nodes: make([]tarjanNode, 0, len(successors)), - index: make(map[bson.ObjectId]int, len(successors)), - } - - // Sort all nodes to stabilize the logic. 
- var all []string - for id := range successors { - all = append(all, string(id)) - } - sort.Strings(all) - for _, strid := range all { - id := bson.ObjectId(strid) - if _, seen := data.index[id]; !seen { - data.strongConnect(id) - } - } - return data.output -} - -type tarjanData struct { - successors map[bson.ObjectId][]bson.ObjectId - output [][]bson.ObjectId - - nodes []tarjanNode - stack []bson.ObjectId - index map[bson.ObjectId]int -} - -type tarjanNode struct { - lowlink int - stacked bool -} - -type idList []bson.ObjectId - -func (l idList) Len() int { return len(l) } -func (l idList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l idList) Less(i, j int) bool { return l[i] < l[j] } - -func (data *tarjanData) strongConnect(id bson.ObjectId) *tarjanNode { - index := len(data.nodes) - data.index[id] = index - data.stack = append(data.stack, id) - data.nodes = append(data.nodes, tarjanNode{index, true}) - node := &data.nodes[index] - - // Sort to stabilize the algorithm. - succids := idList(data.successors[id]) - sort.Sort(succids) - for _, succid := range succids { - succindex, seen := data.index[succid] - if !seen { - succnode := data.strongConnect(succid) - if succnode.lowlink < node.lowlink { - node.lowlink = succnode.lowlink - } - } else if data.nodes[succindex].stacked { - // Part of the current strongly-connected component. - if succindex < node.lowlink { - node.lowlink = succindex - } - } - } - - if node.lowlink == index { - // Root node; pop stack and output new - // strongly-connected component. - var scc []bson.ObjectId - i := len(data.stack) - 1 - for { - stackid := data.stack[i] - stackindex := data.index[stackid] - data.nodes[stackindex].stacked = false - scc = append(scc, stackid) - if stackindex == index { - break - } - i-- - } - data.stack = data.stack[:i] - data.output = append(data.output, scc) - } - - return node -} diff --git a/vendor/labix.org/v2/mgo/txn/tarjan_test.go b/vendor/labix.org/v2/mgo/txn/tarjan_test.go deleted file mode 100644 index c422605cf..000000000 --- a/vendor/labix.org/v2/mgo/txn/tarjan_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package txn - -import ( - "fmt" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" -) - -type TarjanSuite struct{} - -var _ = Suite(TarjanSuite{}) - -func bid(n int) bson.ObjectId { - return bson.ObjectId(fmt.Sprintf("%024d", n)) -} - -func bids(ns ...int) (ids []bson.ObjectId) { - for _, n := range ns { - ids = append(ids, bid(n)) - } - return -} - -func (TarjanSuite) TestExample(c *C) { - successors := map[bson.ObjectId][]bson.ObjectId{ - bid(1): bids(2), - bid(2): bids(1, 5), - bid(3): bids(4), - bid(4): bids(3, 5), - bid(5): bids(6), - bid(6): bids(7), - bid(7): bids(8), - bid(8): bids(6, 9), - bid(9): bids(), - } - - c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{ - bids(9), - bids(8, 7, 6), - bids(5), - bids(2, 1), - bids(4, 3), - }) -} diff --git a/vendor/labix.org/v2/mgo/txn/txn.go b/vendor/labix.org/v2/mgo/txn/txn.go deleted file mode 100644 index e798e65c2..000000000 --- a/vendor/labix.org/v2/mgo/txn/txn.go +++ /dev/null @@ -1,518 +0,0 @@ -// The txn package implements support for multi-document transactions. 
-// -// For details check the following blog post: -// -// http://blog.labix.org/2012/08/22/multi-doc-transactions-for-mongodb -// -package txn - -import ( - "encoding/binary" - "fmt" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - "reflect" - "sort" - "sync" - - crand "crypto/rand" - mrand "math/rand" -) - -type state int - -const ( - tpreparing state = 1 // One or more documents not prepared - tprepared state = 2 // Prepared but not yet ready to run - taborting state = 3 // Assertions failed, cleaning up - tapplying state = 4 // Changes are in progress - taborted state = 5 // Pre-conditions failed, nothing done - tapplied state = 6 // All changes applied -) - -func (s state) String() string { - switch s { - case tpreparing: - return "preparing" - case tprepared: - return "prepared" - case taborting: - return "aborting" - case tapplying: - return "applying" - case taborted: - return "aborted" - case tapplied: - return "applied" - } - panic(fmt.Errorf("unknown state: %d", s)) -} - -var rand *mrand.Rand -var randmu sync.Mutex - -func init() { - var seed int64 - err := binary.Read(crand.Reader, binary.BigEndian, &seed) - if err != nil { - panic(err) - } - rand = mrand.New(mrand.NewSource(seed)) -} - -type transaction struct { - Id bson.ObjectId `bson:"_id"` - State state `bson:"s"` - Info interface{} `bson:"i,omitempty"` - Ops []Op `bson:"o"` - Nonce string `bson:"n,omitempty"` - Revnos []int64 `bson:"r,omitempty"` - - docKeysCached docKeys -} - -func (t *transaction) String() string { - if t.Nonce == "" { - return t.Id.Hex() - } - return string(t.token()) -} - -func (t *transaction) done() bool { - return t.State == tapplied || t.State == taborted -} - -func (t *transaction) token() token { - if t.Nonce == "" { - panic("transaction has no nonce") - } - return tokenFor(t) -} - -func (t *transaction) docKeys() docKeys { - if t.docKeysCached != nil { - return t.docKeysCached - } - dkeys := make(docKeys, 0, len(t.Ops)) -NextOp: - for _, op := range t.Ops { - dkey := op.docKey() - for i := range dkeys { - if dkey == dkeys[i] { - continue NextOp - } - } - dkeys = append(dkeys, dkey) - } - sort.Sort(dkeys) - t.docKeysCached = dkeys - return dkeys -} - -// tokenFor returns a unique transaction token that -// is composed by t's id and a nonce. If t already has -// a nonce assigned to it, it will be used, otherwise -// a new nonce will be generated. -func tokenFor(t *transaction) token { - nonce := t.Nonce - if nonce == "" { - nonce = newNonce() - } - return token(t.Id.Hex() + "_" + nonce) -} - -func newNonce() string { - randmu.Lock() - r := rand.Uint32() - randmu.Unlock() - n := make([]byte, 8) - for i := uint(0); i < 8; i++ { - n[i] = "0123456789abcdef"[(r>>(4*i))&0xf] - } - return string(n) -} - -type token string - -func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) } -func (tt token) nonce() string { return string(tt[25:]) } - -// Op represents an operation to a single document that may be -// applied as part of a transaction with other operations. -type Op struct { - // C and Id identify the collection and document this operation - // refers to. Id is matched against the "_id" document field. - C string `bson:"c"` - Id interface{} `bson:"d"` - - // Assert optionally holds a query document that is used to - // test the operation document at the time the transaction is - // going to be applied. The assertions for all operations in - // a transaction are tested before any changes take place, - // and the transaction is entirely aborted if any of them - // fails. 
This is also the only way to prevent a transaction - // from being being applied (the transaction continues despite - // the outcome of Insert, Update, and Remove). - Assert interface{} `bson:"a,omitempty"` - - // The Insert, Update and Remove fields describe the mutation - // intended by the operation. At most one of them may be set - // per operation. If none are set, Assert must be set and the - // operation becomes a read-only test. - // - // Insert holds the document to be inserted at the time the - // transaction is applied. The Id field will be inserted - // into the document automatically as its _id field. The - // transaction will continue even if the document already - // exists. Use Assert with txn.DocMissing if the insertion is - // required. - // - // Update holds the update document to be applied at the time - // the transaction is applied. The transaction will continue - // even if a document with Id is missing. Use Assert to - // test for the document presence or its contents. - // - // Remove indicates whether to remove the document with Id. - // The transaction continues even if the document doesn't yet - // exist at the time the transaction is applied. Use Assert - // with txn.DocExists to make sure it will be removed. - Insert interface{} `bson:"i,omitempty"` - Update interface{} `bson:"u,omitempty"` - Remove bool `bson:"r,omitempty"` -} - -func (op *Op) isChange() bool { - return op.Update != nil || op.Insert != nil || op.Remove -} - -func (op *Op) docKey() docKey { - return docKey{op.C, op.Id} -} - -func (op *Op) name() string { - switch { - case op.Update != nil: - return "update" - case op.Insert != nil: - return "insert" - case op.Remove: - return "remove" - case op.Assert != nil: - return "assert" - } - return "none" -} - -const ( - // DocExists and DocMissing may be used on an operation's - // Assert value to assert that the document with the given - // Id exists or does not exist, respectively. - DocExists = "d+" - DocMissing = "d-" -) - -// A Runner applies operations as part of a transaction onto any number -// of collections within a database. See the Run method for details. -type Runner struct { - tc *mgo.Collection // txns - sc *mgo.Collection // stash - lc *mgo.Collection // log -} - -// NewRunner returns a new transaction runner that uses tc to hold its -// transactions. -// -// Multiple transaction collections may exist in a single database, but -// all collections that are touched by operations in a given transaction -// collection must be handled exclusively by it. -// -// A second collection with the same name of tc but suffixed by ".stash" -// will be used for implementing the transactional behavior of insert -// and remove operations. -func NewRunner(tc *mgo.Collection) *Runner { - return &Runner{tc, tc.Database.C(tc.Name + ".stash"), nil} -} - -var ErrAborted = fmt.Errorf("transaction aborted") - -// Run creates a new transaction with ops and runs it immediately. -// The id parameter specifies the transaction id, and may be written -// down ahead of time to later verify the success of the change and -// resume it, when the procedure is interrupted for any reason. If -// empty, a random id will be generated. -// The info parameter, if not nil, is included under the "i" -// field of the transaction document. -// -// Operations across documents are not atomically applied, but are -// guaranteed to be eventually all applied in the order provided or -// all aborted, as long as the affected documents are only modified -// through transactions. 
If documents are simultaneously modified -// by transactions and out of transactions the behavior is undefined. -// -// If Run returns no errors, all operations were applied successfully. -// If it returns ErrAborted, one or more operations can't be applied -// and the transaction was entirely aborted with no changes performed. -// Otherwise, if the transaction is interrupted while running for any -// reason, it may be resumed explicitly or by attempting to apply -// another transaction on any of the documents targeted by ops, as -// long as the interruption was made after the transaction document -// itself was inserted. Run Resume with the obtained transaction id -// to confirm whether the transaction was applied or not. -// -// Any number of transactions may be run concurrently, with one -// runner or many. -func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) { - const efmt = "error in transaction op %d: %s" - for i := range ops { - op := &ops[i] - if op.C == "" || op.Id == nil { - return fmt.Errorf(efmt, i, "C or Id missing") - } - changes := 0 - if op.Insert != nil { - changes++ - } - if op.Update != nil { - changes++ - } - if op.Remove { - changes++ - } - if changes > 1 { - return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set") - } - if changes == 0 && op.Assert == nil { - return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set") - } - } - if id == "" { - id = bson.NewObjectId() - } - - // Insert transaction sooner rather than later, to stay on the safer side. - t := transaction{ - Id: id, - Ops: ops, - State: tpreparing, - Info: info, - } - if err = r.tc.Insert(&t); err != nil { - return err - } - if err = flush(r, &t); err != nil { - return err - } - if t.State == taborted { - return ErrAborted - } else if t.State != tapplied { - panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State)) - } - return nil -} - -// ResumeAll resumes all pending transactions. All ErrAborted errors -// from individual transactions are ignored. -func (r *Runner) ResumeAll() (err error) { - debugf("Resuming all unfinished transactions") - iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter() - var t transaction - for iter.Next(&t) { - if t.State == tapplied || t.State == taborted { - continue - } - debugf("Resuming %s from %q", t.Id, t.State) - if err := flush(r, &t); err != nil { - return err - } - if !t.done() { - panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State)) - } - } - return nil -} - -// Resume resumes the transaction with id. It returns mgo.ErrNotFound -// if the transaction is not found. Otherwise, it has the same semantics -// of the Run method after the transaction is inserted. -func (r *Runner) Resume(id bson.ObjectId) (err error) { - t, err := r.load(id) - if err != nil { - return err - } - if !t.done() { - debugf("Resuming %s from %q", t, t.State) - if err := flush(r, t); err != nil { - return err - } - } - if t.State == taborted { - return ErrAborted - } else if t.State != tapplied { - panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State)) - } - return nil -} - -// ChangeLog enables logging of changes to the given collection -// every time a transaction that modifies content is done being -// applied. -// -// Saved documents are in the format: -// -// {"_id": , : {"d": [, ...], "r": [, ...]}} -// -// The document revision is the value of the txn-revno field after -// the change has been applied. 
Negative values indicate the document -// was not present in the collection. Revisions will not change when -// updates or removes are applied to missing documents or inserts are -// attempted when the document isn't present. -func (r *Runner) ChangeLog(logc *mgo.Collection) { - r.lc = logc -} - -// PurgeMissing removes from collections any state that refers to transaction -// documents that for whatever reason have been lost from the system (removed -// by accident or lost in a hard crash, for example). -// -// This method should very rarely be needed, if at all, and should never be -// used during the normal operation of an application. Its purpose is to put -// a system that has seen unavoidable corruption back in a working state. -func (r *Runner) PurgeMissing(collections ...string) error { - type M map[string]interface{} - type S []interface{} - pipeline := []M{ - {"$project": M{"_id": 1, "txn-queue": 1}}, - {"$unwind": "$txn-queue"}, - {"$sort": M{"_id": 1, "txn-queue": 1}}, - //{"$group": M{"_id": M{"$substr": S{"$txn-queue", 0, 24}}, "docids": M{"$push": "$_id"}}}, - } - - type TRef struct { - DocId interface{} "_id" - TxnId string "txn-queue" - } - - found := make(map[bson.ObjectId]bool) - colls := make(map[string]bool) - - sort.Strings(collections) - for _, collection := range collections { - c := r.tc.Database.C(collection) - iter := c.Pipe(pipeline).Iter() - var tref TRef - for iter.Next(&tref) { - txnId := bson.ObjectIdHex(tref.TxnId[:24]) - if found[txnId] { - continue - } - if r.tc.FindId(txnId).One(nil) == nil { - found[txnId] = true - continue - } - logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tref.DocId, txnId) - err := c.UpdateId(tref.DocId, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) - if err != nil { - return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) - } - } - colls[collection] = true - } - - type StashTRef struct { - Id docKey "_id" - TxnId string "txn-queue" - } - - iter := r.sc.Pipe(pipeline).Iter() - var stref StashTRef - for iter.Next(&stref) { - txnId := bson.ObjectIdHex(stref.TxnId[:24]) - if found[txnId] { - continue - } - if r.tc.FindId(txnId).One(nil) == nil { - found[txnId] = true - continue - } - logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stref.Id.C, stref.Id.Id, txnId) - err := r.sc.UpdateId(stref.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) - if err != nil { - return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) - } - } - - return nil -} - -func (r *Runner) load(id bson.ObjectId) (*transaction, error) { - var t transaction - err := r.tc.FindId(id).One(&t) - if err == mgo.ErrNotFound { - return nil, fmt.Errorf("cannot find transaction %s", id) - } else if err != nil { - return nil, err - } - return &t, nil -} - -type docKey struct { - C string - Id interface{} -} - -type docKeys []docKey - -func (ks docKeys) Len() int { return len(ks) } -func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] } -func (ks docKeys) Less(i, j int) bool { - a, b := ks[i], ks[j] - if a.C != b.C { - return a.C < b.C - } - av, an := valueNature(a.Id) - bv, bn := valueNature(b.Id) - if an != bn { - return an < bn - } - switch an { - case natureString: - return av.(string) < bv.(string) - case natureInt: - return av.(int64) < bv.(int64) - case natureFloat: - return av.(float64) < bv.(float64) - case natureBool: - return !av.(bool) && bv.(bool) - } - panic("unreachable") -} - -type typeNature 
int - -const ( - // The order of these values matters. Transactions - // from applications using different ordering will - // be incompatible with each other. - _ typeNature = iota - natureString - natureInt - natureFloat - natureBool -) - -func valueNature(v interface{}) (value interface{}, nature typeNature) { - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.String: - return rv.String(), natureString - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int(), natureInt - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return int64(rv.Uint()), natureInt - case reflect.Float32, reflect.Float64: - return rv.Float(), natureFloat - case reflect.Bool: - return rv.Bool(), natureBool - } - panic("document id type unsupported by txn: " + rv.Kind().String()) -} diff --git a/vendor/labix.org/v2/mgo/txn/txn_test.go b/vendor/labix.org/v2/mgo/txn/txn_test.go deleted file mode 100644 index b716783d2..000000000 --- a/vendor/labix.org/v2/mgo/txn/txn_test.go +++ /dev/null @@ -1,521 +0,0 @@ -package txn_test - -import ( - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - "labix.org/v2/mgo/txn" - . "launchpad.net/gocheck" - "testing" -) - -func TestAll(t *testing.T) { - TestingT(t) -} - -type S struct { - MgoSuite - - db *mgo.Database - tc, sc *mgo.Collection - accounts *mgo.Collection - runner *txn.Runner -} - -var _ = Suite(&S{}) - -type M map[string]interface{} - -func (s *S) SetUpTest(c *C) { - txn.SetChaos(txn.Chaos{}) - txn.SetLogger(c) - txn.SetDebug(true) - s.MgoSuite.SetUpTest(c) - - s.db = s.session.DB("test") - s.tc = s.db.C("tc") - s.sc = s.db.C("tc.stash") - s.accounts = s.db.C("accounts") - s.runner = txn.NewRunner(s.tc) -} - -type Account struct { - Id int `bson:"_id"` - Balance int -} - -func (s *S) TestDocExists(c *C) { - err := s.accounts.Insert(M{"_id": 0, "balance": 300}) - c.Assert(err, IsNil) - - exists := []txn.Op{{ - C: "accounts", - Id: 0, - Assert: txn.DocExists, - }} - missing := []txn.Op{{ - C: "accounts", - Id: 0, - Assert: txn.DocMissing, - }} - - err = s.runner.Run(exists, "", nil) - c.Assert(err, IsNil) - err = s.runner.Run(missing, "", nil) - c.Assert(err, Equals, txn.ErrAborted) - - err = s.accounts.RemoveId(0) - c.Assert(err, IsNil) - - err = s.runner.Run(exists, "", nil) - c.Assert(err, Equals, txn.ErrAborted) - err = s.runner.Run(missing, "", nil) - c.Assert(err, IsNil) -} - -func (s *S) TestInsert(c *C) { - err := s.accounts.Insert(M{"_id": 0, "balance": 300}) - c.Assert(err, IsNil) - - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Insert: M{"balance": 200}, - }} - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) - - ops[0].Id = 1 - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - err = s.accounts.FindId(1).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 200) -} - -func (s *S) TestRemove(c *C) { - err := s.accounts.Insert(M{"_id": 0, "balance": 300}) - c.Assert(err, IsNil) - - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Remove: true, - }} - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - err = s.accounts.FindId(0).One(nil) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) -} - -func (s *S) TestUpdate(c *C) { - var err error - err = s.accounts.Insert(M{"_id": 0, "balance": 200}) - c.Assert(err, IsNil) - err = s.accounts.Insert(M{"_id": 1, "balance": 
200}) - c.Assert(err, IsNil) - - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }} - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) - - ops[0].Id = 1 - - err = s.accounts.FindId(1).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 200) -} - -func (s *S) TestInsertUpdate(c *C) { - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Insert: M{"_id": 0, "balance": 200}, - }, { - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 400) -} - -func (s *S) TestUpdateInsert(c *C) { - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }, { - C: "accounts", - Id: 0, - Insert: M{"_id": 0, "balance": 200}, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 200) - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) -} - -func (s *S) TestInsertRemoveInsert(c *C) { - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Insert: M{"_id": 0, "balance": 200}, - }, { - C: "accounts", - Id: 0, - Remove: true, - }, { - C: "accounts", - Id: 0, - Insert: M{"_id": 0, "balance": 300}, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) -} - -func (s *S) TestQueueStashing(c *C) { - txn.SetChaos(txn.Chaos{ - KillChance: 1, - Breakpoint: "set-applying", - }) - - opses := [][]txn.Op{{{ - C: "accounts", - Id: 0, - Insert: M{"balance": 100}, - }}, {{ - C: "accounts", - Id: 0, - Remove: true, - }}, {{ - C: "accounts", - Id: 0, - Insert: M{"balance": 200}, - }}, {{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }}} - - var last bson.ObjectId - for _, ops := range opses { - last = bson.NewObjectId() - err := s.runner.Run(ops, last, nil) - c.Assert(err, Equals, txn.ErrChaos) - } - - txn.SetChaos(txn.Chaos{}) - err := s.runner.Resume(last) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 300) -} - -func (s *S) TestInfo(c *C) { - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Assert: txn.DocMissing, - }} - - id := bson.NewObjectId() - err := s.runner.Run(ops, id, M{"n": 42}) - c.Assert(err, IsNil) - - var t struct{ I struct{ N int } } - err = s.tc.FindId(id).One(&t) - c.Assert(err, IsNil) - c.Assert(t.I.N, Equals, 42) -} - -func (s *S) TestErrors(c *C) { - doc := bson.M{"foo": 1} - tests := []txn.Op{{ - C: "c", - Id: 0, - }, { - C: "c", - Id: 0, - Insert: doc, - Remove: true, - }, { - C: "c", - Id: 0, - Insert: doc, - Update: doc, - }, { - C: "c", - Id: 0, - Update: doc, - Remove: true, - }, { - C: "c", - Assert: doc, - }, { - Id: 0, - Assert: doc, - }} - - txn.SetChaos(txn.Chaos{KillChance: 1.0}) - for _, op := range tests { - 
c.Logf("op: %v", op) - err := s.runner.Run([]txn.Op{op}, "", nil) - c.Assert(err, ErrorMatches, "error in transaction op 0: .*") - } -} - -func (s *S) TestAssertNestedOr(c *C) { - // Assert uses $or internally. Ensure nesting works. - err := s.accounts.Insert(M{"_id": 0, "balance": 300}) - c.Assert(err, IsNil) - - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}}, - Update: bson.D{{"$inc", bson.D{{"balance", 100}}}}, - }} - - err = s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var account Account - err = s.accounts.FindId(0).One(&account) - c.Assert(err, IsNil) - c.Assert(account.Balance, Equals, 400) -} - -func (s *S) TestVerifyFieldOrdering(c *C) { - // Used to have a map in certain operations, which means - // the ordering of fields would be messed up. - fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}} - ops := []txn.Op{{ - C: "accounts", - Id: 0, - Insert: fields, - }} - - err := s.runner.Run(ops, "", nil) - c.Assert(err, IsNil) - - var d bson.D - err = s.accounts.FindId(0).One(&d) - c.Assert(err, IsNil) - - var filtered bson.D - for _, e := range d { - switch e.Name { - case "a", "b", "c": - filtered = append(filtered, e) - } - } - c.Assert(filtered, DeepEquals, fields) -} - -func (s *S) TestChangeLog(c *C) { - chglog := s.db.C("chglog") - s.runner.ChangeLog(chglog) - - ops := []txn.Op{{ - C: "debts", - Id: 0, - Assert: txn.DocMissing, - }, { - C: "accounts", - Id: 0, - Insert: M{"balance": 300}, - }, { - C: "accounts", - Id: 1, - Insert: M{"balance": 300}, - }, { - C: "people", - Id: "joe", - Insert: M{"accounts": []int64{0, 1}}, - }} - id := bson.NewObjectId() - err := s.runner.Run(ops, id, nil) - c.Assert(err, IsNil) - - type IdList []interface{} - type Log struct { - Docs IdList "d" - Revnos []int64 "r" - } - var m map[string]*Log - err = chglog.FindId(id).One(&m) - c.Assert(err, IsNil) - - c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}}) - c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}}) - c.Assert(m["debts"], IsNil) - - ops = []txn.Op{{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }, { - C: "accounts", - Id: 1, - Update: M{"$inc": M{"balance": 100}}, - }} - id = bson.NewObjectId() - err = s.runner.Run(ops, id, nil) - c.Assert(err, IsNil) - - m = nil - err = chglog.FindId(id).One(&m) - c.Assert(err, IsNil) - - c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}}) - c.Assert(m["people"], IsNil) - - ops = []txn.Op{{ - C: "accounts", - Id: 0, - Remove: true, - }, { - C: "people", - Id: "joe", - Remove: true, - }} - id = bson.NewObjectId() - err = s.runner.Run(ops, id, nil) - c.Assert(err, IsNil) - - m = nil - err = chglog.FindId(id).One(&m) - c.Assert(err, IsNil) - - c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}}) - c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}}) -} - -func (s *S) TestPurgeMissing(c *C) { - txn.SetChaos(txn.Chaos{ - KillChance: 1, - Breakpoint: "set-applying", - }) - - err := s.accounts.Insert(M{"_id": 0, "balance": 100}) - c.Assert(err, IsNil) - err = s.accounts.Insert(M{"_id": 1, "balance": 100}) - c.Assert(err, IsNil) - - ops1 := []txn.Op{{ - C: "accounts", - Id: 3, - Insert: M{"balance": 100}, - }} - - ops2 := []txn.Op{{ - C: "accounts", - Id: 0, - Remove: true, - }, { - C: "accounts", - Id: 1, - Update: M{"$inc": M{"balance": 100}}, - }, { - C: "accounts", - Id: 2, - Insert: M{"balance": 100}, - }} - - err = s.runner.Run(ops1, "", nil) - c.Assert(err, Equals, 
txn.ErrChaos) - - last := bson.NewObjectId() - err = s.runner.Run(ops2, last, nil) - c.Assert(err, Equals, txn.ErrChaos) - err = s.tc.RemoveId(last) - c.Assert(err, IsNil) - - txn.SetChaos(txn.Chaos{}) - err = s.runner.ResumeAll() - c.Assert(err, IsNil) - - err = s.runner.Run(ops2, "", nil) - c.Assert(err, ErrorMatches, "cannot find transaction .*") - - err = s.runner.PurgeMissing("accounts") - c.Assert(err, IsNil) - - err = s.runner.Run(ops2, "", nil) - c.Assert(err, IsNil) - - expect := []struct{ Id, Balance int }{ - {0, -1}, - {1, 200}, - {2, 100}, - {3, 100}, - } - var got Account - for _, want := range expect { - err = s.accounts.FindId(want.Id).One(&got) - if want.Balance == -1 { - if err != mgo.ErrNotFound { - c.Errorf("Account %d should not exist, find got err=%#v", err) - } - } else if err != nil { - c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance) - } else if got.Balance != want.Balance { - c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance) - } - } -}
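
For reference, the vendored mgo/txn package removed above is driven through the Runner/Op API documented in its deletion hunk. The following is an illustrative sketch only, not part of this change: the "localhost" address and the "txns", "txns.log", and "accounts" collection names are assumptions chosen for the example.

package main

import (
	"log"

	"labix.org/v2/mgo"
	"labix.org/v2/mgo/bson"
	"labix.org/v2/mgo/txn"
)

func main() {
	session, err := mgo.Dial("localhost") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
	db := session.DB("test")

	// NewRunner also uses "txns.stash" internally for insert/remove bookkeeping.
	runner := txn.NewRunner(db.C("txns"))

	// Optional: record applied changes in a log collection as documents of the form
	//   {"_id": <txn id>, <collection>: {"d": [<doc id>, ...], "r": [<revno>, ...]}}
	// where a negative revno means the document ended up absent.
	runner.ChangeLog(db.C("txns.log"))

	id := bson.NewObjectId() // may be recorded ahead of time so the txn can be resumed
	ops := []txn.Op{{
		C:      "accounts",
		Id:     0,
		Assert: txn.DocMissing,         // abort unless the document is absent
		Insert: bson.M{"balance": 300}, // Id becomes the document's _id
	}, {
		C:      "accounts",
		Id:     1,
		Assert: bson.M{"balance": bson.M{"$gte": 100}},
		Update: bson.M{"$inc": bson.M{"balance": -100}},
	}}

	switch err := runner.Run(ops, id, nil); err {
	case nil:
		log.Println("all operations applied")
	case txn.ErrAborted:
		log.Println("an assertion failed; no changes were made")
	default:
		log.Fatal(err)
	}
}

Run returns ErrAborted only when an assertion fails before any change is made; any other error leaves the transaction resumable under the recorded id.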
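
The interruption-and-resume flow exercised by TestQueueStashing and TestPurgeMissing above can be sketched the same way: SetChaos injects a simulated failure at a named breakpoint, and Resume (or ResumeAll) finishes the half-applied transaction once the fault is cleared. Again an illustrative sketch with assumed connection details and collection names.

package main

import (
	"log"

	"labix.org/v2/mgo"
	"labix.org/v2/mgo/bson"
	"labix.org/v2/mgo/txn"
)

func main() {
	session, err := mgo.Dial("localhost") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	runner := txn.NewRunner(session.DB("test").C("txns"))

	// Simulate a crash right after the transaction is flagged as applying,
	// the same breakpoint the tests above use.
	txn.SetChaos(txn.Chaos{KillChance: 1, Breakpoint: "set-applying"})

	id := bson.NewObjectId() // recorded ahead of time so the txn can be resumed
	ops := []txn.Op{{
		C:      "accounts",
		Id:     0,
		Insert: bson.M{"balance": 100},
	}}
	if err := runner.Run(ops, id, nil); err != txn.ErrChaos {
		log.Fatalf("expected simulated interruption, got %v", err)
	}

	// Recovery: disable chaos and finish the interrupted transaction.
	txn.SetChaos(txn.Chaos{})
	if err := runner.Resume(id); err != nil {
		log.Fatal(err)
	}
	// runner.ResumeAll() would instead sweep every unfinished transaction.
}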