diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000..13d837b1
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,14 @@
+# EditorConfig is awesome: https://EditorConfig.org
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+
+[{*.sh,Makefile}]
+indent_style = tab
+
+# Matches the exact file .travis.yml
+[.travis.yml]
+indent_style = space
+indent_size = 2
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 00000000..9b585a1c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,16 @@
+Before opening an issue, make sure there is not a similar issue [already open](https://github.com/digitalocean/do-agent/issues)
+
+## Describe the problem
+
+## Steps to reproduce
+
+## Expected behavior
+
+## System Information
+Distribution:
+
+Version:
+
+Metrics-Agent Version:
+
+(paste the output of `do-agent --version`)
diff --git a/.gitignore b/.gitignore
index 3b95ff06..e42adeb6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,29 +1,3 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-build
-vendor/*
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-/do-agent
-/vendor/*/
+target
+repos/
+sonar-agent.key
diff --git a/.travis.yml b/.travis.yml
index 0d968fa4..7033a245 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,11 +1,34 @@
 language: go
-go:
-  - 1.7
+
+sudo: required
+
+services:
+  - docker
+
+addons:
+  apt:
+    update: true
+
+matrix:
+  # fail fast
+  fast_finish: true
+  include:
+    - go: 1.x
+
 before_install:
-  - go get -u github.com/kardianos/govendor
-before_script:
-  - go get -d ./...
+  - sudo apt-get install -y shellcheck
+  - go get -u github.com/golang/dep/...
+  # coveralls.io deps
+  - go get golang.org/x/tools/cmd/cover github.com/mattn/goveralls
+  # install gometalinter
+  - curl -L https://git.io/vp6lP | sh
+  # gometalinter installs to ./bin
+  - export PATH="$GOPATH/bin:$PWD/bin:$PATH"
+  - dep ensure
+
+install:
+  - skip
+
 script:
-  - ./scripts/licensecheck.sh
-  - govendor sync
-  - make test build
+  - make ci
+  - goveralls -coverprofile=target/.coverprofile -service=travis-ci -repotoken $COVERALLS_API_TOKEN
diff --git a/AUTHORS b/AUTHORS
deleted file mode 100644
index 1e36568a..00000000
--- a/AUTHORS
+++ /dev/null
@@ -1,15 +0,0 @@
-Maintainer
-----------
-DigitalOcean, Inc
-
-Original Authors
-----------------
-Carlos Amedee
-Daniel Barney
-Alex Couture-Beil
-Valient Gough
-
-Contributors
-------------
-Philip Baker
-Bruno Tavares
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..36e43c2e
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at contact@digitalocean.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/Changelog.txt b/Changelog.txt
deleted file mode 100644
index 899dfc1a..00000000
--- a/Changelog.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-0.4.13:
-  * No change release, to fix backend tuf repo issue.
-
-0.4.12:
-  * Fix syslog target from kernel to daemon.
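The Dockerfile change that follows replaces the multi-stage Alpine build with a prebuilt binary on ubuntu:18.04 and bakes the host procfs/sysfs paths into the entrypoint. For orientation, a typical invocation of the resulting image is the same `docker run` shown in the README later in this diff:

```sh
# mount the host's /proc and /sys read-only where the new entrypoint expects them
docker run \
  -v /proc:/host/proc:ro \
  -v /sys:/host/sys:ro \
  digitalocean/do-agent:1
```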
diff --git a/Dockerfile b/Dockerfile index cc384d85..1aab74e3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,39 +1,16 @@ -FROM golang:1.9-alpine +FROM ubuntu:18.04 +MAINTAINER Insights Engineering -ENV CGO=0 -ENV GOOS=linux +RUN set -x && \ + apt-get -qq update && \ + apt-get install -y ca-certificates && \ + apt-get autoclean -ARG CURRENT_BRANCH -ARG CURRENT_HASH -ARG LAST_RELEASE +ADD target/do-agent-linux-amd64 /bin/do-agent -RUN apk update && \ - apk add bash && \ - apk add curl && \ - apk add git && \ - apk add make && \ - apk add libc6-compat +RUN mkdir -p /host -COPY . /go/src/github.com/digitalocean/do-agent +VOLUME /host/proc +VOLUME /host/sys -RUN cd /go/src/github.com/digitalocean/do-agent && \ - set -x && \ - make build RELEASE=${LAST_RELEASE} CURRENT_BRANCH=${CURRENT_BRANCH} CURRENT_HASH=${CURRENT_HASH} - -# Copy what is needed to -FROM alpine -ENV DO_AGENT_REPO_PATH /agent/updates -ENV DO_AGENT_PROCFS_ROOT /agent/proc - -RUN mkdir -p /agent -RUN mkdir -p /agent/updates -RUN mkdir -p /agent/proc - -RUN apk update && \ - apk add libc6-compat && \ - apk add ca-certificates - -COPY --from=0 /go/src/github.com/digitalocean/do-agent/build/do-agent_linux_amd64 /agent -RUN find /agent - -CMD /agent/do-agent_linux_amd64 +ENTRYPOINT ["/bin/do-agent", "--path.procfs", "/host/proc", "--path.sysfs", "/host/sys"] diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 00000000..d1640416 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,219 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/alecthomas/template" + packages = [ + ".", + "parse" + ] + revision = "a0175ee3bccc567396460bf5acd36800cb10c49c" + +[[projects]] + branch = "master" + name = "github.com/alecthomas/units" + packages = ["."] + revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a" + +[[projects]] + name = "github.com/beevik/ntp" + packages = ["."] + revision = "62c80a04de2086884d8296004b6d74ee1846c582" + version = "v0.2.0" + +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + name = "github.com/coreos/go-systemd" + packages = ["dbus"] + revision = "40e2722dffead74698ca12a750f64ef313ddce05" + version = "v16" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + branch = "master" + name = "github.com/ema/qdisc" + packages = ["."] + revision = "b307c22d3ce761d351b6e6270b50195b44ee9248" + +[[projects]] + name = "github.com/godbus/dbus" + packages = ["."] + revision = "a389bdde4dd695d414e47b755e95e72b7826432c" + version = "v4.1.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "3a3da3a4e26776cc22a79ef46d5d58477532dede" + +[[projects]] + branch = "master" + name = "github.com/golang/snappy" + packages = ["."] + revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" + +[[projects]] + branch = "master" + name = "github.com/kolo/xmlrpc" + packages = ["."] + revision = "0826b98aaa29c0766956cb40d45cf7482a597671" + +[[projects]] + branch = "master" + name = "github.com/lufia/iostat" + packages = ["."] + revision = "9f7362b77ad333b26c01c99de52a11bdb650ded2" + +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + branch = 
"master" + name = "github.com/mdlayher/genetlink" + packages = ["."] + revision = "0b18675c0b77924cf6bbf279b68cf9064c4cd265" + +[[projects]] + branch = "master" + name = "github.com/mdlayher/netlink" + packages = [ + ".", + "nlenc" + ] + revision = "25817dc2553bde648b4bd825e625e87d92658bb7" + +[[projects]] + name = "github.com/mdlayher/wifi" + packages = [ + ".", + "internal/nl80211" + ] + revision = "17fb8383f38adbf6a7f12e6cbd1d461760aabf5c" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/prometheus/client_golang" + packages = ["prometheus"] + revision = "42bc0a18c2207d46ac4039bbb6b4cf1507872837" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "log", + "model" + ] + revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" + +[[projects]] + name = "github.com/prometheus/node_exporter" + packages = ["collector"] + revision = "d42bd70f4363dced6b77d8fc311ea57b63387e4f" + version = "v0.16.0" + +[[projects]] + name = "github.com/prometheus/procfs" + packages = [ + ".", + "bcache", + "internal/util", + "nfs", + "sysfs", + "xfs" + ] + revision = "fe93d378a6b03758a2c1b65e86cf630bf78681c0" + +[[projects]] + name = "github.com/sirupsen/logrus" + packages = ["."] + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" + +[[projects]] + branch = "master" + name = "github.com/soundcloud/go-runit" + packages = ["runit"] + revision = "06ad41a06c4a586951fb8040a697ecd39729640b" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require" + ] + revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" + version = "v1.2.2" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "bpf", + "internal/iana", + "internal/socket", + "ipv4" + ] + revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + "windows/registry", + "windows/svc/eventlog" + ] + revision = "bff228c7b664c5fce602223a05fb708fd8654986" + +[[projects]] + name = "gopkg.in/alecthomas/kingpin.v2" + packages = ["."] + revision = "947dcec5ba9c011838740e680966fd7087a71d0d" + version = "v2.2.6" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "38ef05e2ebb03f66dac5ee4341fc04d0ea5741ebcb5315b7ddcb07d5e9bc8920" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 00000000..f35b2eaf --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,38 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "gopkg.in/alecthomas/kingpin.v2" + version = "2.2.6" + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + version = "0.16.0" + name = "github.com/prometheus/node_exporter" diff --git a/Makefile b/Makefile index 976ebf86..4da3970a 100644 --- a/Makefile +++ b/Makefile @@ -1,101 +1,228 @@ -.PHONY: all test clean build dependencies +GOOS ?= linux +GOARCH ?= amd64 -CONFIG_PATH=github.com/digitalocean/do-agent/config - -CURRENT_BRANCH=$(shell git rev-parse --abbrev-ref HEAD) -CURRENT_HASH=$(shell git rev-parse --short HEAD) - -ifeq ("$(shell git name-rev --tags --name-only $(shell git rev-parse HEAD))", "undefined") - RELEASE=dev +ifeq ($(GOARCH),386) +PKG_ARCH = i386 else - RELEASE=$(shell git name-rev --tags --name-only $(shell git rev-parse HEAD) | sed 's/\^.*$///') +PKG_ARCH = amd64 endif -LAST_RELEASE=$(shell git describe --tags $(shell git rev-list --tags --max-count=1)) -GOFLAGS = -ldflags="-X $(CONFIG_PATH).build=$(CURRENT_BRANCH).$(CURRENT_HASH) -X $(CONFIG_PATH).version=$(RELEASE) -extldflags -static" - -GOVENDOR=$(GOPATH)/bin/govendor - -all: build test - -build: dependencies - @echo ">> build version=$(RELEASE)" - @echo ">> Building system native" - @env CGO=0 go build $(GOFLAGS) -o do-agent cmd/do-agent/main.go - @echo ">> Creating build directory" - @mkdir -p build - @echo ">> Building linux 386" - @env CGO=0 GOOS=linux GOARCH=386 go build $(GOFLAGS) -o build/do-agent_linux_386 cmd/do-agent/main.go - @echo ">> Building linux amd64" - @env CGO=0 GOOS=linux GOARCH=amd64 go build $(GOFLAGS) -o build/do-agent_linux_amd64 cmd/do-agent/main.go - -build-latest-release: checkout-latest-release build - -checkout-latest-release: master-branch-check - git fetch --tags - git checkout $(LAST_RELEASE) - -install: - @go get $(GOFLAGS) ./... - -test: dependencies - @echo " ==Running go test==" - @go test -v $(shell go list ./... | grep -v /vendor/) - @echo " ==Running go vet==" - @go vet $(shell go list ./... | grep -v /vendor/) - @go get -u github.com/golang/lint/golint - @echo " ==Running golint==" - @golint ./... 
| grep -v '^vendor\/' | grep -v ".pb.*.go:" || true
-	@echo " ==Done testing=="
+############
+## macros ##
+############
+
+mkdir = @mkdir -p $(dir $@)
+cp = @cp $< $@
+print = @printf "\n:::::::::::::::: [$(shell date -u)] $@ ::::::::::::::::\n"
+touch = @touch $@
+jq = @docker run --rm -i colstrom/jq
+shellcheck = @docker run --rm -i -v "$(CURDIR):$(CURDIR)" -w "$(CURDIR)" -u $(shell id -u) koalaman/shellcheck:v0.6.0
+gometalinter = @docker run --rm -i -v "$(CURDIR):/go/src/$(importpath)" -w "/go/src/$(importpath)" -u $(shell id -u) digitalocean/gometalinter:2.0.11
+fpm = @docker run --rm -i -v "$(CURDIR):$(CURDIR)" -w "$(CURDIR)" -u $(shell id -u) digitalocean/fpm:latest
+now = $(shell date -u)
+git_rev = $(shell git rev-parse --short HEAD)
+git_tag = $(subst v,,$(shell git describe --tags --abbrev=0))
+VERSION ?= $(git_tag)
+
+go = docker run --rm -i \
+	-u "$(shell id -u)" \
+	-e "GOOS=$(GOOS)" \
+	-e "GOARCH=$(GOARCH)" \
+	-e "GOPATH=/gopath" \
+	-e "GOCACHE=/gopath/src/$(importpath)/target/.cache/go" \
+	-v "$(CURDIR):/gopath/src/$(importpath)" \
+	-w "/gopath/src/$(importpath)" \
+	golang:1.11.2 \
+	go
+
+ldflags = '\
+	-X "main.version=$(VERSION)" \
+	-X "main.revision=$(git_rev)" \
+	-X "main.buildDate=$(now)" \
+'
+
+###########
+## paths ##
+###########
+
+out := target
+package_dir := $(out)/pkg
+cache := $(out)/.cache
+project := $(notdir $(CURDIR))# project name
+pkg_project := $(subst _,-,$(project))# package cannot have underscores in the name
+importpath := github.com/digitalocean/$(project)# import path used in gocode
+gofiles := $(shell find -type f -iname '*.go' ! -path './vendor/*')
+vendorgofiles := $(shell find -type f -iname '*.go' -path './vendor/*')
+shellscripts := $(shell find -type f -iname '*.sh' ! -path './repos/*' ! -path './vendor/*')
+# the name of the binary built with local resources
+binary := $(out)/$(project)-$(GOOS)-$(GOARCH)
+cover_profile := $(out)/.coverprofile
+
+# output packages
+# deb files should end with _version_arch.deb
+# rpm files should end with -version-release.arch.rpm
+base_package := $(package_dir)/$(pkg_project).$(VERSION).$(PKG_ARCH).BASE.deb
+deb_package := $(package_dir)/$(pkg_project)_$(VERSION)_$(PKG_ARCH).deb
+rpm_package := $(package_dir)/$(pkg_project).$(VERSION).$(PKG_ARCH).rpm
+tar_package := $(package_dir)/$(pkg_project).$(VERSION).tar.gz
+
+# use the binary's mtime for epoch for consistency. This needs to be lazily
+# evaluated since the binary does not yet exist
+epoch = $(shell date '+%s' -r $(binary))
+
+#############
+## targets ##
+#############
+
+build: $(binary)
+$(binary): $(gofiles) $(vendorgofiles)
+	$(print)
+	$(mkdir)
+	$(go) build \
+		-ldflags $(ldflags) \
+		-o "$@" \
+		./cmd/$(project)
+
+package: release
+release: target/VERSION
+	$(print)
+	@GOOS=linux GOARCH=386 $(MAKE) build deb rpm tar
+	@GOOS=linux GOARCH=amd64 $(MAKE) build deb rpm tar
+
+lint: $(cache)/lint
+$(cache)/lint: $(gofiles)
+	$(print)
+	$(mkdir)
+	@$(gometalinter) --config=gometalinter.json ./...
+	$(touch)
+
+shellcheck: $(cache)/shellcheck
+$(cache)/shellcheck: $(shellscripts)
+	$(print)
+	$(mkdir)
+	@$(shellcheck) --version
+	@$(shellcheck) $^
+	$(touch)
+
+test: $(cover_profile)
+$(cover_profile): $(gofiles)
+	$(print)
+	$(mkdir)
+	@$(go) test -coverprofile=$@ ./...
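The `ldflags` macro above is how the containerized build stamps release metadata: each `-X` flag overwrites a string variable in package `main` (declared in `cmd/do-agent/version.go`, added later in this diff) at link time. A minimal sketch of the same mechanism without the Docker wrapper, assuming a tagged checkout:

```sh
# mirror the VERSION and git_rev macros above, then link them into the binary
VERSION=$(git describe --tags --abbrev=0 | sed 's/^v//')
REVISION=$(git rev-parse --short HEAD)
go build -ldflags "-X main.version=$VERSION -X main.revision=$REVISION" ./cmd/do-agent
./do-agent --version   # prints the injected version and revision
```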
 clean:
-	rm do-agent
-	rm -fr build
-
-dependencies: $(GOVENDOR)
-	@echo ">> fetching dependencies"
-	@$(GOVENDOR) sync
-
-$(GOVENDOR):
-	@echo ">> fetching govendor"
-	@go get -u github.com/kardianos/govendor
-
-docker:
-	docker build . \
-		--build-arg CURRENT_HASH="$(CURRENT_HASH)" \
-		--build-arg CURRENT_BRANCH="$(CURRENT_BRANCH)" \
-		--build-arg LAST_RELEASE="$(LAST_RELEASE).$(CURRENT_HASH)-docker" \
-		-t do-agent \
-		-t do-agent:$(LAST_RELEASE)
-
-list-latest-release:
-	@echo $(LAST_RELEASE)
-
-release-major-version: master-branch-check
-	@echo ">> release major version"
-	$(eval RELEASE_VERSION=$(shell echo $(LAST_RELEASE) | awk '{split($$0,a,"."); print a[1]+1"."0"."0}'))
-	@echo "Updating release version from=$(LAST_RELEASE) to=$(RELEASE_VERSION)"
-	git tag $(RELEASE_VERSION) -m"make release-major-version $(RELEASE_VERSION)"
-	git push origin --tags
-
-release-minor-version: master-branch-check
-	@echo "release minor version"
-	$(eval RELEASE_VERSION=$(shell echo $(LAST_RELEASE) | awk '{split($$0,a,"."); print a[1]"."a[2]+1"."0}'))
-	@echo "Updating release version from=$(LAST_RELEASE) to=$(RELEASE_VERSION)"
-	git tag $(RELEASE_VERSION) -m"make release-minor-version $(RELEASE_VERSION)"
-	git push origin --tags
-
-release-patch-version: master-branch-check
-	@echo "release patch version"
-	$(eval RELEASE_VERSION=$(shell echo $(LAST_RELEASE) | awk '{split($$0,a,"."); print a[1]"."a[2]"."a[3]+1}'))
-	@echo "Updating release version from=$(LAST_RELEASE) to=$(RELEASE_VERSION)"
-	git tag $(RELEASE_VERSION) -m"make release-patch-version $(RELEASE_VERSION)"
-	git push origin --tags
-
-
-master-branch-check:
-ifeq ("$(shell git rev-parse --abbrev-ref HEAD)", "master")
-	@echo "Current branch is master"
-else
-	$(error Action requires the master branch)
-endif
+	$(print)
+	@rm -rf $(out)
+.PHONY: clean
+
+ci: clean lint shellcheck test package
+.PHONY: ci
+
+.PHONY: target/VERSION
+target/VERSION:
+	$(print)
+	$(mkdir)
+	@echo $(VERSION) > $@
+
+# used to create a base package with common functionality
+$(base_package): $(binary)
+	$(print)
+	$(mkdir)
+	@$(fpm) --output-type deb \
+		--verbose \
+		--input-type dir \
+		--force \
+		--architecture $(PKG_ARCH) \
+		--package $@ \
+		--no-depends \
+		--name $(pkg_project) \
+		--maintainer "DigitalOcean" \
+		--version $(VERSION) \
+		--description "DigitalOcean stats collector" \
+		--license apache-2.0 \
+		--vendor DigitalOcean \
+		--url https://github.com/digitalocean/do-agent \
+		--log info \
+		--after-install packaging/scripts/after_install.sh \
+		--after-remove packaging/scripts/after_remove.sh \
+		$<=/usr/local/bin/do-agent \
+		scripts/update.sh=/opt/digitalocean/do-agent/scripts/update.sh
+.INTERMEDIATE: $(base_package)
+
+deb: $(deb_package)
+$(deb_package): $(base_package)
+	$(print)
+	$(mkdir)
+	@$(fpm) --output-type deb \
+		--verbose \
+		--input-type deb \
+		--force \
+		--depends cron \
+		--conflicts do-agent \
+		--replaces do-agent \
+		--deb-user nobody \
+		--deb-group nogroup \
+		-p $@ \
+		$<
	chown -R $(USER):$(USER) target
# print information about the compiled deb package
+	@docker run --rm -i -v "$(CURDIR):$(CURDIR)" -w "$(CURDIR)" ubuntu:xenial /bin/bash -c 'dpkg --info $@ && dpkg -c $@'
+
+
+rpm: $(rpm_package)
+$(rpm_package): $(base_package)
+	$(print)
+	$(mkdir)
+	@$(fpm) \
+		--verbose \
+		--output-type rpm \
+		--epoch $(epoch) \
+		--input-type deb \
+		--depends cronie \
+		--conflicts do-agent \
+		--replaces do-agent \
+		--rpm-group nobody \
+		--rpm-user nobody \
+		--force \
+		-p $@ \
+		$<
	chown -R $(USER):$(USER) target
# print information about the compiled rpm package
+	@docker run --rm -i -v "$(CURDIR):$(CURDIR)" -w "$(CURDIR)" centos:7 rpm -qilp $@
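With the base package defined once, the `deb` and `rpm` targets above and the `tar` target below are format conversions of that one intermediate fpm artifact rather than independent builds. Under the assumptions these targets encode (Docker available, a version tag reachable), a local packaging run would look like:

```sh
# build both architectures and every package format into target/pkg/
make release

# or build a single format for one architecture
GOARCH=amd64 make build deb
```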
+
+tar: $(tar_package)
+$(tar_package): $(base_package)
+	$(print)
+	$(mkdir)
+	@$(fpm) \
+		--verbose \
+		--output-type tar \
+		--input-type deb \
+		--force \
+		-p $@ \
+		$<
	chown -R $(USER):$(USER) target
# print all files within the archive
+	@docker run --rm -i -v "$(CURDIR):$(CURDIR)" -w "$(CURDIR)" ubuntu:xenial tar -ztvf $@
+
+
+.vault-token:
+	$(print)
+	@docker run -u $(shell id -u) --net=host --rm -i \
+		docker.internal.digitalocean.com/eng-insights/vault:0.11.5 \
+		write -field token auth/approle/login role_id=$(VAULT_ROLE_ID) secret_id=$(VAULT_SECRET_ID) \
+		| cp /dev/stdin $@
+.INTERMEDIATE: .vault-token
+
+sonar-agent.key: .vault-token
+	$(print)
+	@docker run -u $(shell id -u) --net=host --rm -i -e "VAULT_TOKEN=$(shell cat $^)" \
+		docker.internal.digitalocean.com/eng-insights/vault:0.11.5 \
+		read --field gpg secret/agent/packager/key \
+		| cp /dev/stdin $@
+
+deploy: release sonar-agent.key
+	./scripts/deploy.sh all
+
+promote: release sonar-agent.key
+	./scripts/deploy.sh promote
diff --git a/README.md b/README.md
index 5e899566..f1e6fcdd 100644
--- a/README.md
+++ b/README.md
@@ -1,49 +1,23 @@
-# Do-Agent
+# DigitalOcean Agent
 
-[![Build Status](https://travis-ci.org/digitalocean/do-agent.svg?branch=master)](https://travis-ci.org/digitalocean/do-agent)
-[![GoDoc](https://godoc.org/github.com/digitalocean/do-agent?status.svg)](https://godoc.org/github.com/digitalocean/do-agent)
+[![Build
+Status](https://travis-ci.org/digitalocean/do-agent.svg?branch=master)](https://travis-ci.org/digitalocean/do-agent)
+[![Go Report Card](https://goreportcard.com/badge/github.com/digitalocean/do-agent)](https://goreportcard.com/report/github.com/digitalocean/do-agent)
+[![Coverage Status](https://coveralls.io/repos/github/digitalocean/do-agent/badge.svg?branch=feat%2Fadd-coveralls-report)](https://coveralls.io/github/digitalocean/do-agent?branch=feat%2Fadd-coveralls-report)
 
-## General
+## Overview
+do-agent enables droplet metrics to be gathered and sent to DigitalOcean to provide resource usage graphs and alerting.
 
-The `do-agent` extracts system metrics from
-DigitalOcean Droplets and transmits them to the DigitalOcean
-monitoring service. When the agent is initiated on a Droplet it
-will automatically configure itself with the appropriate settings.
+DO Agent currently supports:
+- Ubuntu 14.04+
+- Debian 8+
+- Fedora 27+
+- CentOS 6+
+- Docker (see below)
 
-## Flags
+## Installation
 
-Flag |Option |Description
------|-------|-----------
--log_syslog | bool | Log to syslog.
--log_level | string | Sets the log level. [ INFO, ERROR, DEBUG ] Default=INFO
--force_update | bool | force an agent update.
--v | bool | Prints DigitalOcean Agent version.
--h | bool | Prints `do-agent` usage information.
-
-## Env Flags
-
-Generally used during development and debugging.
-
-Flag |Option |Description
------|-------|-----------
-DO_AGENT_AUTHENTICATION_URL | string | Override the authentication URL
-DO_AGENT_APPKEY | string | Override AppKey
-DO_AGENT_METRICS_URL | string | Override metrics URL
-DO_AGENT_DROPLET_ID | int64 | Override Droplet ID
-DO_AGENT_AUTHTOKEN | string | Override AuthToken
-DO_AGENT_UPDATE_URL | string | Override Update URL
-DO_AGENT_REPO_PATH | string | Override Local repository path
-DO_AGENT_PLUGIN_PATH | string | Override plugin directory path
-DO_AGENT_PROCFS_ROOT | string | Override location of /proc
-
-## Building and running
-
-	make build
-	sudo -u nobody ./do-agent
-
-## Running Tests
-
-	make test
+To install do-agent on new Droplets, simply select the Monitoring checkbox on the Droplet create screen to get the latest stable version of do-agent.
Use your OS package manager (yum/dnf/apt-get) to update and manage do-agent. ## Installing via package managers @@ -72,191 +46,50 @@ rpm --import https://repos.sonar.digitalocean.com/sonar-agent.asc yum install do-agent ``` -## Update via package managers - -### Yum - -`yum update do-agent` - - -### Apt - -`apt-get update && apt-get install --only-upgrade do-agent` - - -## Removal via package managers - -### Yum - -`sudo yum remove do-agent` - - -### Apt - -`sudo apt-get purge do-agent` - - -## Package installation - -### Deb - -`dpkg -i do-agent__.deb` - - -### Rpm - -`rpm -Uvh do-agent-_.rpm` - - -## Package removal +## Development -### Deb +### Requirements -`dpkg --remove do-agent` +- [go](https://golang.org/dl/) +- [golang/dep](https://github.com/golang/dep#installation) +- [GNU Make](https://www.gnu.org/software/make/) +- [gometalinter](https://github.com/alecthomas/gometalinter#installing) - -### Rpm - -`rpm -e do-agent` - - -## Docker - -You can optionally run the Agent in Docker. To do so: -`make docker` - -In order for the agent to report accurate metrics, you need to bind mount in -/proc inside the container: -`docker run --rm \ - -v /proc:/agent/proc:ro \ - do-agent -` - - -## Plugins - -`do-agent` builds in a common set of metrics, and provides a plugin mechanism to -add additional metric collectors. Plugins are executable files that are placed -in the agent's plugin directory (see `DO_AGENT_PLUGIN_PATH`). When `do-agent` -starts, it will find all executables in the plugin path and call them during -metric collection. - -A collection agent must do two things: - -1. report metric configuration to stdout when a "config" argument is passed. -1. report metric values to stdout when no argument is passed. - -See `plugins/test.sh` for a simple static plugin, in the form of a shell script. -Plugins may be written in any language, and must only know how to produce -serialized results in `json` format. - - -### Definitions - -When test.sh is run with "config" as the argument, it produces the following: - -```json -{ - "definitions": { - "test": { - "type": 1, - "labels": { - "user": "foo" - }, - "label_keys": ["timezone", "country"] - } - } -} ``` +git clone git@github.com:digitalocean/do-agent.git \ + $GOPATH/src/github.com/digitalocean/do-agent +cd !$ -Definitions is a mapping of metric name to configuration. `type` is an integer -type, compatible with the Prometheus client definition. The following types are -supported: - -Type Value |Description ----|-------- -0 | Counter -1 | Gauge - -Two types of labels are supported: fixed and dynamic. Fixed labels are specified -once in the metric definition and never specified in the metric value. In the -example above, there is a fixed label `user:foo`. This label will be set on -all values of the metric. - -The second type of label is a dynamic label. Dynamic labels have a fixed key -but the value is specified with every update of the metric. The example above -has two dynamic labels, with the keys `timezone` and `country`. The values -for these labels will be provided on each metric update. - -### Values - -When test.sh is run without any arguments, it produces the current value of -the collected metrics. For example: +# build the project +make -```json -{ - "metrics": { - "test": { - "value": 42.0, - "label_values": ["est", "usa"] - } - } -} +# add dependencies +dep ensure -v -add ``` -The _(optional)_ label_values must correspond to the label_keys in the definition. 
-If there were two label keys in the definition, then there must be exactly two
-label values in each metric update.
+### Uninstall
+
-## Contributing
+do-agent can be uninstalled with your distribution's package manager
+
-The `do-agent` project makes use of the [GitHub Flow](https://guides.github.com/introduction/flow/)
-for contributions.
+`apt remove do-agent` for Debian-based distros
+
-If you'd like to contribute to the project, please
-[open an issue](https://github.com/digitalocean/do-agent/issues/new) or find an
-[existing issue](https://github.com/digitalocean/do-agent/issues) that you'd like
-to take on. This ensures that efforts are not duplicated, and that a new feature
-aligns with the focus of the rest of the repository.
+`yum remove do-agent` for RHEL-based distros
 
-Once your suggestion has been submitted and discussed, please be sure that your
-code meets the following criteria:
-  - code is completely `gofmt`'d
-  - new features or codepaths have appropriate test coverage
-  - `go test ./...` passes
-  - `go vet ./...` passes
-  - `golint ./...` returns no warnings, including documentation comment warnings
-
-In addition, if this is your first time contributing to the `do-agent` project,
-add your name and email address to the
-[AUTHORS](https://github.com/digitalocean/do-agent/blob/master/AUTHORS) file
-under the "Contributors" section using the format:
-`First Last `.
+### Run as a Docker container
 
-First, to setup the project, ensure that `$GOPATH` variable is pointing to an
-existing directory, where all your Go code will be available.
+You can optionally run do-agent as a Docker container. In order to do so
+you need to mount the host directories `/proc` and `/sys` to `/host/proc`
+and `/host/sys`.
 
-After forking the repo on Github, run:
+For example:
 
-```sh
-go get github.com/digitalocean/do-agent
-cd $GOPATH/src/github.com/digitalocean/do-agent
-
-git remote add my-fork 
-make test # ensure tests are passing
-```
-
-Push your changes to your fork:
-
-```sh
-git push -u master my-fork
+```
+docker run \
+  -v /proc:/host/proc:ro \
+  -v /sys:/host/sys:ro \
+  digitalocean/do-agent:1
+```
-
-Finally, submit a pull request for review!
-
-## Report a bug
-
-If you discover a software bug, please feel free to report it by:
-
-1. Search through the [existing issues](https://github.com/digitalocean/do-agent/issues) to ensure that the bug hasn't already been filed.
-2. [Open an issue](https://github.com/digitalocean/do-agent/issues/new) for a new bug.
+## Report an Issue
+Feel free to [open an issue](https://github.com/digitalocean/do-agent/issues/new)
+if one does not [already exist](https://github.com/digitalocean/do-agent/issues).
diff --git a/bootstrap/bootstrap.go b/bootstrap/bootstrap.go
deleted file mode 100644
index 2abb062d..00000000
--- a/bootstrap/bootstrap.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package bootstrap - -import ( - "fmt" - - "github.com/digitalocean/do-agent/monitoringclient" -) - -//Credentials contains do-agent credentials and config required to talk to monitoring -type Credentials struct { - AppKey string `json:"appkey,omitempty"` - HostUUID string `json:"host_uuid,omitempty"` - Region string `json:"region,omitempty"` - DropletID int64 `json:"droplet_id,omitempty"` - LocalMACs []string `json:"local_macs,omitempty"` -} - -// MetadataReader is the interface a DigitalOcean metadata client should implement -type MetadataReader interface { - DropletID() (int, error) - Region() (string, error) - AuthToken() (string, error) -} - -func loadCredentialFromMetadata(md MetadataReader, monitor monitoringclient.MonitoringClient) (*Credentials, error) { - authToken, err := md.AuthToken() - if err != nil { - return nil, err - } - - appKey, err := monitor.GetAppKey(authToken) - if err != nil { - return nil, err - } - - dropletID, err := md.DropletID() - if err != nil { - return nil, err - } - - region, err := md.Region() - if err != nil { - return nil, err - } - - return &Credentials{ - AppKey: appKey, - DropletID: int64(dropletID), - Region: region, - }, nil -} - -func loadCredentialWithOverrides(monitor monitoringclient.MonitoringClient, configAppKey string, configDropletID int64, configAuthToken string) (*Credentials, error) { - appkey := configAppKey - if configAuthToken != "" { - sAppKey, err := monitor.GetAppKey(configAuthToken) - if err != nil { - return nil, fmt.Errorf("unable to retrieve appkey from monitoring: %s", err.Error()) - } - appkey = sAppKey - } - - return &Credentials{ - AppKey: appkey, - DropletID: configDropletID, - Region: "master", - }, nil -} - -// InitCredentials will read (or create) the credentials file (if running on a droplet) -func InitCredentials(md MetadataReader, monitor monitoringclient.MonitoringClient, configAppKey string, configDropletID int64, configAuthToken string) (*Credentials, error) { - if configAppKey != "" || configAuthToken != "" { - return loadCredentialWithOverrides(monitor, configAppKey, configDropletID, configAuthToken) - } - return loadCredentialFromMetadata(md, monitor) -} diff --git a/bootstrap/bootstrap_test.go b/bootstrap/bootstrap_test.go deleted file mode 100644 index 9ee32f05..00000000 --- a/bootstrap/bootstrap_test.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package bootstrap - -import ( - "errors" - "testing" -) - -type stubMetadataClient struct { - DropletIDMethod func() (int, error) - RegionMethod func() (string, error) - AuthTokenMethod func() (string, error) -} - -func (s *stubMetadataClient) DropletID() (int, error) { return s.DropletIDMethod() } -func (s *stubMetadataClient) Region() (string, error) { return s.RegionMethod() } -func (s *stubMetadataClient) AuthToken() (string, error) { return s.AuthTokenMethod() } - -var _ MetadataReader = (*stubMetadataClient)(nil) - -type stubMonitoringClient struct { - getAppKeyMethod func(string) (string, error) - registerHostUUIDMethod func(string, string) error -} - -func (s *stubMonitoringClient) GetAppKey(authToken string) (string, error) { - return s.getAppKeyMethod(authToken) -} - -func (s *stubMonitoringClient) RegisterHostUUID(appKey, hostUUID string) error { - return s.registerHostUUIDMethod(appKey, hostUUID) -} - -func TestDropletBootstrap(t *testing.T) { - - expectedDropletID := int(999) - expectedRegion := "test1" - expectedAuthToken := "authtestabc" - - md := &stubMetadataClient{ - AuthTokenMethod: func() (string, error) { return expectedAuthToken, nil }, - DropletIDMethod: func() (int, error) { return expectedDropletID, nil }, - RegionMethod: func() (string, error) { return expectedRegion, nil }, - } - - expectedAppKey := "testappkey" - - monitoringClient := struct{ stubMonitoringClient }{} - monitoringClient.getAppKeyMethod = func(authToken string) (string, error) { - if authToken == expectedAuthToken { - return expectedAppKey, nil - } - return "", errors.New("Auth token invalid") - } - - credentials, err := InitCredentials(md, &monitoringClient, "", 0, "") - if err != nil { - t.Fatal(err) - } - - checkCredentials(t, credentials, 0, "", int64(expectedDropletID), expectedRegion, expectedAppKey) - - credentials2, err := InitCredentials(md, &monitoringClient, "", 0, "") - if err != nil { - t.Fatal(err) - } - - checkCredentials(t, credentials2, 0, "", credentials.DropletID, credentials.Region, credentials.AppKey) -} - -func TestDropletBootstrapWithOverides(t *testing.T) { - - expectedDropletID := int(999) - expectedDropletIDOverride := int64(0) - expectedRegion := "master" - expectedAuthToken := "authy" - - md := struct{ stubMetadataClient }{} - md.AuthTokenMethod = func() (string, error) { return expectedAuthToken, nil } - md.DropletIDMethod = func() (int, error) { return expectedDropletID, nil } - md.RegionMethod = func() (string, error) { return expectedRegion, nil } - - expectedAppKey := "testappkey" - expectedAppKeyOverride := "authy" - - monitoringClient := struct{ stubMonitoringClient }{} - monitoringClient.getAppKeyMethod = func(authToken string) (string, error) { - if authToken == expectedAuthToken { - return expectedAppKey, nil - } - return "", errors.New("Auth token invalid") - } - - // Configuring auth token should bypass the metadata service - credentials, err := InitCredentials(&md, &monitoringClient, "", int64(expectedDropletID), "authy") - if err != nil { - t.Fatal(err) - } - - checkCredentials(t, credentials, 0, "", int64(expectedDropletID), expectedRegion, expectedAppKey) - - // Configure app key should bypass metadata service and sonar service - credentials2, err := InitCredentials(&md, &monitoringClient, expectedAppKeyOverride, 0, "") - if err != nil { - t.Fatal(err) - } - - checkCredentials(t, credentials2, 0, "", expectedDropletIDOverride, expectedRegion, expectedAppKeyOverride) -} - -func checkCredentials(t *testing.T, c *Credentials, eMacsCount int, eHostUUID 
string, eDropletID int64, eRegion string, eAppKey string) {
-	if len(c.LocalMACs) != eMacsCount {
-		t.Errorf("LocalMacs: want %d got %d", eMacsCount, len(c.LocalMACs))
-	}
-	if c.HostUUID != eHostUUID {
-		t.Errorf("HostUUID want %s, got %s", eHostUUID, c.HostUUID)
-	}
-	if c.DropletID != eDropletID {
-		t.Errorf("DropletID want %d got %d", eDropletID, c.DropletID)
-	}
-	if c.Region != eRegion {
-		t.Errorf("Region want %s got %s", eRegion, c.Region)
-	}
-	if c.AppKey != eAppKey {
-		t.Errorf("AppKey want %s got %s", eAppKey, c.AppKey)
-	}
-}
diff --git a/cmd/do-agent/config.go b/cmd/do-agent/config.go
new file mode 100644
index 00000000..de01d491
--- /dev/null
+++ b/cmd/do-agent/config.go
@@ -0,0 +1,176 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/digitalocean/do-agent/internal/log"
+	"github.com/digitalocean/do-agent/pkg/clients/tsclient"
+	"github.com/digitalocean/do-agent/pkg/collector"
+	"github.com/digitalocean/do-agent/pkg/decorate"
+	"github.com/digitalocean/do-agent/pkg/decorate/compat"
+	"github.com/digitalocean/do-agent/pkg/writer"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"gopkg.in/alecthomas/kingpin.v2"
+)
+
+var (
+	config struct {
+		targets       map[string]string
+		metadataURL   *url.URL
+		authURL       *url.URL
+		sonarEndpoint string
+		stdoutOnly    bool
+		debug         bool
+		syslog        bool
+	}
+
+	// additionalParams is a list of extra command line flags to append
+	// this is mostly needed for appending node_exporter flags when necessary.
+	additionalParams = []string{}
+
+	// disabledCollectors is a hash used by disableCollectors to prevent
+	// duplicate entries
+	disabledCollectors = map[string]interface{}{}
+)
+
+const (
+	defaultMetadataURL = "http://169.254.169.254/metadata"
+	defaultAuthURL     = "https://sonar.digitalocean.com"
+	defaultSonarURL    = ""
+)
+
+func init() {
+	kingpin.Flag("auth-host", "Endpoint to use for obtaining droplet app key").
+		Default(defaultAuthURL).
+		URLVar(&config.authURL)
+
+	kingpin.Flag("metadata-host", "Endpoint to use for obtaining droplet metadata").
+		Default(defaultMetadataURL).
+		URLVar(&config.metadataURL)
+
+	kingpin.Flag("sonar-host", "Endpoint to use for delivering metrics").
+		Default(defaultSonarURL).
+		StringVar(&config.sonarEndpoint)
+
+	kingpin.Flag("stdout-only", "write all metrics to stdout only").
+		BoolVar(&config.stdoutOnly)
+
+	kingpin.Flag("debug", "display debug information to stdout").
+		BoolVar(&config.debug)
+
+	kingpin.Flag("syslog", "enable logging to syslog").
+		BoolVar(&config.syslog)
+}
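These flags are what make the agent testable outside a Droplet: with `--stdout-only`, `initWriter` below swaps the Sonar client for a stdout writer on a fixed 10-second interval, so nothing needs to reach DigitalOcean. A hypothetical local invocation using only flags registered above:

```sh
# print collected metrics to stdout every ~10s instead of shipping them to Sonar
./do-agent --stdout-only

# verbose client logging when delivering to a custom endpoint (example URL)
./do-agent --debug --sonar-host "https://sonar.example.internal:8080"
```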
+
+func checkConfig() error {
+	var err error
+	for name, uri := range config.targets {
+		if _, err = url.Parse(uri); err != nil {
+			return errors.Wrapf(err, "url for target %q is not valid", name)
+		}
+	}
+	return nil
+}
+
+func initWriter(ctx context.Context) (metricWriter, throttler) {
+	if config.stdoutOnly {
+		return writer.NewFile(os.Stdout), &constThrottler{wait: 10 * time.Second}
+	}
+
+	tsc, err := newTimeseriesClient(ctx)
+	if err != nil {
+		log.Fatal("failed to connect to sonar: %+v", err)
+	}
+	return writer.NewSonar(tsc), tsc
+}
+
+func initDecorator() decorate.Chain {
+	return decorate.Chain{
+		compat.Names{},
+		compat.Disk{},
+		compat.CPU{},
+		decorate.LowercaseNames{},
+	}
+}
+
+// WrappedTSClient wraps the tsClient and adds a Name method to it
+type WrappedTSClient struct {
+	tsclient.Client
+}
+
+// Name returns the name of the client
+func (m *WrappedTSClient) Name() string { return "tsclient" }
+
+func newTimeseriesClient(ctx context.Context) (*WrappedTSClient, error) {
+	clientOptions := []tsclient.ClientOptFn{
+		tsclient.WithUserAgent(fmt.Sprintf("do-agent-%s", version)),
+		tsclient.WithRadarEndpoint(config.authURL.String()),
+		tsclient.WithMetadataEndpoint(config.metadataURL.String()),
+	}
+
+	if config.sonarEndpoint != "" {
+		clientOptions = append(clientOptions, tsclient.WithWharfEndpoint(config.sonarEndpoint))
+	}
+
+	if config.debug {
+		logger := func(msg string) {
+			fmt.Println(strings.TrimSpace(msg))
+		}
+		clientOptions = append(clientOptions, tsclient.WithLogger(logger))
+	}
+
+	tsClient := tsclient.New(clientOptions...)
+	wrappedTSClient := &WrappedTSClient{tsClient}
+
+	return wrappedTSClient, nil
+}
+
+// initCollectors initializes the prometheus collectors. By default this
+// includes node_exporter and buildInfo for each remote target
+func initCollectors() []prometheus.Collector {
+	// buildInfo provides build information for tracking metrics internally
+	cols := []prometheus.Collector{buildInfo}
+
+	// create the default node collector to gather metrics about
+	// this device
+	node, err := collector.NewNodeCollector()
+	if err != nil {
+		log.Fatal("failed to create DO agent: %+v", err)
+	}
+	log.Info("%d node_exporter collectors were registered", len(node.Collectors()))
+
+	for name := range node.Collectors() {
+		log.Info("node_exporter collector registered %q", name)
+	}
+	cols = append(cols, node)
+
+	return cols
+}
+
+// disableCollectors disables collectors by names by adding a list of
+// --no-collector.<name> flags to additionalParams
+func disableCollectors(names ...string) {
+	f := []string{}
+	for _, name := range names {
+		if _, ok := disabledCollectors[name]; ok {
+			// already disabled
+			continue
+		}
+
+		disabledCollectors[name] = nil
+		f = append(f, disableCollectorFlag(name))
+	}
+
+	additionalParams = append(additionalParams, f...)
+} + +// disableCollectorFlag creates the correct cli flag for the given collector name +func disableCollectorFlag(name string) string { + return fmt.Sprintf("--no-collector.%s", name) +} diff --git a/cmd/do-agent/config_dragonfly.go b/cmd/do-agent/config_dragonfly.go new file mode 100644 index 00000000..e7cce8f0 --- /dev/null +++ b/cmd/do-agent/config_dragonfly.go @@ -0,0 +1,8 @@ +package main + +func init() { + registerFilesystemFlags() + disableCollectors("boottime", "exec", "ntp", "runit", "supervisord", + "textfile", "time", + ) +} diff --git a/cmd/do-agent/config_filesystem.go b/cmd/do-agent/config_filesystem.go new file mode 100644 index 00000000..f85f2204 --- /dev/null +++ b/cmd/do-agent/config_filesystem.go @@ -0,0 +1,39 @@ +package main + +import ( + "strings" + "sync" +) + +const ( + ignoredMountPointFlag = "--collector.filesystem.ignored-mount-points" + ignoredFSTypesFlag = "--collector.filesystem.ignored-fs-types" +) + +var ( + ignoredMountPoints = strings.Join([]string{ + "fusectl", "lxcfs", "mqueue", "none", "rootfs", "sunrpc", + "systemd", "udev", + }, `|`) + + ignoredFSTypes = strings.Join([]string{ + "aufs", "autofs", "binfmt_misc", "cifs", "cgroup", "debugfs", + "devpts", "devtmpfs", "ecryptfs", "efivarfs", "fuse", + "hugetlbfs", "mqueue", "nfs", "overlayfs", "proc", "pstore", + "rpc_pipefs", "securityfs", "smb", "sysfs", "tmpfs", "tracefs", + "squashfs", + }, `|`) + + onceRegisterFilesystemFlags = new(sync.Once) +) + +// registerFilesystemFlags registers filesystem cli flags. +// This should be called from within OS-specific builds since the underlying +// collectors will not be registered otherwise. +// This func can be called multiple times. +func registerFilesystemFlags() { + onceRegisterFilesystemFlags.Do(func() { + additionalParams = append(additionalParams, ignoredFSTypesFlag, ignoredFSTypes) + additionalParams = append(additionalParams, ignoredMountPointFlag, ignoredMountPoints) + }) +} diff --git a/cmd/do-agent/config_filesystem_test.go b/cmd/do-agent/config_filesystem_test.go new file mode 100644 index 00000000..fc6903e0 --- /dev/null +++ b/cmd/do-agent/config_filesystem_test.go @@ -0,0 +1,23 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRegisterFilesystemFlagsRegistersFSTypesFlag(t *testing.T) { + // this is initialized with the _linux.go file if run on linux, but + // this test should run on all operating systems so we initialize it + registerFilesystemFlags() + assert.NotEmpty(t, additionalParams) + assert.Contains(t, additionalParams, ignoredFSTypes) +} + +func TestRegisterFilesystemFlagsRegistersMountPointFlag(t *testing.T) { + // this is initialized with the _linux.go file if run on linux, but + // this test should run on all operating systems so we initialize it + registerFilesystemFlags() + assert.NotEmpty(t, additionalParams) + assert.Contains(t, additionalParams, ignoredMountPoints) +} diff --git a/cmd/do-agent/config_freebsd.go b/cmd/do-agent/config_freebsd.go new file mode 100644 index 00000000..e7cce8f0 --- /dev/null +++ b/cmd/do-agent/config_freebsd.go @@ -0,0 +1,8 @@ +package main + +func init() { + registerFilesystemFlags() + disableCollectors("boottime", "exec", "ntp", "runit", "supervisord", + "textfile", "time", + ) +} diff --git a/cmd/do-agent/config_linux.go b/cmd/do-agent/config_linux.go new file mode 100644 index 00000000..c231659c --- /dev/null +++ b/cmd/do-agent/config_linux.go @@ -0,0 +1,12 @@ +package main + +func init() { + registerFilesystemFlags() + disableCollectors("arp", 
"bcache", "bonding", "buddyinfo", "conntrack", + "drbd", "edac", "entropy", "filefd", "hwmon", "infiniband", + "interrupts", "ipvs", "ksmd", "logind", "mdadm", "meminfo_numa", + "mountstats", "nfs", "nfsd", "ntp", "qdisc", "runit", "sockstat", + "supervisord", "systemd", "tcpstat", "textfile", "time", + "wifi", "xfs", "zfs", + ) +} diff --git a/cmd/do-agent/config_test.go b/cmd/do-agent/config_test.go new file mode 100644 index 00000000..ec13df72 --- /dev/null +++ b/cmd/do-agent/config_test.go @@ -0,0 +1,37 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDisableCollectorsAddsCorrectFlags(t *testing.T) { + // some params are added by init funcs in os files so reset it to test + additionalParams = []string{} + disabledCollectors = map[string]interface{}{} + + items := []string{"hello", "world"} + flags := make([]string, len(items)) + for i, item := range items { + flags[i] = disableCollectorFlag(item) + } + + disableCollectors(items...) + assert.EqualValues(t, flags, additionalParams) +} + +func TestDisableCollectorsIsIdempotent(t *testing.T) { + // some params are added by init funcs in os files so reset it to test + additionalParams = []string{} + disabledCollectors = map[string]interface{}{} + + items := []string{"hello", "world", "world"} + flags := []string{ + disableCollectorFlag("hello"), + disableCollectorFlag("world"), + } + + disableCollectors(items...) + assert.EqualValues(t, flags, additionalParams) +} diff --git a/cmd/do-agent/main.go b/cmd/do-agent/main.go index e62b2cae..e0401b27 100644 --- a/cmd/do-agent/main.go +++ b/cmd/do-agent/main.go @@ -1,265 +1,50 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package main import ( - "flag" - "fmt" - "net/url" + "context" "os" - "regexp" - "runtime" - "time" - - "github.com/digitalocean/do-agent/bootstrap" - "github.com/digitalocean/do-agent/collector" - "github.com/digitalocean/do-agent/config" - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/monitoringclient" - "github.com/digitalocean/do-agent/plugins" - "github.com/digitalocean/do-agent/procfs" - "github.com/digitalocean/do-agent/update" - - "github.com/digitalocean/go-metadata" - "github.com/ianschenck/envflag" - "github.com/jpillora/backoff" -) - -var ( - defaultPluginPath = "/var/lib/do-agent/plugins" - - forceUpdate = flag.Bool("force_update", false, "Update the version of do-agent.") - logToSyslog = flag.Bool("log_syslog", false, "Log to syslog.") - logLevel = flag.String("log_level", "INFO", "Log level to log: ERROR, INFO, DEBUG") - debugAppKey = envflag.String("DO_AGENT_APPKEY", "", "Override AppKey") - debugAuthToken = envflag.String("DO_AGENT_AUTHTOKEN", "", "Override AuthToken") - debugAuthURL = envflag.String("DO_AGENT_AUTHENTICATION_URL", monitoringclient.AuthURL, "Override authentication URL") - debugDropletID = envflag.Int64("DO_AGENT_DROPLET_ID", 0, "Override Droplet ID") - debugLocalRepoPath = envflag.String("DO_AGENT_REPO_PATH", update.RepoLocalStore, "Override Local repository path") - debugMetadataURL = envflag.String("DO_AGENT_METADATA_URL", monitoringclient.MetadataURL, "Override metadata URL") - debugMetricsURL = envflag.String("DO_AGENT_METRICS_URL", "", "Override metrics URL") - debugUpdateURL = envflag.String("DO_AGENT_UPDATE_URL", update.RepoURL, "Override Update URL") - pluginPath = envflag.String("DO_AGENT_PLUGIN_PATH", defaultPluginPath, "Override plugin path") - procFSRoot = envflag.String("DO_AGENT_PROCFS_ROOT", "/proc", "Override location of /proc") - - // By default, only collect these metrics _and_ any plugins metrics. In a future version of - // the agent, the server will be requesting the metrics to gather. - defaultMetrics = map[string]collector.Filters{ - "cpu": collector.Filters{Regexps: []*regexp.Regexp{ - regexp.MustCompile("cpu_cpu.*"), - }}, - - "disk": collector.Filters{IncludeAll: true}, + "os/signal" + "syscall" - "filesystem": collector.Filters{Regexps: []*regexp.Regexp{ - regexp.MustCompile("filesystem_(free|size).*"), - }}, - - "load": collector.Filters{IncludeAll: true}, - - "memory": collector.Filters{Regexps: []*regexp.Regexp{ - regexp.MustCompile("memory_(free|cached|swap*|total)"), - }}, - - // Restrict network metrics to physical nics such as 'eno1' or 'eth1'. - // This prevents measuring VPN 'tun' or 'tap' devices and container 'veth'. 
- "network": collector.Filters{Regexps: []*regexp.Regexp{ - regexp.MustCompile(`network_(receive|transmit)_(bytes|packets)_(eno|eth)\d{1,}`), - }}, - - "node": collector.Filters{IncludeAll: true}, - - "process": collector.Filters{IncludeAll: true}, - } + "github.com/digitalocean/do-agent/internal/log" + "github.com/prometheus/client_golang/prometheus" + kingpin "gopkg.in/alecthomas/kingpin.v2" ) func main() { - envflag.Parse() - flag.Parse() - - if err := log.SetLogger(*logLevel, *logToSyslog); err != nil { - log.Fatal(err) - } - - log.Info("Do-Agent version ", config.Version()) - log.Info("Do-Agent build ", config.Build()) - log.Info("Architecture: ", runtime.GOARCH) - log.Info("Operating System: ", runtime.GOOS) - - if *debugMetadataURL != monitoringclient.MetadataURL { - log.Info("Metadata URL Override: ", *debugMetadataURL) - } - if *debugAuthURL != monitoringclient.AuthURL { - log.Info("Authentication URL Override: ", *debugAuthURL) - } - if *debugMetricsURL != "" { - log.Info("Metrics URL Override: ", *debugMetricsURL) - } - if *debugAppKey != "" { - log.Info("AppKey Override: ", *debugAppKey) - } - if *debugDropletID != 0 { - log.Info("DropletID Override: ", *debugDropletID) - } - if *debugAuthToken != "" { - log.Info("AuthToken Override: ", *debugAuthToken) - } - if *debugUpdateURL != update.RepoURL { - log.Info("Update URL Override: ", debugUpdateURL) - } - if *debugLocalRepoPath != update.RepoLocalStore { - log.Info("Local Repository Path Override: ", *debugLocalRepoPath) - } - if *procFSRoot != "/proc" { - log.Info("Using alternative location for procFS: ", *procFSRoot) - } - if *pluginPath != defaultPluginPath { - log.Info("Plugin path Override: ", *pluginPath) - } - updater := update.NewUpdate(*debugLocalRepoPath, *debugUpdateURL) - - if *forceUpdate { - updateAgentWithExit(updater) - } - - metadataURL, err := url.Parse(*debugMetadataURL) - if err != nil { - log.Fatal(err) - } - - metadataClient := metadata.NewClient(metadata.WithBaseURL(metadataURL)) - monitoringClient := monitoringclient.NewClient(*debugAuthURL) - - errorBackoffTimer := backoff.Backoff{ - Min: 500 * time.Millisecond, - Max: 5 * time.Minute, - Factor: 2, - Jitter: true, - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - var credentials *bootstrap.Credentials - for { - credentials, err = bootstrap.InitCredentials(metadataClient, monitoringClient, *debugAppKey, *debugDropletID, *debugAuthToken) - if err == nil { - break + stop := make(chan os.Signal, 1) + signal.Notify(stop, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP, syscall.SIGINT) + go func() { + if sig := <-stop; sig != nil { + log.Info("caught signal, shutting down: %s", sig.String()) } - log.Info("Unable to read credentials: ", err) - - if _, err = metadataClient.AuthToken(); err != nil { - log.Fatal("do-agent requires a DigitalOcean host") - } - time.Sleep(errorBackoffTimer.Duration()) - } - - if credentials.AppKey == "" { - log.Fatal("No Appkey is configured. 
do-agent requires a DigitalOcean host") - } - - smc, err := monitoringclient.CreateMetricsClient(credentials.AppKey, credentials.DropletID, credentials.Region, *debugMetricsURL) - if err != nil { - log.Fatal("Error creating monitoring client: ", err) - } - - updateAgentWithRestart(updater) - lastUpdate := time.Now() - procfs.ProcPath = *procFSRoot + cancel() + }() - r := smc.Registry() - collector.RegisterCPUMetrics(r, procfs.NewStat, defaultMetrics["cpu"]) - collector.RegisterDiskMetrics(r, procfs.NewDisk, defaultMetrics["disk"]) - collector.RegisterFSMetrics(r, procfs.NewMount, defaultMetrics["filesystem"]) - collector.RegisterLoadMetrics(r, procfs.NewLoad, defaultMetrics["load"]) - collector.RegisterMemoryMetrics(r, procfs.NewMemory, defaultMetrics["memory"]) - collector.RegisterNetworkMetrics(r, procfs.NewNetwork, defaultMetrics["network"]) - collector.RegisterNodeMetrics(r, procfs.NewOSRelease, defaultMetrics["node"]) - collector.RegisterProcessMetrics(r, procfs.NewProcProc, defaultMetrics["process"]) - plugins.RegisterPluginDir(r, *pluginPath) + os.Args = append(os.Args, additionalParams...) - for { - log.Debug("Transmitting metrics to DigitalOcean.") - pushInterval, err := smc.SendMetrics() - if err != nil { - log.Error("Sending metrics to DigitalOcean: ", err) - } - - log.Debug(fmt.Sprintf("sleeping for %d seconds", pushInterval)) - time.Sleep(time.Duration(pushInterval) * time.Second) + // parse all command line flags + kingpin.HelpFlag.Short('h') + kingpin.Parse() - if time.Now().After(lastUpdate.Add(1 * time.Hour)) { - lastUpdate = time.Now() - updateAgentWithRestart(updater) + if config.syslog { + if err := log.InitSyslog(); err != nil { + log.Error("failed to initialize syslog. Using standard logging: %+v", err) } } -} - -// updateAgentWithRestart looks for any available updates to the agent. If an update is found, it will -// update the agent binary and reinitialize itself. If an update isn't found or fails, it will -// only log the results of its attempt. -func updateAgentWithRestart(updater update.Updater) { - log.Info("Checking for newer version of do-agent") - if err := updater.FetchLatestAndExec(false); err != nil { - if err == update.ErrUpdateNotAvailable { - log.Info("No update available") - return - } - - if err == update.ErrUnableToRetrieveTargets { - log.Info("No target available for update") - return - } - - // covers when the agent can’t confirm that the update that is on the server is a valid - // update because the timestamp update itself has expired. - if _, ok := err.(update.ErrUnableToUpdateRepo); ok { - log.Info("No repository update available") - return - } - - log.Errorf("Unable to update do-agent: %s\n", err) + if err := checkConfig(); err != nil { + log.Fatal("configuration failure: %+v", err) } -} - -// updateAgentWithExit looks for any available updates to the agent. After attempting to update -// the agent it will gracefully terminate execution. -func updateAgentWithExit(updater update.Updater) { - log.Info("Checking for newer version of do-agent") - if err := updater.FetchLatest(true); err != nil { - if err == update.ErrUpdateNotAvailable { - log.Info("No update available") - os.Exit(0) - } - - if err == update.ErrUnableToRetrieveTargets { - log.Info("No target available for update") - os.Exit(0) - } - - // covers when the agent can’t confirm that the update that is on the server is a valid - // update because the timestamp update itself has expired. 
-		if _, ok := err.(update.ErrUnableToUpdateRepo); ok {
-			log.Info("No repository update available")
-			os.Exit(0)
-		}
-
-		log.Errorf("Unable to update do-agent: %s\n", err)
-		os.Exit(1)
-	}
+	cols := initCollectors()
+	reg := prometheus.NewRegistry()
+	reg.MustRegister(cols...)
 
-	log.Info("Updated successfully")
-	os.Exit(0)
+	w, th := initWriter(ctx)
+	d := initDecorator()
+	run(ctx, w, th, d, reg)
 }
diff --git a/cmd/do-agent/run.go b/cmd/do-agent/run.go
new file mode 100644
index 00000000..6d10834f
--- /dev/null
+++ b/cmd/do-agent/run.go
@@ -0,0 +1,62 @@
+package main
+
+import (
+	"context"
+	"time"
+
+	"github.com/digitalocean/do-agent/internal/log"
+	"github.com/digitalocean/do-agent/pkg/decorate"
+	dto "github.com/prometheus/client_model/go"
+)
+
+type metricWriter interface {
+	Write(mets []*dto.MetricFamily) error
+	Name() string
+}
+
+type throttler interface {
+	WaitDuration() time.Duration
+	Name() string
+}
+
+// gatherer is satisfied by *prometheus.Registry, which main passes in
+type gatherer interface {
+	Gather() ([]*dto.MetricFamily, error)
+}
+
+func run(ctx context.Context, w metricWriter, th throttler, dec decorate.Decorator, g gatherer) {
+	exec := func() {
+		start := time.Now()
+		mfs, err := g.Gather()
+		if err != nil {
+			log.Error("failed to gather metrics: %v", err)
+			return
+		}
+		log.Info("stats collected in %s", time.Since(start))
+
+		start = time.Now()
+		dec.Decorate(mfs)
+		log.Info("stats decorated in %s", time.Since(start))
+
+		// reset the timer so the duration logged below covers only the
+		// write, not the decoration step already logged above
+		start = time.Now()
+		err = w.Write(mfs)
+		if err != nil {
+			log.Error("failed to send metrics: %v", err)
+			return
+		}
+		log.Info("stats written in %s", time.Since(start))
+	}
+
+	exec()
+
+	for {
+		select {
+		case <-time.After(th.WaitDuration()):
+			exec()
+		case <-ctx.Done():
+			return
+		}
+	}
+}
diff --git a/cmd/do-agent/throttler.go b/cmd/do-agent/throttler.go
new file mode 100644
index 00000000..d8df6cf8
--- /dev/null
+++ b/cmd/do-agent/throttler.go
@@ -0,0 +1,17 @@
+package main
+
+import "time"
+
+type constThrottler struct {
+	wait time.Duration
+}
+
+// WaitDuration returns the fixed interval to wait between collections
+func (c *constThrottler) WaitDuration() time.Duration {
+	return c.wait
+}
+
+// Name is the name of this throttler
+func (c *constThrottler) Name() string {
+	return "constant"
+}
diff --git a/cmd/do-agent/version.go b/cmd/do-agent/version.go
new file mode 100644
index 00000000..7fe53102
--- /dev/null
+++ b/cmd/do-agent/version.go
@@ -0,0 +1,72 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"text/template"
+	"time"
+
+	"github.com/digitalocean/do-agent/internal/log"
+
+	"github.com/prometheus/client_golang/prometheus"
+	kingpin "gopkg.in/alecthomas/kingpin.v2"
+)
+
+var (
+	version   string
+	revision  string
+	buildDate string
+	goVersion = runtime.Version()
+)
+
+var versionTmpl = template.Must(template.New("version").Parse(`
+{{ .name }} (DigitalOcean Agent)
+
+Version: {{.version}}
+Revision: {{.revision}}
+Build Date: {{.buildDate}}
+Go Version: {{.goVersion}}
+Website: https://github.com/digitalocean/do-agent
+
+Copyright (c) {{.year}} DigitalOcean, Inc. All rights reserved.
+
+This work is licensed under the terms of the Apache 2.0 license.
+For a copy, see <http://www.apache.org/licenses/LICENSE-2.0>.
+`))
+
+var buildInfo = prometheus.NewGaugeVec(
+	prometheus.GaugeOpts{
+		// Namespace has to be sonar or it will get filtered
+		Namespace: "sonar",
+		Name:      "build_info",
+		Help:      "A metric with a constant '1' value labeled by version from which the agent was built.",
+	},
+	[]string{"version", "revision"},
+).WithLabelValues(version, revision)
+
+func init() {
+	buildInfo.Set(1)
+	kingpin.VersionFlag = kingpin.Flag("version", "Show the application version information").
+		Short('v').
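+		// PreAction appears to run during parsing, before required-flag
+		// validation (assumed from kingpin v2 behavior), so `do-agent -v`
+		// can print version info and exit even when other flags are unset.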
+ PreAction(func(c *kingpin.ParseContext) error { + err := versionTmpl.Execute(os.Stdout, map[string]string{ + "name": "do-agent", + "version": version, + "revision": revision, + "buildDate": buildDate, + "goVersion": goVersion, + "year": fmt.Sprintf("%d", time.Now().UTC().Year()), + }) + if err != nil { + log.Fatal("failed to execute version template: %+v", err) + } + os.Exit(0) + return nil + }) + kingpin.VersionFlag.Bool() + +} diff --git a/collector/common_test.go b/collector/common_test.go deleted file mode 100644 index e16056b4..00000000 --- a/collector/common_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "sort" - "testing" - - "github.com/digitalocean/do-agent/metrics" -) - -type stubMetricSet struct { - Name string - Opts []metrics.RegOpt -} - -type stubRegistry struct { - RegisterNameOpts []stubMetricSet - RegisterResult metrics.MetricRef - AddCollectorFunc metrics.Collector -} - -func (s *stubRegistry) Register(name string, opts ...metrics.RegOpt) metrics.MetricRef { - set := stubMetricSet{ - Name: name, - Opts: opts, - } - s.RegisterNameOpts = append(s.RegisterNameOpts, set) - return s.RegisterResult -} - -func (s *stubRegistry) AddCollector(f metrics.Collector) { - s.AddCollectorFunc = f -} - -func (s *stubRegistry) Report(r metrics.Reporter) { - s.AddCollectorFunc(r) -} - -type stubUpdateSet struct { - Ref metrics.MetricRef - Value float64 - LabelValues []string -} - -type stubReporter struct { - UpdateSet []stubUpdateSet -} - -func (s *stubReporter) Update(ref metrics.MetricRef, value float64, labelValues ...string) { - set := stubUpdateSet{ - Ref: ref, - Value: value, - LabelValues: labelValues, - } - s.UpdateSet = append(s.UpdateSet, set) -} - -// Verify that the stubRegistry implements the metrics.Registry interface. -var _ metrics.Registry = (*stubRegistry)(nil) - -// Verify that the stubReporter implements the metrics.Reporter interface. -var _ metrics.Reporter = (*stubReporter)(nil) - -// compareStringsUnordered tests if two slices contain the same elements (in any order). -// If they do not contain the same elements, elements which were exclusively found in a and b will -// be returned. 
-// -// Example: -// ok, aExtra, bExtra := CompareStringsUnordered([]string{"hello", "good", "world"}, []string{"good", "bye"}) -// will return: -// ok -> false -// aExtra -> []string{"hello", "world"} -// bExtra -> []string{"bye"} -func compareStringsUnordered(a, b []string) (bool, []string, []string) { - aSorted := make([]string, len(a)) - copy(aSorted, a) - sort.Strings(aSorted) - bSorted := make([]string, len(b)) - copy(bSorted, b) - sort.Strings(bSorted) - - i := 0 - j := 0 - - aExtra := []string{} - bExtra := []string{} - - for { - if i == len(aSorted) { - for ; j < len(bSorted); j++ { - bExtra = append(bExtra, bSorted[j]) - } - break - } - if j == len(bSorted) { - for ; i < len(aSorted); i++ { - aExtra = append(aExtra, aSorted[i]) - } - break - } - if aSorted[i] == bSorted[j] { - i++ - j++ - } else if aSorted[i] < bSorted[j] { - aExtra = append(aExtra, aSorted[i]) - i++ - } else { - bExtra = append(bExtra, bSorted[j]) - j++ - } - } - ok := len(aExtra) == 0 && len(bExtra) == 0 - return ok, aExtra, bExtra -} - -func testForMetricNames(t *testing.T, expectedNames, actualNames []string) { - ok, namesNotFound, namesNotExpected := compareStringsUnordered(expectedNames, actualNames) - - if !ok && len(namesNotFound) > 0 { - for i := range namesNotFound { - t.Errorf("expected metric name not found: %s", namesNotFound[i]) - } - } - - if !ok && len(namesNotExpected) > 0 { - for i := range namesNotExpected { - t.Errorf("unexpected metric name encountered: %s", namesNotExpected[i]) - } - } -} diff --git a/collector/cpu.go b/collector/cpu.go deleted file mode 100644 index 30e9d160..00000000 --- a/collector/cpu.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" - "github.com/digitalocean/do-agent/procfs" -) - -// traditionally ticks are messured per second. With modern -// architectures this value may vary. -const ticksPerSecond = 100 - -type statFunc func() (procfs.Stat, error) - -// RegisterCPUMetrics registers CPU related metrics. 
-func RegisterCPUMetrics(r metrics.Registry, fn statFunc, f Filters) { - cpu := r.Register("cpu", metrics.WithMeasuredLabels("cpu", "mode"), - metrics.AsType(metrics.MetricType_COUNTER)) - interrupt := r.Register("intr", - metrics.AsType(metrics.MetricType_COUNTER)) - contextSwitch := r.Register("context_switches") - procsBlocked := r.Register("procs_blocked") - procsRunning := r.Register("procs_running") - - r.AddCollector(func(r metrics.Reporter) { - stat, err := fn() - if err != nil { - log.Debugf("Could not gather cpu metrics: %s", err) - return - } - - for _, value := range stat.CPUS { - if value.CPU == "cpu" { - continue - } - f.UpdateIfIncluded(r, cpu, float64(value.Guest)/ticksPerSecond, value.CPU, "guest") - f.UpdateIfIncluded(r, cpu, float64(value.GuestNice)/ticksPerSecond, value.CPU, "guestnice") - f.UpdateIfIncluded(r, cpu, float64(value.Idle)/ticksPerSecond, value.CPU, "idle") - f.UpdateIfIncluded(r, cpu, float64(value.Iowait)/ticksPerSecond, value.CPU, "iowait") - f.UpdateIfIncluded(r, cpu, float64(value.Irq)/ticksPerSecond, value.CPU, "irq") - f.UpdateIfIncluded(r, cpu, float64(value.Nice)/ticksPerSecond, value.CPU, "nice") - f.UpdateIfIncluded(r, cpu, float64(value.Softirq)/ticksPerSecond, value.CPU, "softirq") - f.UpdateIfIncluded(r, cpu, float64(value.Steal)/ticksPerSecond, value.CPU, "steal") - f.UpdateIfIncluded(r, cpu, float64(value.System)/ticksPerSecond, value.CPU, "system") - f.UpdateIfIncluded(r, cpu, float64(value.User)/ticksPerSecond, value.CPU, "user") - } - - f.UpdateIfIncluded(r, interrupt, float64(stat.Interrupt)) - f.UpdateIfIncluded(r, contextSwitch, float64(stat.ContextSwitch)) - f.UpdateIfIncluded(r, procsBlocked, float64(stat.ProcessesBlocked)) - f.UpdateIfIncluded(r, procsRunning, float64(stat.ProcessesRunning)) - }) -} diff --git a/collector/cpu_test.go b/collector/cpu_test.go deleted file mode 100644 index 44a7b1eb..00000000 --- a/collector/cpu_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "testing" - - "github.com/digitalocean/do-agent/procfs" -) - -type stubStater struct { - NewStatResultStat procfs.Stat - NewStatResultErr error -} - -func (s *stubStater) NewStat() (procfs.Stat, error) { return s.NewStatResultStat, s.NewStatResultErr } - -// Verify that the stubStater implements the procfs.Stater interface. 
-var _ procfs.Stater = (*stubStater)(nil) - -func TestRegisterCPUMetrics(t *testing.T) { - stat := &stubStater{} - stat.NewStatResultErr = nil - stat.NewStatResultStat = procfs.Stat{ - CPUS: []procfs.CPU{ - procfs.CPU{ - CPU: "cpu1", - User: uint64(1), - Nice: uint64(2), - System: uint64(3), - Idle: uint64(4), - Iowait: uint64(5), - Irq: uint64(6), - Softirq: uint64(7), - Steal: uint64(8), - Guest: uint64(9), - GuestNice: uint64(10), - }, - }, - Interrupt: uint64(1), - ContextSwitch: uint64(2), - Processes: uint64(3), - ProcessesRunning: uint64(4), - ProcessesBlocked: uint64(5), - } - - expectedNames := []string{ - "cpu", - "intr", - "context_switches", - "procs_blocked", - "procs_running", - } - - var actualNames []string - - r := &stubRegistry{} - f := Filters{IncludeAll: true} - RegisterCPUMetrics(r, stat.NewStat, f) - - for i := range r.RegisterNameOpts { - actualNames = append(actualNames, r.RegisterNameOpts[i].Name) - } - - testForMetricNames(t, expectedNames, actualNames) - - if r.AddCollectorFunc == nil { - t.Error("expected collector function, found none") - } -} diff --git a/collector/disk.go b/collector/disk.go deleted file mode 100644 index d5f9bdfa..00000000 --- a/collector/disk.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "regexp" - - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" - "github.com/digitalocean/do-agent/procfs" -) - -//from prometheus node exporter -const ( - excludedDisks = "^(ram|loop|fd|(h|s|v|xv)d[a-z])\\d+$" - sectorSize = 512 - diskSystem = "disk" -) - -var edp = regexp.MustCompile(excludedDisks) - -type diskFunc func() ([]procfs.Disk, error) - -// RegisterDiskMetrics registers disk metrics. 
-func RegisterDiskMetrics(r metrics.Registry, fn diskFunc, f Filters) { - deviceLabel := metrics.WithMeasuredLabels("device") - ioNow := r.Register(diskSystem+"_io_now", deviceLabel) - ioTime := r.Register(diskSystem+"_io_time_ms", deviceLabel) - ioTimeWeighted := r.Register(diskSystem+"_io_time_weighted", deviceLabel) - readTime := r.Register(diskSystem+"_read_time_ms", deviceLabel) - readsCompleted := r.Register(diskSystem+"_reads_completed", deviceLabel) - readsMerged := r.Register(diskSystem+"_reads_merged", deviceLabel) - sectorsRead := r.Register(diskSystem+"_sectors_read", deviceLabel) - sectorsWritten := r.Register(diskSystem+"_sectors_written", deviceLabel) - writeTime := r.Register(diskSystem+"_write_time_ms", deviceLabel) - writesCompleted := r.Register(diskSystem+"_writes_completed", deviceLabel) - writesMerged := r.Register(diskSystem+"_writes_merged", deviceLabel) - bytesRead := r.Register(diskSystem+"_bytes_read", deviceLabel) - bytesWritten := r.Register(diskSystem+"_bytes_written", deviceLabel) - - r.AddCollector(func(r metrics.Reporter) { - disk, err := fn() - if err != nil { - log.Debugf("Could not gather disk metrics: %s", err) - return - } - - for _, value := range disk { - if edp.MatchString(value.DeviceName) { - log.Debugf("Excluding disk %s", value.DeviceName) - continue - } - - f.UpdateIfIncluded(r, ioNow, float64(value.IOInProgress), value.DeviceName) - f.UpdateIfIncluded(r, ioTime, float64(value.TimeSpentDoingIO), value.DeviceName) - f.UpdateIfIncluded(r, ioTimeWeighted, float64(value.WeightedTimeSpentDoingIO), value.DeviceName) - f.UpdateIfIncluded(r, readTime, float64(value.TimeSpentReading), value.DeviceName) - f.UpdateIfIncluded(r, readsCompleted, float64(value.ReadsCompleted), value.DeviceName) - f.UpdateIfIncluded(r, readsMerged, float64(value.ReadsMerged), value.DeviceName) - f.UpdateIfIncluded(r, sectorsRead, float64(value.SectorsRead), value.DeviceName) - f.UpdateIfIncluded(r, sectorsWritten, float64(value.SectorsWritten), value.DeviceName) - f.UpdateIfIncluded(r, writeTime, float64(value.TimeSpendWriting), value.DeviceName) - f.UpdateIfIncluded(r, writesCompleted, float64(value.WritesCompleted), value.DeviceName) - f.UpdateIfIncluded(r, writesMerged, float64(value.WritesMerged), value.DeviceName) - f.UpdateIfIncluded(r, bytesRead, float64(value.ReadsMerged)*sectorSize, value.DeviceName) - f.UpdateIfIncluded(r, bytesWritten, float64(value.WritesMerged)*sectorSize, value.DeviceName) - } - }) -} diff --git a/collector/disk_test.go b/collector/disk_test.go deleted file mode 100644 index 01f8d86b..00000000 --- a/collector/disk_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "testing" - - "github.com/digitalocean/do-agent/procfs" -) - -type stubDisker struct { - NewDiskResultDisk []procfs.Disk - NewDiskResultErr error -} - -func (s *stubDisker) NewDisk() ([]procfs.Disk, error) { - return s.NewDiskResultDisk, s.NewDiskResultErr -} - -// Verify that the stubDisker implements the procfs.Disker interface. -var _ procfs.Disker = (*stubDisker)(nil) - -func TestRegisterDiskMetrics(t *testing.T) { - testCases := []struct { - label string - disker *stubDisker - expectedNames []string - }{ - {"all_labels", &stubDisker{NewDiskResultErr: nil, - NewDiskResultDisk: []procfs.Disk{ - procfs.Disk{ - MajorNumber: uint64(1), - MinorNumber: uint64(2), - DeviceName: "fooDrive", - ReadsCompleted: uint64(3), - ReadsMerged: uint64(4), - SectorsRead: uint64(5), - TimeSpentReading: uint64(6), - WritesCompleted: uint64(7), - WritesMerged: uint64(8), - SectorsWritten: uint64(9), - TimeSpendWriting: uint64(10), - IOInProgress: uint64(11), - TimeSpentDoingIO: uint64(12), - WeightedTimeSpentDoingIO: uint64(13), - }, - }, - }, []string{ - "disk_io_now", - "disk_io_time_ms", - "disk_io_time_weighted", - "disk_read_time_ms", - "disk_reads_completed", - "disk_reads_merged", - "disk_sectors_read", - "disk_sectors_written", - "disk_write_time_ms", - "disk_writes_completed", - "disk_writes_merged", - "disk_bytes_read", - "disk_bytes_written", - }, - }, - {"ignored_disk", &stubDisker{NewDiskResultErr: nil, NewDiskResultDisk: []procfs.Disk{procfs.Disk{DeviceName: "loop0"}}}, []string{ - "disk_io_now", - "disk_io_time_ms", - "disk_io_time_weighted", - "disk_read_time_ms", - "disk_reads_completed", - "disk_reads_merged", - "disk_sectors_read", - "disk_sectors_written", - "disk_write_time_ms", - "disk_writes_completed", - "disk_writes_merged", - "disk_bytes_read", - "disk_bytes_written", - }}, - } - - for _, tc := range testCases { - t.Run(tc.label, func(t *testing.T) { - var actualNames []string - - r := &stubRegistry{} - f := Filters{IncludeAll: true} - RegisterDiskMetrics(r, tc.disker.NewDisk, f) - - for i := range r.RegisterNameOpts { - actualNames = append(actualNames, r.RegisterNameOpts[i].Name) - } - - testForMetricNames(t, tc.expectedNames, actualNames) - - if r.AddCollectorFunc == nil { - t.Error("expected collector function, found none") - } - }) - } -} diff --git a/collector/file_system.go b/collector/file_system.go deleted file mode 100644 index 1bb092c6..00000000 --- a/collector/file_system.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "strings" - "syscall" - - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" - "github.com/digitalocean/do-agent/procfs" -) - -const ( - fsSystem = "filesystem" -) - -// excludedDevices are psudeo filesystems that are excluded from metrics. 
-var excludedDevices = []string{ - "fusectl", - "lxcfs", - "mqueue", - "none", - "rootfs", - "sunrpc", - "systemd", - "udev", -} - -// excludedFSes are pseudo filesystems that are excluded from metrics -var excludedFSes = []string{ - "aufs", - "autofs", - "binfmt_misc", - "cifs", - "cgroup", - "debugfs", - "devpts", - "devtmpfs", - "ecryptfs", - "efivarfs", - "fuse", - "hugetlbfs", - "mqueue", - "nfs", - "overlayfs", - "proc", - "pstore", - "rpc_pipefs", - "securityfs", - "smb", - "sysfs", - "tmpfs", - "tracefs", -} - -type mountFunc func() ([]procfs.Mount, error) - -// isExlcuded checks if a filesystems matches the exlcudedDevice list. Regexp's were -// considered but they can be slow. -func isExcluded(d, t string) bool { - for _, x := range excludedDevices { - if strings.Contains(d, x) { - return true - } - } - for _, y := range excludedFSes { - if strings.Contains(t, y) { - return true - } - } - return false -} - -// RegisterFSMetrics registers Filesystem related metrics.. -func RegisterFSMetrics(r metrics.Registry, fn mountFunc, f Filters) { - labels := metrics.WithMeasuredLabels("device", "mountpoint", "fstype") - available := r.Register(fsSystem+"_avail", labels) - files := r.Register(fsSystem+"_files", labels) - filesFree := r.Register(fsSystem+"_files_free", labels) - free := r.Register(fsSystem+"_free", labels) - size := r.Register(fsSystem+"_size", labels) - - r.AddCollector(func(r metrics.Reporter) { - mounts, err := fn() - if err != nil { - log.Debugf("Could not gather filesystem metrics: %s", err) - return - } - - for _, mount := range mounts { - if isExcluded(mount.Device, mount.FSType) { - log.Debugf("Ignoring filesystem for device : %s %s ", mount.Device, mount.FSType) - continue - } - - var fsStats syscall.Statfs_t - err := syscall.Statfs(mount.MountPoint, &fsStats) - if err != nil { - log.Debugf("syscall.Statfs had error on %s: %s", mount.MountPoint, err) - continue - } - - f.UpdateIfIncluded(r, available, float64(fsStats.Bavail)*float64(fsStats.Bsize), - mount.Device, mount.MountPoint, mount.FSType) - f.UpdateIfIncluded(r, files, float64(fsStats.Files), - mount.Device, mount.MountPoint, mount.FSType) - f.UpdateIfIncluded(r, filesFree, float64(fsStats.Ffree), - mount.Device, mount.MountPoint, mount.FSType) - f.UpdateIfIncluded(r, free, float64(fsStats.Bfree)*float64(fsStats.Bsize), - mount.Device, mount.MountPoint, mount.FSType) - f.UpdateIfIncluded(r, size, float64(fsStats.Blocks)*float64(fsStats.Bsize), - mount.Device, mount.MountPoint, mount.FSType) - } - }) -} diff --git a/collector/file_system_test.go b/collector/file_system_test.go deleted file mode 100644 index 89189c48..00000000 --- a/collector/file_system_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "testing" - - "github.com/digitalocean/do-agent/procfs" -) - -type stubMounter struct { - NewMountResultMounts []procfs.Mount - NewMountResultErr error -} - -func (s *stubMounter) NewMount() ([]procfs.Mount, error) { - return s.NewMountResultMounts, s.NewMountResultErr -} - -// Verify that the stubMounter implements the procfs.Mounter interface. -var _ procfs.Mounter = (*stubMounter)(nil) - -func TestRegisterFSMetrics(t *testing.T) { - m := &stubMounter{} - m.NewMountResultErr = nil - m.NewMountResultMounts = []procfs.Mount{ - procfs.Mount{ - Device: "rootfs", - MountPoint: "/", - FSType: "shoes", - }, - } - - expectedNames := []string{ - "filesystem_avail", - "filesystem_files", - "filesystem_files_free", - "filesystem_free", - "filesystem_size", - } - - var actualNames []string - - r := &stubRegistry{} - f := Filters{IncludeAll: true} - RegisterFSMetrics(r, m.NewMount, f) - - for i := range r.RegisterNameOpts { - actualNames = append(actualNames, r.RegisterNameOpts[i].Name) - } - - testForMetricNames(t, expectedNames, actualNames) - - if r.AddCollectorFunc == nil { - t.Error("expected collector function, found none") - } -} diff --git a/collector/filter.go b/collector/filter.go deleted file mode 100644 index 7ba32b1f..00000000 --- a/collector/filter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "regexp" - "strings" - - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" -) - -// Filters is used to limit collection of metrics. -type Filters struct { - IncludeAll bool - Regexps []*regexp.Regexp -} - -// UpdateIfIncluded call r.Update if the metric should be included. -func (f *Filters) UpdateIfIncluded(r metrics.Reporter, ref metrics.MetricRef, value float64, labelValues ...string) { - def, ok := ref.(*metrics.Definition) - if !ok { - log.Debugf("unknown metric: %d", ref) - return - } - - l := def.Name - if len(labelValues) > 0 { - l += "_" + strings.Join(labelValues, "_") - } - - if f.IncludeAll { - r.Update(ref, value, labelValues...) - log.Debugf("(+) included via catch all: %v", l) - return - } - - for _, e := range f.Regexps { - if e.MatchString(l) { - r.Update(ref, value, labelValues...) - log.Debugf("(+) included via regex: %v", l) - return - } - } - - log.Debugf("(-) excluded: %v", l) -} diff --git a/collector/load.go b/collector/load.go deleted file mode 100644 index fbc0283d..00000000 --- a/collector/load.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" - "github.com/digitalocean/do-agent/procfs" -) - -type loadFunc func() (procfs.Load, error) - -// RegisterLoadMetrics registers system load related metrics. -func RegisterLoadMetrics(r metrics.Registry, fn loadFunc, f Filters) { - load1 := r.Register("load1") - load5 := r.Register("load5") - load15 := r.Register("load15") - - r.AddCollector(func(r metrics.Reporter) { - loads, err := fn() - if err != nil { - log.Debugf("couldn't get load: %s", err) - return - } - f.UpdateIfIncluded(r, load1, loads.Load1) - f.UpdateIfIncluded(r, load5, loads.Load5) - f.UpdateIfIncluded(r, load15, loads.Load15) - }) -} diff --git a/collector/load_test.go b/collector/load_test.go deleted file mode 100644 index 08ea5f6d..00000000 --- a/collector/load_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "testing" - - "github.com/digitalocean/do-agent/procfs" -) - -type stubLoader struct { - NewLoadResultLoad procfs.Load - NewLoadResultErr error -} - -func (s *stubLoader) NewLoad() (procfs.Load, error) { - return s.NewLoadResultLoad, s.NewLoadResultErr -} - -// Verify that the stubMounter implements the procfs.Mounter interface. -var _ procfs.Loader = (*stubLoader)(nil) - -func TestRegisterLoadMetrics(t *testing.T) { - l := &stubLoader{} - l.NewLoadResultErr = nil - l.NewLoadResultLoad = procfs.Load{ - Load1: float64(1), - Load5: float64(2), - Load15: float64(3), - RunningProcs: uint64(4), - TotalProcs: uint64(5), - LastPIDUsed: uint64(6), - } - - expectedNames := []string{ - "load1", - "load5", - "load15", - } - - var actualNames []string - - r := &stubRegistry{} - f := Filters{IncludeAll: true} - RegisterLoadMetrics(r, l.NewLoad, f) - - for i := range r.RegisterNameOpts { - actualNames = append(actualNames, r.RegisterNameOpts[i].Name) - } - - testForMetricNames(t, expectedNames, actualNames) - - if r.AddCollectorFunc == nil { - t.Error("expected collector function, found none") - } -} diff --git a/collector/memory.go b/collector/memory.go deleted file mode 100644 index b07997be..00000000 --- a/collector/memory.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" - "github.com/digitalocean/do-agent/procfs" -) - -const memorySystem = "memory" - -var memoryMetrics = map[string]string{ - "Active": "active", - "Active_anon": "active_anonymous", - "Active_file": "active_file", - "AnonHugePages": "anonymous_hugepages", - "AnonPages": "anonymous_pages", - "Bounce": "bounce", - "Buffers": "buffers", - "Cached": "cached", - "CommitLimit": "commit_limit", - "Committed_AS": "committed_as", - "DirectMap1G": "direct_map_1g", - "DirectMap2M": "direct_map_2m", - "DirectMap4k": "direct_map_4k", - "Dirty": "dirty", - "HardwareCorrupted": "hardware_corrupted", - "HugePages_Free": "hugepages_free", - "HugePages_Rsvd": "hugepages_reserved", - "HugePages_Surp": "hugepages_surplus", - "HugePages_Total": "hugepages_total", - "Hugepagesize": "hugepages_size", - "Inactive": "inactive", - "Inactive_anon": "inactive_anonymous", - "Inactive_file": "inactive_file", - "KernelStack": "kernel_stack", - "Mapped": "mapped", - "MemFree": "free", - "MemTotal": "total", - "Mlocked": "locked", - "NFS_Unstable": "nfs_unstable", - "PageTables": "page_tables", - "SReclaimable": "slab_reclaimable", - "SUnreclaim": "slab_unreclaimable", - "Shmem": "shmem", - "Slab": "slab", - "SwapCached": "swap_cached", - "SwapFree": "swap_free", - "SwapTotal": "swap_total", - "Unevictable": "unevictable", - "VmallocChunk": "virtual_malloc_chunk", - "VmallocTotal": "virtual_malloc_total", - "VmallocUsed": "virtual_malloc_used", - "Writeback": "writeback", - "WritebackTmp": "writeback_temporary", -} - -type memoryFunc func() (procfs.Memory, error) - -//RegisterMemoryMetrics creates a reference to a MemoryCollector. 
-func RegisterMemoryMetrics(r metrics.Registry, fn memoryFunc, f Filters) { - m := make(map[string]metrics.MetricRef) - for procLabel, name := range memoryMetrics { - m[procLabel] = r.Register(memorySystem + "_" + name) - } - - r.AddCollector(func(r metrics.Reporter) { - mem, err := fn() - if err != nil { - log.Debugf("couldn't get memory: %s", err) - return - } - - f.UpdateIfIncluded(r, m["Active"], mem.Active) - f.UpdateIfIncluded(r, m["Active_anon"], mem.ActiveAnon) - f.UpdateIfIncluded(r, m["Active_file"], mem.ActiveFile) - f.UpdateIfIncluded(r, m["AnonHugePages"], mem.AnonHugePages) - f.UpdateIfIncluded(r, m["AnonPages"], mem.AnonPages) - f.UpdateIfIncluded(r, m["Bounce"], mem.Bounce) - f.UpdateIfIncluded(r, m["Buffers"], mem.Buffers) - f.UpdateIfIncluded(r, m["Cached"], mem.Cached) - f.UpdateIfIncluded(r, m["CommitLimit"], mem.CommitLimit) - f.UpdateIfIncluded(r, m["Committed_AS"], mem.CommittedAS) - f.UpdateIfIncluded(r, m["DirectMap1G"], mem.DirectMap1G) - f.UpdateIfIncluded(r, m["DirectMap2M"], mem.DirectMap2M) - f.UpdateIfIncluded(r, m["DirectMap4k"], mem.DirectMap4k) - f.UpdateIfIncluded(r, m["Dirty"], mem.Dirty) - f.UpdateIfIncluded(r, m["HardwareCorrupted"], mem.HardwareCorrupted) - f.UpdateIfIncluded(r, m["HugePages_Free"], mem.HugePagesFree) - f.UpdateIfIncluded(r, m["HugePages_Rsvd"], mem.HugePagesRsvd) - f.UpdateIfIncluded(r, m["HugePages_Surp"], mem.HugePagesSurp) - f.UpdateIfIncluded(r, m["HugePages_Total"], mem.HugePagesTotal) - f.UpdateIfIncluded(r, m["Hugepagesize"], mem.Hugepagesize) - f.UpdateIfIncluded(r, m["Inactive"], mem.Inactive) - f.UpdateIfIncluded(r, m["Inactive_anon"], mem.InactiveAnon) - f.UpdateIfIncluded(r, m["Inactive_file"], mem.InactiveFile) - f.UpdateIfIncluded(r, m["KernelStack"], mem.KernelStack) - f.UpdateIfIncluded(r, m["Mapped"], mem.Mapped) - f.UpdateIfIncluded(r, m["MemFree"], mem.MemFree) - f.UpdateIfIncluded(r, m["MemTotal"], mem.MemTotal) - f.UpdateIfIncluded(r, m["Mlocked"], mem.Mlocked) - f.UpdateIfIncluded(r, m["NFS_Unstable"], mem.NFSUnstable) - f.UpdateIfIncluded(r, m["PageTables"], mem.PageTables) - f.UpdateIfIncluded(r, m["SReclaimable"], mem.SReclaimable) - f.UpdateIfIncluded(r, m["SUnreclaim"], mem.SUnreclaim) - f.UpdateIfIncluded(r, m["Shmem"], mem.Shmem) - f.UpdateIfIncluded(r, m["Slab"], mem.Slab) - f.UpdateIfIncluded(r, m["SwapCached"], mem.SwapCached) - f.UpdateIfIncluded(r, m["SwapFree"], mem.SwapFree) - f.UpdateIfIncluded(r, m["SwapTotal"], mem.SwapTotal) - f.UpdateIfIncluded(r, m["Unevictable"], mem.Unevictable) - f.UpdateIfIncluded(r, m["VmallocChunk"], mem.VmallocChunk) - f.UpdateIfIncluded(r, m["VmallocTotal"], mem.VmallocTotal) - f.UpdateIfIncluded(r, m["VmallocUsed"], mem.VmallocUsed) - f.UpdateIfIncluded(r, m["Writeback"], mem.Writeback) - f.UpdateIfIncluded(r, m["WritebackTmp"], mem.WritebackTmp) - }) -} diff --git a/collector/memory_test.go b/collector/memory_test.go deleted file mode 100644 index 2e4397b2..00000000 --- a/collector/memory_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "testing" - - "github.com/digitalocean/do-agent/procfs" -) - -type stubMemoryer struct { - NewMemoryResultMemory procfs.Memory - NewMemoryResultErr error -} - -func (s *stubMemoryer) NewMemory() (procfs.Memory, error) { - return s.NewMemoryResultMemory, s.NewMemoryResultErr -} - -// Verify that the stubMemoryer implements the procfs.Memoryer interface. -var _ procfs.Memoryer = (*stubMemoryer)(nil) - -func TestRegisterMemoryMetrics(t *testing.T) { - m := &stubMemoryer{} - m.NewMemoryResultErr = nil - m.NewMemoryResultMemory = procfs.Memory{} - - expectedNames := []string{ - "memory_active", - "memory_active_anonymous", - "memory_active_file", - "memory_anonymous_hugepages", - "memory_anonymous_pages", - "memory_bounce", - "memory_buffers", - "memory_cached", - "memory_commit_limit", - "memory_committed_as", - "memory_direct_map_1g", - "memory_direct_map_2m", - "memory_direct_map_4k", - "memory_dirty", - "memory_hardware_corrupted", - "memory_hugepages_free", - "memory_hugepages_reserved", - "memory_hugepages_surplus", - "memory_hugepages_total", - "memory_hugepages_size", - "memory_inactive", - "memory_inactive_anonymous", - "memory_inactive_file", - "memory_kernel_stack", - "memory_mapped", - "memory_free", - "memory_total", - "memory_locked", - "memory_nfs_unstable", - "memory_page_tables", - "memory_slab_reclaimable", - "memory_slab_unreclaimable", - "memory_shmem", - "memory_slab", - "memory_swap_cached", - "memory_swap_free", - "memory_swap_total", - "memory_unevictable", - "memory_virtual_malloc_chunk", - "memory_virtual_malloc_total", - "memory_virtual_malloc_used", - "memory_writeback", - "memory_writeback_temporary", - } - - var actualNames []string - - r := &stubRegistry{} - f := Filters{IncludeAll: true} - RegisterMemoryMetrics(r, m.NewMemory, f) - - for i := range r.RegisterNameOpts { - actualNames = append(actualNames, r.RegisterNameOpts[i].Name) - } - - testForMetricNames(t, expectedNames, actualNames) - - if r.AddCollectorFunc == nil { - t.Error("expected collector function, found none") - } -} diff --git a/collector/network.go b/collector/network.go deleted file mode 100644 index 7314fa57..00000000 --- a/collector/network.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" - "github.com/digitalocean/do-agent/procfs" -) - -const networkSystem = "network" - -var networkNames = []string{ - "receive_bytes", - "receive_compressed", - "receive_drop", - "receive_errs", - "receive_fifo", - "receive_frame", - "receive_multicast", - "receive_packets", - "transmit_bytes", - "transmit_compressed", - "transmit_drop", - "transmit_errs", - "transmit_fifo", - "transmit_frame", - "transmit_packets"} - -type networkFunc func() ([]procfs.Network, error) - -//RegisterNetworkMetrics creates a reference to a NewtworkCollector. -func RegisterNetworkMetrics(r metrics.Registry, fn networkFunc, f Filters) { - nc := map[string]metrics.MetricRef{} - deviceLabel := metrics.WithMeasuredLabels("device") - for _, name := range networkNames { - nc[name] = r.Register(networkSystem+"_"+name, deviceLabel) - } - - r.AddCollector(func(r metrics.Reporter) { - network, err := fn() - if err != nil { - log.Debugf("Could not gather network metrics: %s", err) - return - } - - for _, value := range network { - f.UpdateIfIncluded(r, nc["receive_bytes"], float64(value.RXBytes), value.Interface) - f.UpdateIfIncluded(r, nc["receive_compressed"], float64(value.RXCompressed), value.Interface) - f.UpdateIfIncluded(r, nc["receive_drop"], float64(value.RXDrop), value.Interface) - f.UpdateIfIncluded(r, nc["receive_errs"], float64(value.RXErrs), value.Interface) - f.UpdateIfIncluded(r, nc["receive_fifo"], float64(value.RXFifo), value.Interface) - f.UpdateIfIncluded(r, nc["receive_frame"], float64(value.RXFrame), value.Interface) - f.UpdateIfIncluded(r, nc["receive_multicast"], float64(value.RXMulticast), value.Interface) - f.UpdateIfIncluded(r, nc["receive_packets"], float64(value.RXPackets), value.Interface) - f.UpdateIfIncluded(r, nc["transmit_bytes"], float64(value.TXBytes), value.Interface) - f.UpdateIfIncluded(r, nc["transmit_compressed"], float64(value.TXCompressed), value.Interface) - f.UpdateIfIncluded(r, nc["transmit_drop"], float64(value.TXDrop), value.Interface) - f.UpdateIfIncluded(r, nc["transmit_errs"], float64(value.TXErrs), value.Interface) - f.UpdateIfIncluded(r, nc["transmit_fifo"], float64(value.TXFifo), value.Interface) - f.UpdateIfIncluded(r, nc["transmit_packets"], float64(value.TXPackets), value.Interface) - } - }) -} diff --git a/collector/network_test.go b/collector/network_test.go deleted file mode 100644 index ea21205a..00000000 --- a/collector/network_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "testing" - - "github.com/digitalocean/do-agent/procfs" -) - -type stubNetworker struct { - NewNetworkResultNetworks []procfs.Network - NewNetworkResultErr error -} - -func (s *stubNetworker) NewNetwork() ([]procfs.Network, error) { - return s.NewNetworkResultNetworks, s.NewNetworkResultErr -} - -// Verify that the stubNetworker implements the procfs.Networker interface. -var _ procfs.Networker = (*stubNetworker)(nil) - -func TestRegisterNetworkMetrics(t *testing.T) { - n := &stubNetworker{} - n.NewNetworkResultErr = nil - n.NewNetworkResultNetworks = []procfs.Network{ - procfs.Network{ - Interface: "inet15", - RXBytes: uint64(1), - RXPackets: uint64(2), - RXErrs: uint64(3), - RXDrop: uint64(4), - RXFifo: uint64(5), - RXFrame: uint64(6), - RXCompressed: uint64(7), - RXMulticast: uint64(8), - TXBytes: uint64(9), - TXPackets: uint64(10), - TXErrs: uint64(11), - TXDrop: uint64(12), - TXFifo: uint64(13), - TXColls: uint64(14), - TXCarrier: uint64(15), - TXCompressed: uint64(16), - }, - } - - expectedNames := []string{ - "network_receive_bytes", - "network_receive_compressed", - "network_receive_drop", - "network_receive_errs", - "network_receive_fifo", - "network_receive_frame", - "network_receive_multicast", - "network_receive_packets", - "network_transmit_bytes", - "network_transmit_compressed", - "network_transmit_drop", - "network_transmit_errs", - "network_transmit_fifo", - "network_transmit_frame", - "network_transmit_packets", - } - - var actualNames []string - - r := &stubRegistry{} - f := Filters{IncludeAll: true} - RegisterNetworkMetrics(r, n.NewNetwork, f) - - for i := range r.RegisterNameOpts { - actualNames = append(actualNames, r.RegisterNameOpts[i].Name) - } - - testForMetricNames(t, expectedNames, actualNames) - - if r.AddCollectorFunc == nil { - t.Error("expected collector function, found none") - } -} diff --git a/collector/node.go b/collector/node.go deleted file mode 100644 index a7ad801c..00000000 --- a/collector/node.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "fmt" - "net" - "os" - "runtime" - - "github.com/digitalocean/do-agent/config" - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" - "github.com/digitalocean/do-agent/procfs" -) - -const nodeSystem = "node" - -type osReleaseFunc func() (procfs.OSRelease, error) - -func kernelVersion(f osReleaseFunc) string { - version, err := f() - if err != nil { - log.Debugf("Unable to collect kernel version: %s", err) - return "Unavailable kernel version" - } - return string(version) -} - -//RegisterNodeMetrics creates a reference to a NodeCollector. 
-func RegisterNodeMetrics(r metrics.Registry, fn osReleaseFunc, f Filters) { - labels := map[string]string{ - "os": runtime.GOOS, - "architecture": runtime.GOARCH, - "sonar_agent_version": config.Version(), - "build": config.Build(), - "kernel": kernelVersion(fn), - } - addrs, err := net.InterfaceAddrs() - if err != nil { - log.Debug("Unable to collect interface IP Addresses") - return - } - - ipCount := 0 - for _, addr := range addrs { - var ip net.IP - switch v := addr.(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - default: - continue - } - if ip.IsLoopback() { - continue - } - ipCount++ - labels[fmt.Sprintf("ipaddress%d", ipCount)] = ip.String() - } - - info := r.Register(nodeSystem+"_info", - metrics.WithCommonLabels(labels), - metrics.WithMeasuredLabels("host_name")) - - r.AddCollector(func(r metrics.Reporter) { - hostName, err := os.Hostname() - if err != nil { - log.Debugf("Unable to collect hostname: %s", err) - hostName = "Unavailable Hostname" - } - - f.UpdateIfIncluded(r, info, 0, hostName) - }) -} diff --git a/collector/node_test.go b/collector/node_test.go deleted file mode 100644 index 697a1fe2..00000000 --- a/collector/node_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "testing" - - "github.com/digitalocean/do-agent/procfs" -) - -type stubOSReleaser struct { - NewOSReleaseResultOSRelease procfs.OSRelease - NewOSReleaseResultErr error -} - -func (s *stubOSReleaser) NewOSRelease() (procfs.OSRelease, error) { - return s.NewOSReleaseResultOSRelease, s.NewOSReleaseResultErr -} - -// Verify that the stubOSReleaser implements the procfs.OSReleaser interface. -var _ procfs.OSReleaser = (*stubOSReleaser)(nil) - -func TestRegisterNodeMetrics(t *testing.T) { - o := &stubOSReleaser{} - o.NewOSReleaseResultErr = nil - o.NewOSReleaseResultOSRelease = procfs.OSRelease("lingus") - - expectedNames := []string{"node_info"} - - var actualNames []string - - r := &stubRegistry{} - f := Filters{IncludeAll: true} - RegisterNodeMetrics(r, o.NewOSRelease, f) - - for i := range r.RegisterNameOpts { - actualNames = append(actualNames, r.RegisterNameOpts[i].Name) - } - - testForMetricNames(t, expectedNames, actualNames) - - if r.AddCollectorFunc == nil { - t.Error("expected collector function, found none") - } -} diff --git a/collector/process.go b/collector/process.go deleted file mode 100644 index 7d1c4f9d..00000000 --- a/collector/process.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" - "github.com/digitalocean/do-agent/procfs" -) - -const processSystem = "process" - -type process struct { - totalCPUUtilization float64 - totalMemory float64 -} - -type procprocFunc func() ([]procfs.ProcProc, error) - -// RegisterProcessMetrics registers process metrics. -func RegisterProcessMetrics(r metrics.Registry, fn procprocFunc, f Filters) { - memory := r.Register(processSystem+"_memory", - metrics.WithMeasuredLabels("process")) - cpu := r.Register(processSystem+"_cpu", - metrics.WithMeasuredLabels("process")) - - r.AddCollector(func(r metrics.Reporter) { - procs, err := fn() - if err != nil { - log.Debugf("couldn't get processes: %s", err) - return - } - - m := make(map[string]*process) - for _, proc := range procs { - if value, ok := m[proc.Comm]; ok { - value.totalCPUUtilization += proc.CPUUtilization - value.totalMemory += float64(proc.ResidentMemory) - } else { - m[proc.Comm] = &process{ - totalCPUUtilization: proc.CPUUtilization, - totalMemory: float64(proc.ResidentMemory), - } - } - } - - for key, value := range m { - f.UpdateIfIncluded(r, memory, value.totalMemory, key) - f.UpdateIfIncluded(r, cpu, value.totalCPUUtilization, key) - } - }) -} diff --git a/collector/process_test.go b/collector/process_test.go deleted file mode 100644 index bfae0a57..00000000 --- a/collector/process_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "testing" - - "github.com/digitalocean/do-agent/procfs" -) - -type stubProcer struct { - NewProcProcResultProcProcs []procfs.ProcProc - NewProcProcResultErr error -} - -func (s *stubProcer) NewProcProc() ([]procfs.ProcProc, error) { - return s.NewProcProcResultProcProcs, s.NewProcProcResultErr -} - -// Verify that the stubProcProc implements the procfs.Procer interface. 
-var _ procfs.Procer = (*stubProcer)(nil) - -func TestRegisterProcessMetrics(t *testing.T) { - p := &stubProcer{} - p.NewProcProcResultErr = nil - p.NewProcProcResultProcProcs = []procfs.ProcProc{ - procfs.ProcProc{ - PID: int(1), - CPUUtilization: float64(1.0 / 3.0), - ResidentMemory: int(3), - VirtualMemory: int(4), - Comm: "foo", - CmdLine: []string{"a", "b"}, - }, - procfs.ProcProc{ - PID: int(2), - CPUUtilization: float64(3.0 / 4.0), - ResidentMemory: int(2), - VirtualMemory: int(1), - Comm: "foo", - CmdLine: []string{"a", "b"}, - }, - } - - expectedNames := []string{ - "process_memory", - "process_cpu", - } - - var actualNames []string - - r := &stubRegistry{} - f := Filters{IncludeAll: true} - RegisterProcessMetrics(r, p.NewProcProc, f) - - for i := range r.RegisterNameOpts { - actualNames = append(actualNames, r.RegisterNameOpts[i].Name) - } - - testForMetricNames(t, expectedNames, actualNames) - - if r.AddCollectorFunc == nil { - t.Error("expected collector function, found none") - } -} diff --git a/config/config.go b/config/config.go deleted file mode 100644 index 14196385..00000000 --- a/config/config.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -var ( - //Version contains the application version - version string - //Build contains the repository branch and commit hash used to - //compile the agent. - build string -) - -// Version contains the semantic version of the agent build -func Version() string { - if version != "" { - return version - } - return "dev" -} - -// Build is the git branch and commit hash provided during compilation -func Build() string { - return build -} diff --git a/config/config_test.go b/config/config_test.go deleted file mode 100644 index a71d561a..00000000 --- a/config/config_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package config
-
-import "testing"
-
-func TestVersion(t *testing.T) {
-	version = "1.2.3"
-	expectedVersion := version
-	if version != Version() {
-		t.Errorf("version expected %s got %s", expectedVersion, Version())
-	}
-}
-
-func TestVersionEmpty(t *testing.T) {
-	version = ""
-	expectedVersion := "dev"
-	if expectedVersion != Version() {
-		t.Errorf("version expected %s got %s", expectedVersion, Version())
-	}
-}
-
-func TestBuild(t *testing.T) {
-	build = "foo"
-	expectedBuild := build
-	if build != Build() {
-		t.Errorf("build expected %s got %s", expectedBuild, Build())
-	}
-}
diff --git a/gometalinter.json b/gometalinter.json
new file mode 100644
index 00000000..5656f0f6
--- /dev/null
+++ b/gometalinter.json
@@ -0,0 +1,20 @@
+{
+  "Enable": [
+    "megacheck",
+    "ineffassign",
+    "misspell",
+    "nakedret",
+    "gosec",
+    "golint",
+    "goconst",
+    "unused",
+    "deadcode",
+    "vet",
+    "gosimple",
+    "gofmt"
+  ],
+  "Vendor": false,
+  "Deadline": "120s",
+  "Aggregate": true,
+  "Skip": ["pkg/clients/timeseries", "vendor"]
+}
diff --git a/internal/log/log.go b/internal/log/log.go
new file mode 100644
index 00000000..6b09ec00
--- /dev/null
+++ b/internal/log/log.go
@@ -0,0 +1,60 @@
+package log
+
+import (
+	"fmt"
+	"log"
+	"log/syslog"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	initFailed  = "failed to initialize syslog logger"
+	syslogFlags = log.Lshortfile
+	normalFlags = log.LUTC | log.Ldate | log.Ltime | log.Lshortfile
+)
+
+var (
+	infolog = log.New(os.Stdout, "INFO: ", normalFlags)
+	errlog  = log.New(os.Stderr, "ERROR: ", normalFlags)
+)
+
+// InitSyslog initializes logging to syslog
+func InitSyslog() (err error) {
+	il, err := syslog.NewLogger(syslog.LOG_NOTICE, syslogFlags)
+	if err != nil {
+		return errors.Wrap(err, initFailed)
+	}
+	infolog = il
+
+	el, err := syslog.NewLogger(syslog.LOG_ERR, syslogFlags)
+	if err != nil {
+		return errors.Wrap(err, initFailed)
+	}
+	errlog = el
+
+	return nil
+}
+
+// Info prints a message to syslog with level LOG_NOTICE
+func Info(msg string, params ...interface{}) {
+	if err := infolog.Output(2, fmt.Sprintf(msg, params...)); err != nil {
+		fmt.Fprintf(os.Stderr, "ERROR writing log output: %+v", err)
+	}
+}
+
+// Error prints an error to syslog with level LOG_ERR
+func Error(msg string, params ...interface{}) {
+	if err := errlog.Output(2, fmt.Sprintf(msg, params...)); err != nil {
+		fmt.Fprintf(os.Stderr, "ERROR writing log output: %+v", err)
+	}
+}
+
+// Fatal prints an error to syslog with level LOG_ERR, then exits the process
+func Fatal(msg string, params ...interface{}) {
+	if err := errlog.Output(2, fmt.Sprintf(msg, params...)); err != nil {
+		fmt.Fprintf(os.Stderr, "ERROR writing log output: %+v", err)
+	}
+	os.Exit(1)
+}
diff --git a/k8s-daemonset.yml b/k8s-daemonset.yml
new file mode 100644
index 00000000..b79f8f8f
--- /dev/null
+++ b/k8s-daemonset.yml
@@ -0,0 +1,76 @@
+---
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: monitoring
+  labels:
+    name: monitoring
+
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: do-agent
+  namespace: monitoring
+
+
+---
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+  labels:
+    app: do-agent
+  name: do-agent
+  namespace: monitoring
+spec:
+  selector:
+    matchLabels:
+      app: do-agent
+  template:
+    metadata:
+      labels:
+        app: do-agent
+    spec:
+      containers:
+      - name: do-agent
+        image: docker.io/digitalocean/do-agent:1.0.5
+        resources:
+          limits:
+            cpu: 102m
+            memory: 100Mi
+          requests:
+            cpu: 102m
+            memory: 80Mi
+        volumeMounts:
+        - mountPath: /host/proc
+          name: proc
+          readOnly: true
+        - mountPath: /host/sys
+          name: sys
+          readOnly: true
+        - mountPath: /host/root
+          mountPropagation: HostToContainer
+          name: root
+          readOnly: true
+      hostNetwork: true
+      hostPID: true
+      nodeSelector:
+        beta.kubernetes.io/os: linux
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65534
+      serviceAccountName: do-agent
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+      volumes:
+      - hostPath:
+          path: /proc
+        name: proc
+      - hostPath:
+          path: /sys
+        name: sys
+      - hostPath:
+          path: /
+        name: root
diff --git a/log/log.go b/log/log.go
deleted file mode 100644
index 0e16e05a..00000000
--- a/log/log.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package log
-
-import (
-	"errors"
-	stdlog "log"
-	"log/syslog"
-)
-
-var (
-	logLevel level
-
-	// ErrUnrecognizedLogLevel log level not valid
-	ErrUnrecognizedLogLevel = errors.New("unrecognized error level")
-)
-
-const (
-	errorLevel level = iota
-	infoLevel
-	debugLevel
-)
-
-const (
-	errorLabel = "ERROR"
-	infoLabel  = "INFO"
-	debugLabel = "DEBUG"
-)
-
-type level uint64
-
-// setLevel will set the log level to one of the appropriate levels
-func setLevel(level level) {
-	logLevel = level
-}
-
-// setSyslogger enables the syslog writer
-func setSyslogger() {
-	if logwriter, err := syslog.New(syslog.LOG_NOTICE|syslog.LOG_DAEMON, "do-agent"); err == nil {
-		stdlog.SetOutput(logwriter)
-	}
-}
-
-// SetLogger sets the log level with one of the labels and sets the syslog level
-func SetLogger(levelLabel string, logToSyslog bool) error {
-	ll, err := toLevel(levelLabel)
-	if err != nil {
-		return err
-	}
-	setLevel(ll)
-	if logToSyslog {
-		setSyslogger()
-	}
-	return nil
-}
-
-// LogToLevel converts a log label to the corresponding level
-func toLevel(label string) (level, error) {
-	switch label {
-	case debugLabel:
-		return debugLevel, nil
-	case infoLabel:
-		return infoLevel, nil
-	case errorLabel:
-		return errorLevel, nil
-	default:
-		return 0, ErrUnrecognizedLogLevel
-	}
-}
-
-// Debugf logs at debug level with the use of format specifiers
-func Debugf(format string, args ...interface{}) {
-	if logLevel >= debugLevel {
-		stdlog.Printf(format, args...)
-	}
-}
-
-// Infof logs at info level with the use of format specifiers
-func Infof(format string, args ...interface{}) {
-	if logLevel >= infoLevel {
-		stdlog.Printf(format, args...)
-	}
-}
-
-// Errorf logs at error level with the use of format specifiers
-func Errorf(format string, args ...interface{}) {
-	if logLevel >= errorLevel {
-		stdlog.Printf(format, args...)
-	}
-}
-
-// Fatalf logs a message with the use of format specifiers and exits
-func Fatalf(format string, args ...interface{}) {
-	stdlog.Fatalf(format, args...)
-}
-
-// Debug logs at debug level
-func Debug(args ...interface{}) {
-	if logLevel >= debugLevel {
-		stdlog.Print(args...)
-	}
-}
-
-// Info logs at info level
-func Info(args ...interface{}) {
-	if logLevel >= infoLevel {
-		stdlog.Print(args...)
- } -} - -// Error logs at error level -func Error(args ...interface{}) { - if logLevel >= errorLevel { - stdlog.Print(args...) - } -} - -// Fatal logs a message and exits -func Fatal(args ...interface{}) { - stdlog.Fatal(args...) -} diff --git a/log/log_test.go b/log/log_test.go deleted file mode 100644 index d34f2d64..00000000 --- a/log/log_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -import "testing" - -func TestLogToLevel(t *testing.T) { - tests := []struct { - label string - want level - err error - }{ - { - label: errorLabel, - want: errorLevel, - err: nil, - }, - { - label: infoLabel, - want: infoLevel, - err: nil, - }, - { - label: debugLabel, - want: debugLevel, - err: nil, - }, - { - label: "foo", - want: 0, - err: ErrUnrecognizedLogLevel, - }, - } - - for _, test := range tests { - l, err := toLevel(test.label) - if test.err != nil && err != test.err { - t.Errorf("want=%+v got=%+v", test.err, err) - continue - } - if l != test.want { - t.Errorf("want=%d got=%d", test.want, l) - } - } -} - -func TestSetLevel(t *testing.T) { - setLevel(errorLevel) - if logLevel != errorLevel { - t.Error("log level not expected value") - } -} diff --git a/metrics/gen-pb.sh b/metrics/gen-pb.sh deleted file mode 100755 index bb7c2e04..00000000 --- a/metrics/gen-pb.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2016 DigitalOcean - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -PTYPES=github.com/golang/protobuf/ptypes -TIMESTAMP_MAP="Mgoogle/protobuf/timestamp.proto=${PTYPES}/timestamp" - -protoc -I. --gogofaster_out=. *.proto - diff --git a/metrics/metrics.pb.go b/metrics/metrics.pb.go deleted file mode 100644 index e24d8f53..00000000 --- a/metrics/metrics.pb.go +++ /dev/null @@ -1,1130 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: metrics.proto -// DO NOT EDIT! - -/* - Package metrics is a generated protocol buffer package. - - It is generated from these files: - metrics.proto - - It has these top-level messages: - LabelPair - Gauge - Counter - Metric - MetricFamily -*/ -package metrics - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" -import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", -} -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} -func (MetricType) EnumDescriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} } - -type LabelPair struct { - Name string `protobuf:"bytes,1,req,name=name" json:"name"` - Value string `protobuf:"bytes,2,req,name=value" json:"value"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} } - -func (m *LabelPair) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -type Gauge struct { - Value float64 `protobuf:"fixed64,1,req,name=value" json:"value"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{1} } - -func (m *Gauge) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -type Counter struct { - Value float64 `protobuf:"fixed64,1,req,name=value" json:"value"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{2} } - -func (m *Counter) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{3} } - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -type MetricFamily struct { - Name string `protobuf:"bytes,1,req,name=name" json:"name"` - // reserved 2; - Type MetricType 
`protobuf:"varint,3,req,name=type,enum=metrics.MetricType" json:"type"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{4} } - -func (m *MetricFamily) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil { - return m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterType((*LabelPair)(nil), "metrics.LabelPair") - proto.RegisterType((*Gauge)(nil), "metrics.Gauge") - proto.RegisterType((*Counter)(nil), "metrics.Counter") - proto.RegisterType((*Metric)(nil), "metrics.Metric") - proto.RegisterType((*MetricFamily)(nil), "metrics.MetricFamily") - proto.RegisterEnum("metrics.MetricType", MetricType_name, MetricType_value) -} -func (m *LabelPair) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelPair) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintMetrics(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintMetrics(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - return i, nil -} - -func (m *Gauge) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Gauge) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x9 - i++ - i = encodeFixed64Metrics(data, i, uint64(math.Float64bits(float64(m.Value)))) - return i, nil -} - -func (m *Counter) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Counter) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x9 - i++ - i = encodeFixed64Metrics(data, i, uint64(math.Float64bits(float64(m.Value)))) - return i, nil -} - -func (m *Metric) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Metric) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Label) > 0 { - for _, msg := range m.Label { - data[i] = 0xa - i++ - i = encodeVarintMetrics(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Gauge != nil { - data[i] = 0x12 - i++ - i = encodeVarintMetrics(data, i, uint64(m.Gauge.Size())) - n1, err := m.Gauge.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Counter != nil { - data[i] = 0x1a - i++ - i = encodeVarintMetrics(data, i, uint64(m.Counter.Size())) - n2, err := m.Counter.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func (m *MetricFamily) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, 
size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *MetricFamily) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintMetrics(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x18 - i++ - i = encodeVarintMetrics(data, i, uint64(m.Type)) - if len(m.Metric) > 0 { - for _, msg := range m.Metric { - data[i] = 0x22 - i++ - i = encodeVarintMetrics(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Metrics(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Metrics(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintMetrics(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *LabelPair) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovMetrics(uint64(l)) - l = len(m.Value) - n += 1 + l + sovMetrics(uint64(l)) - return n -} - -func (m *Gauge) Size() (n int) { - var l int - _ = l - n += 9 - return n -} - -func (m *Counter) Size() (n int) { - var l int - _ = l - n += 9 - return n -} - -func (m *Metric) Size() (n int) { - var l int - _ = l - if len(m.Label) > 0 { - for _, e := range m.Label { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Gauge != nil { - l = m.Gauge.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Counter != nil { - l = m.Counter.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} - -func (m *MetricFamily) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovMetrics(uint64(l)) - n += 1 + sovMetrics(uint64(m.Type)) - if len(m.Metric) > 0 { - for _, e := range m.Metric { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func sovMetrics(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *LabelPair) Unmarshal(data []byte) error { - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(data[iNdEx:postIndex]) - iNdEx = postIndex - hasFields[0] |= uint64(0x00000002) - default: - iNdEx = preIndex - skippy, err := skipMetrics(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Gauge) Unmarshal(data []byte) error { - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Gauge: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 8 - v = uint64(data[iNdEx-8]) - v |= uint64(data[iNdEx-7]) << 8 - v |= uint64(data[iNdEx-6]) << 16 - v |= uint64(data[iNdEx-5]) << 24 - v |= uint64(data[iNdEx-4]) << 32 - v |= uint64(data[iNdEx-3]) << 40 - v |= uint64(data[iNdEx-2]) << 48 - v |= uint64(data[iNdEx-1]) << 56 - m.Value = float64(math.Float64frombits(v)) - hasFields[0] |= uint64(0x00000001) - default: - iNdEx = preIndex - skippy, err := skipMetrics(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Counter) Unmarshal(data []byte) error { - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Counter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 8 - v = uint64(data[iNdEx-8]) - v |= uint64(data[iNdEx-7]) << 8 - v |= uint64(data[iNdEx-6]) << 16 - v |= uint64(data[iNdEx-5]) << 24 - v |= uint64(data[iNdEx-4]) << 32 - v |= uint64(data[iNdEx-3]) << 40 - v |= uint64(data[iNdEx-2]) << 48 - v |= uint64(data[iNdEx-1]) << 56 - m.Value = float64(math.Float64frombits(v)) - hasFields[0] |= uint64(0x00000001) - default: - iNdEx = preIndex - skippy, err := skipMetrics(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metric) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Label = append(m.Label, &LabelPair{}) - if err := m.Label[len(m.Label)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Gauge == nil { - m.Gauge = &Gauge{} - } - if err := m.Gauge.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) 
- } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Counter == nil { - m.Counter = &Counter{} - } - if err := m.Counter.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricFamily) Unmarshal(data []byte) error { - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricFamily: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (MetricType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - hasFields[0] |= uint64(0x00000002) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metric = append(m.Metric, &Metric{}) - if err := m.Metric[len(m.Metric)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - 
} - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetrics(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipMetrics(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("metrics.proto", fileDescriptorMetrics) } - -var fileDescriptorMetrics = []byte{ - // 278 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x4d, 0x2d, 0x29, - 0xca, 0x4c, 0x2e, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0x95, 0x4c, 0xb8, - 0x38, 0x7d, 0x12, 0x93, 0x52, 0x73, 0x02, 0x12, 0x33, 0x8b, 0x84, 0x84, 0xb8, 0x58, 0xf2, 0x12, - 0x73, 0x53, 0x25, 0x18, 0x15, 0x98, 0x34, 0x38, 0x9d, 0x58, 0x4e, 0xdc, 0x93, 0x67, 0x10, 0x12, - 0xe6, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x95, 0x60, 0x42, 0x08, 0x2a, 0xc9, 0x70, 0xb1, 0xba, - 0x27, 0x96, 0xa6, 0xa7, 0x22, 0x64, 0x41, 0x5a, 0x18, 0xa1, 0xb2, 0x72, 0x5c, 0xec, 0xce, 0xf9, - 0xa5, 0x79, 0x25, 0xa9, 0x45, 0xd8, 0xe5, 0x73, 0xb9, 0xd8, 0x7c, 0xc1, 0xd6, 0x0b, 0x29, 0x72, - 0xb1, 0xe6, 0x80, 0x6c, 0x97, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x12, 0xd2, 0x83, 0xb9, 0x12, - 0xe1, 0x26, 0x59, 0x2e, 0xd6, 0x74, 0x90, 0x55, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0x7c, - 0x70, 0x25, 0x10, 0x07, 0x28, 0x72, 0xb1, 0x27, 0x43, 0xec, 0x92, 0x60, 0x06, 0x2b, 0x10, 0x80, - 0x2b, 0x80, 0xba, 0x41, 0x29, 0x87, 0x8b, 0x07, 0x62, 0x9d, 0x5b, 0x62, 0x6e, 0x66, 0x4e, 0x25, - 0x56, 0x5f, 0xaa, 0x73, 0xb1, 0x94, 
0x54, 0x16, 0xa4, 0x4a, 0x30, 0x2b, 0x30, 0x69, 0xf0, 0x19, - 0x09, 0xc3, 0xcd, 0x80, 0x68, 0x0c, 0xa9, 0x2c, 0x48, 0x85, 0x2a, 0x94, 0xe7, 0x62, 0x83, 0xc8, - 0x49, 0xb0, 0x80, 0x9d, 0xcc, 0x8f, 0xa6, 0x54, 0x4b, 0x85, 0x8b, 0x0b, 0xa1, 0x49, 0x88, 0x9b, - 0x8b, 0xdd, 0xd9, 0x3f, 0xd4, 0x2f, 0xc4, 0x35, 0x48, 0x80, 0x41, 0x88, 0x93, 0x8b, 0xd5, 0xdd, - 0x31, 0xd4, 0xdd, 0x55, 0x80, 0xd1, 0x49, 0xe0, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, - 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x95, - 0x2c, 0x6f, 0xa1, 0x01, 0x00, 0x00, -} diff --git a/metrics/metrics.proto b/metrics/metrics.proto deleted file mode 100644 index 5d7ecd75..00000000 --- a/metrics/metrics.proto +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2013 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// -// DO: this is a heavily paired down extract of prometheus metric definitions. -// TODO(vgough): remove entirely once the server supports a more compact format. - -syntax = "proto2"; - -package metrics; - -message LabelPair { - required string name = 1; - required string value = 2; -} - -enum MetricType { - COUNTER = 0; - GAUGE = 1; - // reserved 2, 3, 4; -} - -message Gauge { - required double value = 1; -} - -message Counter { - required double value = 1; -} - -message Metric { - repeated LabelPair label = 1; - optional Gauge gauge = 2; - optional Counter counter = 3; - // reserved 4, 5, 6, 7; -} - -message MetricFamily { - required string name = 1; - // reserved 2; - required MetricType type = 3; - repeated Metric metric = 4; -} diff --git a/metrics/registry.go b/metrics/registry.go deleted file mode 100644 index b1c64db3..00000000 --- a/metrics/registry.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -// MetricRef is a unique identifier for a metric, provided when the metric is -// registered. It is used by the related collector when reporting metric values. -type MetricRef interface{} - -// Collector is a function which reports current metric values. -type Collector func(r Reporter) - -// Registry is a tracked set of metrics. -// When a new metric is added, a unique id is provided to the caller. -// This should be used within a registered collection function to report metrics -// at collection time. 
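A minimal usage sketch of the registry API being removed here, built only from the Registry interface and RegOpt helpers that follow; printReporter, the metric name, and the label values are illustrative assumptions, not part of the package.

package example

import (
	"fmt"

	"github.com/digitalocean/do-agent/metrics"
)

// printReporter is a hypothetical Reporter that prints each reported sample.
type printReporter struct{}

func (printReporter) Update(ref metrics.MetricRef, value float64, labelValues ...string) {
	// Register returns a *Definition, so the ref can be inspected for its name.
	if def, ok := ref.(*metrics.Definition); ok {
		fmt.Printf("%s %v = %f\n", def.Name, labelValues, value)
	}
}

func registryExample() {
	r := metrics.NewRegistry()

	// Register once and keep the ref; it identifies the metric at collection time.
	ref := r.Register("cpu_utilization",
		metrics.WithCommonLabels(map[string]string{"host": "example"}),
		metrics.WithMeasuredLabels("cpu"))

	// Collectors run on every Report call and push current values.
	r.AddCollector(func(rep metrics.Reporter) {
		rep.Update(ref, 0.25, "cpu0") // one value per measured label key, in order
	})

	r.Report(printReporter{})
}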
-type Registry interface { - // Register defines a metric collector for custom named metrics, returning - // a unique id that is used to record metric samples. - Register(name string, opts ...RegOpt) MetricRef - - // AddCollector adds a collection function to be called to collect metrics. - // The collector reports the current value of metrics via the Reporter. - AddCollector(f Collector) - - // Report has all registered collectors report to the given reporter. - Report(r Reporter) -} - -// Reporter defines a metric measurement reporting API. -// Collection functions are periodically called to report metric values. -type Reporter interface { - Update(ref MetricRef, value float64, labelValues ...string) -} - -// NewRegistry returns a new registry. -func NewRegistry() Registry { - return new(registry) -} - -// RegOpt is an option initializer for metric registration. -// Use With* calls to add options. -type RegOpt func(*Definition) - -// Definition holds the description of a metric. -type Definition struct { - Name string `json:"name"` - Type MetricType `json:"type"` - CommonLabels map[string]string `json:"labels,omitempty"` - MeasuredLabelKeys []string `json:"label_keys,omitempty"` -} - -// AsType sets the metric type, or else the default is Gauge. -func AsType(t MetricType) RegOpt { - return func(o *Definition) { - o.Type = t - } -} - -// WithCommonLabels adds common labels that are used for every measurement on -// the associated metric. -func WithCommonLabels(labels map[string]string) RegOpt { - return func(o *Definition) { - if o.CommonLabels == nil { - o.CommonLabels = labels - return - } - for k, v := range labels { - o.CommonLabels[k] = v - } - } -} - -// WithMeasuredLabels adds label keys to the associated metric. Each time -// the metric is measured, the associated label values must be provided (in -// order!). If label values are constant for a metric (eg hostname), then -// use WithCommonLabels instead. -func WithMeasuredLabels(labelKeys ...string) RegOpt { - return func(o *Definition) { - o.MeasuredLabelKeys = append(o.MeasuredLabelKeys, labelKeys...) - } -} - -type registry struct { - collectors []Collector -} - -func (r *registry) Register(name string, opts ...RegOpt) MetricRef { - d := &Definition{ - Name: name, - Type: MetricType_GAUGE, - } - for _, o := range opts { - o(d) - } - - return d -} - -func (r *registry) AddCollector(c Collector) { - r.collectors = append(r.collectors, c) -} - -func (r *registry) Report(reporter Reporter) { - for _, c := range r.collectors { - c(reporter) - } -} diff --git a/monitoringclient/metrics.go b/monitoringclient/metrics.go deleted file mode 100644 index 97e298d7..00000000 --- a/monitoringclient/metrics.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package monitoringclient - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - "math/rand" - "net" - "net/http" - "strconv" - "time" - - "github.com/gogo/protobuf/proto" - - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" -) - -const ( - authKeyHeader = "X-Auth-Key" - // MetricsMasterURL is the address for metrics general server - MetricsMasterURL = "https://master.sonar.digitalocean.com" - - // default push intervals in seconds for cases where the server does not specify a frequency - defaultPushInterval = 60 - jitterMin = -15 - jitterMax = 15 - jitterStdDev = 3.2 // variance ~= 10 - pushIntervalHeaderKey = "X-Metric-Push-Interval" - contentTypeHeader = "Content-Type" - httpTimeout = 10 * time.Second -) - -// DelimitedTelemetryContentType is the content type set on telemetry -// data responses in delimited protobuf format. -const DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` - -// MonitoringMetricsClient interface describes available sonar metrics API calls -type MonitoringMetricsClient interface { - SendMetrics() (int, error) - - Registry() metrics.Registry -} - -// monitoringMetricsClientDroplet provide the interface for droplets to -//send metrics to Monitoring -type monitoringMetricsClientDroplet struct { - url string - appKey string - dropletID int64 - r metrics.Registry -} - -//newMetricsClientDroplet creates a new monitoring metrics client with a specific region -func newMetricsClientDroplet(appKey string, dropletID int64, region, wharfURL string) monitoringMetricsClientDroplet { - url := fmt.Sprintf("https://%s.sonar.digitalocean.com", region) - if wharfURL != "" { - url = wharfURL - requireHTTPS = false - log.Debugf("HTTPS requirement not enforced for overridden url: %s", url) - } - - return monitoringMetricsClientDroplet{ - url: url, - appKey: appKey, - dropletID: dropletID, - r: metrics.NewRegistry(), - } -} - -// randomizedPushInterval returns an update interval w/ jitter applied. -func randomizedPushInterval() int { - return defaultPushInterval + - int(math.Max(math.Min(rand.NormFloat64()*jitterStdDev, jitterMax), jitterMin)) -} - -//SendMetrics sends metrics to monitoring server, the server returns how many seconds to wait until next push -func (s monitoringMetricsClientDroplet) SendMetrics() (int, error) { - postURL := s.url + fmt.Sprintf("/v1/metrics/droplet_id/%d", s.dropletID) - appKey := s.appKey - nextPush := randomizedPushInterval() - - if s.r == nil { - return nextPush, errors.New("no registry") - } - err := httpsCheck(postURL) - if err != nil { - return nextPush, err - } - - // Collect all metrics. 
- report := s.CreateReport() - - log.Debugf("Posting metrics to: %s", postURL) - req, err := http.NewRequest("POST", postURL, bytes.NewBuffer(report)) - if err != nil { - return nextPush, err - } - addUserAgentToHTTPRequest(req) - req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) - req.Header.Add(authKeyHeader, appKey) - - hc := http.Client{ - Timeout: httpTimeout, - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: httpTimeout, - }).Dial, - TLSHandshakeTimeout: httpTimeout, - ResponseHeaderTimeout: httpTimeout, - DisableKeepAlives: true, - }, - } - - resp, err := hc.Do(req) - if err != nil { - return nextPush, err - } - defer resp.Body.Close() - if resp.StatusCode != 202 { - return nextPush, - fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, postURL) - } - - receivedHeaders := resp.Header - sendInterval, err := strconv.Atoi(receivedHeaders.Get(pushIntervalHeaderKey)) - if err != nil { - sendInterval = nextPush - } - return sendInterval, nil -} - -func (s monitoringMetricsClientDroplet) Registry() metrics.Registry { - return s.r -} - -//CreateMetricsClient creates a new metrics client -func CreateMetricsClient(appkey string, dropletID int64, region string, wharfURL string) (MonitoringMetricsClient, error) { - return newMetricsClientDroplet(appkey, dropletID, region, wharfURL), nil -} - -func (s monitoringMetricsClientDroplet) CreateReport() []byte { - reporter := &prometheusReporter{ - metrics: make(map[string]*metrics.MetricFamily), - } - s.r.Report(reporter) - - var buf bytes.Buffer - for _, m := range reporter.metrics { - var err error - if reporter.asText { - err = proto.MarshalText(&buf, m) - } else { - _, err = appendDelimited(&buf, m) - } - if err != nil { - log.Debugf("serialization error: %s", err) - } - } - - return buf.Bytes() -} - -type prometheusReporter struct { - metrics map[string]*metrics.MetricFamily - asText bool -} - -func (p *prometheusReporter) Update(id metrics.MetricRef, - value float64, labelValues ...string) { - def, ok := id.(*metrics.Definition) - if !ok { - log.Debugf("unknown metric: %d", id) - return - } - - if len(labelValues) != len(def.MeasuredLabelKeys) { - log.Debugf("label mismatch for metric: %s", def.Name) - return - } - - m := &metrics.Metric{} - switch def.Type { - case metrics.MetricType_COUNTER: - m.Counter = &metrics.Counter{Value: value} - case metrics.MetricType_GAUGE: - m.Gauge = &metrics.Gauge{Value: value} - } - - name := "sonar_" + def.Name - fam, ok := p.metrics[name] - if !ok { - fam = &metrics.MetricFamily{ - Name: name, - Type: def.Type, - } - p.metrics[name] = fam - } - fam.Metric = append(fam.Metric, m) - - if def.CommonLabels != nil { - for k, v := range def.CommonLabels { - m.Label = append(m.Label, &metrics.LabelPair{Name: k, Value: v}) - } - } - - for i, v := range labelValues { - m.Label = append(m.Label, - &metrics.LabelPair{Name: def.MeasuredLabelKeys[i], Value: v}) - } -} - -// appendDelimited appends a length-delimited protobuf message to the writer. -// Returns the number of bytes written, and any error. 
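The writer half of this framing is appendDelimited, just below: a uvarint length prefix followed by the marshaled message. For context, a sketch of the matching read side under that same framing, assuming the same gogo/protobuf types; readDelimited is a hypothetical helper name, not part of this package.

package example

import (
	"bufio"
	"encoding/binary"
	"io"

	"github.com/gogo/protobuf/proto"
)

// readDelimited reads one length-delimited message, the inverse of appendDelimited.
func readDelimited(r *bufio.Reader, m proto.Message) error {
	// Read the uvarint length prefix; *bufio.Reader satisfies io.ByteReader.
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return err
	}
	// Read exactly that many bytes and unmarshal them into m.
	buf := make([]byte, size)
	if _, err := io.ReadFull(r, buf); err != nil {
		return err
	}
	return proto.Unmarshal(buf, m)
}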
-func appendDelimited(out *bytes.Buffer, m proto.Message) (int, error) { - buf, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var delim [binary.MaxVarintLen32]byte - len := binary.PutUvarint(delim[:], uint64(len(buf))) - n, err := out.Write(delim[:len]) - if err != nil { - return n, err - } - - dn, err := out.Write(buf) - return n + dn, err -} diff --git a/monitoringclient/metrics_test.go b/monitoringclient/metrics_test.go deleted file mode 100644 index b1d10ce0..00000000 --- a/monitoringclient/metrics_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package monitoringclient - -import ( - "fmt" - "net/http" - "net/http/httptest" - "strconv" - "testing" - - "github.com/digitalocean/do-agent/metrics" -) - -func TestNewMetricsClientDroplet(t *testing.T) { - expAppKey := "xxx" - expDroplet := int64(123) - expRegion := "miami" - expURL := "https://miami.sonar.digitalocean.com" - - mcd := newMetricsClientDroplet(expAppKey, expDroplet, expRegion, "") - if mcd.url != expURL { - t.Errorf("got: %s expected: %s", mcd.url, expURL) - } - if mcd.appKey != expAppKey { - t.Errorf("got: %s expected: %s", mcd.appKey, expAppKey) - } - if mcd.dropletID != expDroplet { - t.Errorf("got: %d expected: %d", mcd.dropletID, expDroplet) - } -} - -func TestNewMetricsClientDropletNonHTTPS(t *testing.T) { - requireHTTPS = true - monitoringClient := &monitoringMetricsClientDroplet{ - url: "http://insecurelink", - r: metrics.NewRegistry(), - } - _, err := monitoringClient.SendMetrics() - if err != ErrAuthURLNotHTTPS { - t.Error("unsecure URL accepted") - } -} - -func TestDropletSendMetrics(t *testing.T) { - requireHTTPS = false - defer func() { requireHTTPS = true }() - - expectedAppkey := "appkey" - expectedDropletID := int64(123456789) - expectedURLPath := fmt.Sprintf("/v1/metrics/droplet_id/%d", expectedDropletID) - expectedPushInterval := 52 - - s := httptest.NewServer( - http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - if r.URL.String() != expectedURLPath { - w.WriteHeader(http.StatusNotFound) - } else { - w.Header().Set(pushIntervalHeaderKey, - strconv.FormatInt(int64(expectedPushInterval), 10)) - w.WriteHeader(http.StatusAccepted) - } - })) - defer s.Close() - - monitoringClient := &monitoringMetricsClientDroplet{ - url: s.URL, - appKey: expectedAppkey, - dropletID: expectedDropletID, - r: metrics.NewRegistry(), - } - - actualPushInterval, err := monitoringClient.SendMetrics() - if err != nil { - t.Fatal(err) - } - if actualPushInterval != expectedPushInterval { - t.Errorf("want %v got %v", expectedPushInterval, actualPushInterval) - } -} - -func TestRandomizedPutInterval(t *testing.T) { - for i := 0; i < 100; i++ { - j := randomizedPushInterval() - if j < defaultPushInterval+jitterMin { - t.Fatalf("interval too short: %d", j) - } - if j > defaultPushInterval+jitterMax { - t.Fatalf("interval too long: %d", j) - } - } -} diff --git a/monitoringclient/monitoringclient.go 
b/monitoringclient/monitoringclient.go deleted file mode 100644 index a135dfa7..00000000 --- a/monitoringclient/monitoringclient.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package monitoringclient - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "strings" - - "github.com/digitalocean/do-agent/config" - "github.com/digitalocean/do-agent/log" -) - -const ( - // AuthURL is the address to the Sonar authentication server - AuthURL = "https://sonar.digitalocean.com" - - // MetadataURL is the address to the metadata service - MetadataURL = "http://169.254.169.254" - - userAgentHeader = "User-Agent" -) - -var ( - // ErrAuthURLNotHTTPS occurs if we connect over http rather than https - ErrAuthURLNotHTTPS = errors.New("Sonar URL not HTTPS") - - // Allow unittests to disable this - requireHTTPS = true -) - -//MonitoringClient interface describes available sonar API calls -type MonitoringClient interface { - GetAppKey(string) (string, error) -} - -type monitoringClient struct { - url string -} - -// GetAppKey retrieves the appkey from the sonar service. -func (s *monitoringClient) GetAppKey(authToken string) (string, error) { - err := httpsCheck(s.url) - if err != nil { - return "", err - } - - hc := http.Client{ - Timeout: httpTimeout, - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: httpTimeout, - }).Dial, - TLSHandshakeTimeout: httpTimeout, - ResponseHeaderTimeout: httpTimeout, - DisableKeepAlives: true, - }, - } - - req, err := http.NewRequest("GET", s.url+"/v1/appkey/droplet-auth-token", nil) - if err != nil { - errMsg := "DigitalOcean sonar service unreachable: %s" - log.Errorf(errMsg, err) - return "", err - } - - addUserAgentToHTTPRequest(req) - req.Header.Add("Authorization", "DOMETADATA "+authToken) - - resp, err := hc.Do(req) - if err != nil { - return "", fmt.Errorf("DigitalOcean sonar service unreachable: %s", err.Error()) - } - if resp.StatusCode != 200 { - return "", fmt.Errorf("DigitalOcean sonar service returned unexpected status: %d", resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Debugf("Error parsing value from http request: %s", err) - return "", err - } - - var appKey string - err = json.Unmarshal(body, &appKey) - if err != nil { - log.Errorf("Failed to unmarshall appKey: %s", err) - return "", err - } - - return appKey, nil -} - -// NewClient creates a new monitoring client -func NewClient(configURL string) MonitoringClient { - if configURL != AuthURL { - requireHTTPS = false - log.Debugf("HTTPS requirement not enforced for overridden url: %s", configURL) - return &monitoringClient{ - url: configURL, - } - } - - return &monitoringClient{ - url: AuthURL, - } -} - -func httpsCheck(url string) error { - if !strings.HasPrefix(strings.ToLower(url), "https://") && requireHTTPS { - return ErrAuthURLNotHTTPS - } - return nil -} - -// addUserAgentToHTTPRequest adds sonar agent 
label with agent version
-// number to the HTTP user agent header
-func addUserAgentToHTTPRequest(req *http.Request) {
-	req.Header.Add(userAgentHeader, "do-agent-"+config.Version())
-}
diff --git a/monitoringclient/monitoringclient_test.go b/monitoringclient/monitoringclient_test.go
deleted file mode 100644
index 7eb2b302..00000000
--- a/monitoringclient/monitoringclient_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package monitoringclient
-
-import (
-	"fmt"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	"github.com/digitalocean/do-agent/config"
-)
-
-func TestGetAppKeyRejectsNonHTTPS(t *testing.T) {
-	requireHTTPS = true
-	monitoringClient := &monitoringClient{
-		url: "http://insecurelink",
-	}
-	_, err := monitoringClient.GetAppKey("abc")
-	if err != ErrAuthURLNotHTTPS {
-		t.Error("unsecure URL accepted")
-	}
-}
-
-func TestGetAppKey(t *testing.T) {
-	requireHTTPS = false
-	defer func() { requireHTTPS = true }()
-
-	expectedAppkey := "appkey"
-	s := httptest.NewServer(
-		http.HandlerFunc(
-			func(w http.ResponseWriter, r *http.Request) {
-				fmt.Fprintf(w, "\"%s\"", expectedAppkey)
-			}))
-	defer s.Close()
-
-	monitoringClient := &monitoringClient{
-		url: s.URL,
-	}
-	appkey, err := monitoringClient.GetAppKey("abc")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if appkey != expectedAppkey {
-		t.Errorf("got: %s want: %s", appkey, expectedAppkey)
-	}
-}
-
-func TestAddUserAgentToHTTPRequest(t *testing.T) {
-	expectedAgent := "do-agent-" + config.Version()
-
-	r, _ := http.NewRequest("POST", "http://www.digitalocean.com", nil)
-	addUserAgentToHTTPRequest(r)
-
-	if r.UserAgent() != expectedAgent {
-		t.Errorf("got: %s expected: %s", r.UserAgent(), expectedAgent)
-	}
-}
diff --git a/packaging/scripts/after_install.sh b/packaging/scripts/after_install.sh
new file mode 100755
index 00000000..227f6c67
--- /dev/null
+++ b/packaging/scripts/after_install.sh
@@ -0,0 +1,135 @@
+#!/bin/sh
+# noexpandtab is required for EOF/heredoc
+# vim: noexpandtab
+#
+# IMPORTANT: this script will execute with /bin/sh which is dash on some
+# systems so this shebang should not be changed
+# DO NOT change this and make sure you are linting with shellcheck to ensure
+# compatibility with scripts
+
+set -ue
+
+INSTALL_DIR=/opt/digitalocean/do-agent
+SVC_NAME=do-agent
+NOBODY_USER=nobody
+NOBODY_GROUP=nogroup
+CRON=/etc/cron.daily/do-agent
+
+# fedora uses nobody instead of nogroup
+getent group nobody 2> /dev/null \
+	&& NOBODY_GROUP=nobody
+
+main() {
+	update_selinux
+
+	if command -v systemctl >/dev/null 2>&1; then
+		init_systemd
+	elif command -v initctl >/dev/null 2>&1; then
+		init_upstart
+	else
+		echo "Unknown init system. Exiting..."
> /dev/stderr
+		exit 1
+	fi
+
+	patch_updates
+}
+
+
+update_selinux() {
+	echo "Detecting SELinux"
+	enforced=$(getenforce 2>/dev/null || echo)
+
+	if [ "$enforced" != "Enforcing" ]; then
+		echo "SELinux not enforced"
+		return
+	fi
+
+	echo "setting nis_enabled to 1 to allow do-agent to execute"
+	setsebool -P nis_enabled 1 || echo "Failed" > /dev/stderr
+}
+
+patch_updates() {
+	# make sure we have the latest
+	[ -f "${CRON}" ] && rm -rf "${CRON}"
+	script="${INSTALL_DIR}/scripts/update.sh"
+
+	cat <<-EOF > "${CRON}"
+	#!/bin/sh
+	/bin/bash ${script}
+	EOF
+
+	chmod a+x "${CRON}"
+
+	echo "cron installed"
+}
+
+init_systemd() {
+	# cannot use symlink because of an old bug https://bugzilla.redhat.com/show_bug.cgi?id=955379
+	SVC=/etc/systemd/system/${SVC_NAME}.service
+	cat <<-EOF > "$SVC"
+	[Unit]
+	Description=DigitalOcean do-agent agent
+	After=network-online.target
+	Wants=network-online.target
+
+	[Service]
+	User=${NOBODY_USER}
+	Group=${NOBODY_GROUP}
+	ExecStart=/usr/local/bin/do-agent
+	Restart=always
+
+	OOMScoreAdjust=-900
+	SyslogIdentifier=DigitalOceanAgent
+	PrivateTmp=yes
+	ProtectSystem=full
+	ProtectHome=yes
+	NoNewPrivileges=yes
+
+	[Install]
+	WantedBy=multi-user.target
+	EOF
+
+	# enable --now is unsupported on older versions of debian/systemd
+	systemctl enable ${SVC}
+	systemctl stop ${SVC_NAME} || true
+	systemctl start ${SVC_NAME}
+}
+
+init_upstart() {
+	cat <<-EOF > /etc/init/${SVC_NAME}.conf
+	# do-agent - An agent that collects system metrics.
+	#
+	# An agent that collects system metrics and transmits them to DigitalOcean.
+	description "The DigitalOcean Monitoring Agent"
+	author "DigitalOcean"
+
+	start on runlevel [2345]
+	stop on runlevel [!2345]
+	console none
+	normal exit 0 TERM
+	kill timeout 5
+	respawn
+
+	script
+	exec su -s /bin/sh -c 'exec "\$0" "\$@"' ${NOBODY_USER} -- /usr/local/bin/do-agent --syslog
+	end script
+	EOF
+	initctl reload-configuration
+	initctl stop ${SVC_NAME} || true
+	initctl start ${SVC_NAME}
+}
+
+
+dist() {
+	if [ -f /etc/os-release ]; then
+		awk -F= '$1 == "ID" {gsub("\"", ""); print$2}' /etc/os-release
+	elif [ -f /etc/redhat-release ]; then
+		awk '{print tolower($1)}' /etc/redhat-release
+	fi
+}
+
+
+# never put anything below this line. This is to prevent any partial execution
+# if curl ever interrupts the download prematurely. In that case, this script
+# will not execute since this is the last line in the script.
+main
diff --git a/packaging/scripts/after_remove.sh b/packaging/scripts/after_remove.sh
new file mode 100644
index 00000000..7a8117fc
--- /dev/null
+++ b/packaging/scripts/after_remove.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# noexpandtab is required for EOF/heredoc
+# vim: noexpandtab
+#
+# IMPORTANT: this script will execute with /bin/sh which is dash on some
+# systems so this shebang should not be changed
+# DO NOT change this and make sure you are linting with shellcheck to ensure
+# compatibility with scripts
+
+set -ue
+
+SVC_NAME=do-agent
+CRON=/etc/cron.daily/do-agent
+
+main() {
+	if command -v systemctl >/dev/null 2>&1; then
+		echo "Configure systemd..."
+		clean_systemd
+	elif command -v initctl >/dev/null 2>&1; then
+		echo "Configure upstart..."
+		clean_upstart
+	else
+		echo "Unknown init system" > /dev/stderr
+	fi
+
+	remove_cron
+}
+
+remove_cron() {
+	rm -f "${CRON}"
+	echo "cron removed"
+}
+
+clean_upstart() {
+	initctl stop ${SVC_NAME} || true
+	unlink /etc/init/${SVC_NAME}.conf || true
+	initctl reload-configuration || true
+}
+
+clean_systemd() {
+	systemctl stop ${SVC_NAME} || true
+	systemctl disable ${SVC_NAME}.service || true
+	unlink /etc/systemd/system/${SVC_NAME}.service || true
+	systemctl daemon-reload || true
+}
+
+main
diff --git a/packaging/scripts/before_install.sh b/packaging/scripts/before_install.sh
new file mode 100644
index 00000000..d64029e7
--- /dev/null
+++ b/packaging/scripts/before_install.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# noexpandtab is required for EOF/heredoc
+# vim: noexpandtab
+#
+# IMPORTANT: this script will execute with /bin/sh which is dash on some
+# systems so this shebang should not be changed
+# DO NOT change this and make sure you are linting with shellcheck to ensure
+# compatibility with scripts
+set -ue
+
+SVC_NAME=do-agent
+
+main () {
+	if command -v systemctl >/dev/null 2>&1; then
+		systemctl stop ${SVC_NAME} || true
+	elif command -v initctl >/dev/null 2>&1; then
+		initctl stop ${SVC_NAME} || true
+	else
+		echo "Unknown init system" > /dev/stderr
+	fi
+}
+
+main
diff --git a/pkg/clients/http.go b/pkg/clients/http.go
new file mode 100644
index 00000000..37a12cac
--- /dev/null
+++ b/pkg/clients/http.go
@@ -0,0 +1,40 @@
+package clients
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// HTTPClient can make HTTP requests
+type HTTPClient interface {
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// NewHTTP creates a new HTTP client with the provided timeout
+func NewHTTP(timeout time.Duration) *http.Client {
+	return &http.Client{
+		Timeout: timeout,
+		Transport: &http.Transport{
+			DialContext: (&net.Dialer{
+				Timeout: timeout,
+			}).DialContext,
+			TLSHandshakeTimeout:   timeout,
+			ResponseHeaderTimeout: timeout,
+			DisableKeepAlives:     true,
+		},
+	}
+}
+
+// FakeHTTPClient is used for testing
+type FakeHTTPClient struct {
+	DoFunc func(*http.Request) (*http.Response, error)
+}
+
+// Do an HTTP request for testing
+func (c *FakeHTTPClient) Do(req *http.Request) (*http.Response, error) {
+	if c.DoFunc != nil {
+		return c.DoFunc(req)
+	}
+	return nil, nil
+}
diff --git a/pkg/clients/tsclient/client.go b/pkg/clients/tsclient/client.go
new file mode 100644
index 00000000..ef2cca11
--- /dev/null
+++ b/pkg/clients/tsclient/client.go
@@ -0,0 +1,506 @@
+/*
+Package tsclient provides a common client for sending metrics to the DO timeseries system.
+
+The timeseries system is push-based: metrics are accumulated with AddMetric and
+submitted in batches to the wharf server via the Flush method at fixed time
+intervals.
+
+Wharf responds with a rate-limit value; the client must wait at least that many
+seconds before submitting the next batch of metrics. The remaining wait time is
+exposed via the WaitDuration() method.
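+
+A minimal usage sketch (illustrative values only; the exact options depend on
+how the caller wires the client):
+
+	c := tsclient.New(
+		tsclient.WithUserAgent("do-agent/x.y.z"),
+		tsclient.WithTimeout(10*time.Second),
+	)
+	def := tsclient.NewDefinition("sonar_memory_free")
+	if err := c.AddMetric(def, 123456); err != nil {
+		// handle error
+	}
+	if err := c.Flush(); err != nil {
+		// wait at least c.WaitDuration() before retrying
+	}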
+ +*/ +package tsclient + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "strconv" + "time" + + "github.com/digitalocean/do-agent/pkg/clients/tsclient/structuredstream" + + "github.com/golang/snappy" +) + +const ( + binaryContentType = "application/timeseries-binary-0" + userAgentHeader = "User-Agent" + pushIntervalHeaderKey = "X-Metric-Push-Interval" + authKeyHeader = "X-Auth-Key" + contentTypeHeader = "Content-Type" + + defaultWaitInterval = time.Second * 60 + maxWaitInterval = time.Hour +) + +// Client is an interface for sending batches of metrics +type Client interface { + AddMetric(def *Definition, value float64, labels ...string) error + AddMetricWithTime(def *Definition, t time.Time, value float64, labels ...string) error + Flush() error + WaitDuration() time.Duration + ResetWaitTimer() +} + +// HTTPClient is used to send metrics via http +type HTTPClient struct { + httpClient *http.Client + userAgent string + metadataEndpoint string + radarEndpoint string + wharfEndpoints []string + wharfEndpointSSLHostname string + lastFlushAttempt time.Time + lastFlushConnection time.Time + waitInterval time.Duration + numConsecutiveFailures int + bootstrapRequired bool + trusted bool + logger LoggerFn + lastSend map[string]int64 + isZeroTime bool + + // variables only used when trusted + appName string + appKey string + + // variables only used when non-trusted + dropletID string + region string + + buf *bytes.Buffer + w *snappy.Writer +} + +// ClientOptions are client options +type ClientOptions struct { + UserAgent string + WharfEndpoints []string + WharfEndpointSSLHostname string + AppName string + AppKey string + MetadataEndpoint string + RadarEndpoint string + Timeout time.Duration + IsTrusted bool + Logger LoggerFn +} + +// ClientOptFn allows for overriding options +type ClientOptFn func(*ClientOptions) + +// LoggerFn allows for a custom logger to be passed +type LoggerFn func(msg string) + +// WithWharfEndpoint overrides the default wharf endpoint, this option must be set when WithTrustedAppKey is used. +func WithWharfEndpoint(endpoint string) ClientOptFn { + return WithWharfEndpoints([]string{endpoint}) +} + +// WithWharfEndpoints overrides the default wharf endpoint, this option must be set when WithTrustedAppKey is used. +func WithWharfEndpoints(endpoints []string) ClientOptFn { + return func(o *ClientOptions) { + o.WharfEndpoints = endpoints + } +} + +// WithWharfEndpointSSLHostname overrides the default wharf endpoint, this option must be set when WithTrustedAppKey is used. +func WithWharfEndpointSSLHostname(hostname string) ClientOptFn { + return func(o *ClientOptions) { + o.WharfEndpointSSLHostname = hostname + } +} + +// WithMetadataEndpoint overrides the default metadata endpoint, this option is only applicable to non-trusted clients (i.e. running on a customer droplet). +func WithMetadataEndpoint(endpoint string) ClientOptFn { + return func(o *ClientOptions) { + o.MetadataEndpoint = endpoint + } +} + +// WithRadarEndpoint overrides the default radar endpoint, this option is only applicable to non-trusted clients (i.e. running on a customer droplet). 
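+// The default is https://sonar.digitalocean.com.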
+func WithRadarEndpoint(endpoint string) ClientOptFn {
+	return func(o *ClientOptions) {
+		o.RadarEndpoint = endpoint
+	}
+}
+
+// WithTimeout overrides the default timeout for HTTP requests
+func WithTimeout(timeout time.Duration) ClientOptFn {
+	return func(o *ClientOptions) {
+		o.Timeout = timeout
+	}
+}
+
+// WithUserAgent overrides the http user agent
+func WithUserAgent(s string) ClientOptFn {
+	return func(o *ClientOptions) {
+		o.UserAgent = s
+	}
+}
+
+// WithTrustedAppKey disables metadata authentication; trusted apps can override the host_id and user_id tags.
+func WithTrustedAppKey(appName, appKey string) ClientOptFn {
+	return func(o *ClientOptions) {
+		o.AppName = appName
+		o.AppKey = appKey
+		o.IsTrusted = true
+	}
+}
+
+// WithLogger enables logging via the passed-in function
+func WithLogger(logger LoggerFn) ClientOptFn {
+	return func(o *ClientOptions) {
+		o.Logger = logger
+	}
+}
+
+// New creates a new client
+func New(opts ...ClientOptFn) Client {
+	opt := &ClientOptions{
+		UserAgent:        "tsclient-unknown",
+		Timeout:          10 * time.Second,
+		MetadataEndpoint: "http://169.254.169.254/metadata",
+		RadarEndpoint:    "https://sonar.digitalocean.com",
+	}
+
+	for _, fn := range opts {
+		fn(opt)
+	}
+
+	var tlsConfig tls.Config
+	if opt.WharfEndpointSSLHostname != "" {
+		tlsConfig.ServerName = opt.WharfEndpointSSLHostname
+	}
+
+	httpClient := &http.Client{
+		Timeout: opt.Timeout,
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			Dial: (&net.Dialer{
+				Timeout: opt.Timeout,
+			}).Dial,
+			TLSHandshakeTimeout:   opt.Timeout,
+			ResponseHeaderTimeout: opt.Timeout,
+			DisableKeepAlives:     true,
+			TLSClientConfig:       &tlsConfig,
+		},
+	}
+
+	if opt.IsTrusted {
+		if len(opt.WharfEndpoints) == 0 {
+			panic("WithWharfEndpoint() must be used with WithTrustedAppKey")
+		}
+		if opt.AppName == "" {
+			panic("appname must be set")
+		}
+	}
+
+	logger := opt.Logger
+	if logger == nil {
+		logger = stubLogger
+	}
+
+	return &HTTPClient{
+		userAgent:                opt.UserAgent,
+		metadataEndpoint:         opt.MetadataEndpoint,
+		radarEndpoint:            opt.RadarEndpoint,
+		wharfEndpoints:           opt.WharfEndpoints,
+		wharfEndpointSSLHostname: opt.WharfEndpointSSLHostname,
+		appName:                  opt.AppName,
+		appKey:                   opt.AppKey,
+		httpClient:               httpClient,
+		waitInterval:             defaultWaitInterval,
+		bootstrapRequired:        true,
+		trusted:                  opt.IsTrusted,
+		logger:                   logger,
+		lastSend:                 map[string]int64{},
+	}
+}
+
+func stubLogger(msg string) {}
+
+func (c *HTTPClient) bootstrapFromMetadata() error {
+	var err error
+
+	if c.trusted {
+		return nil
+	}
+
+	c.dropletID, err = c.GetDropletID()
+	if err != nil {
+		return err
+	}
+	c.logger(fmt.Sprintf("droplet ID: %s", c.dropletID))
+
+	c.region, err = c.GetRegion()
+	if err != nil {
+		return err
+	}
+	c.logger(fmt.Sprintf("region: %s", c.region))
+
+	authToken, err := c.GetAuthToken()
+	if err != nil {
+		return err
+	}
+	c.logger(fmt.Sprintf("auth token: %s", authToken))
+
+	appKey, err := c.GetAppKey(authToken)
+	if err != nil {
+		return err
+	}
+	c.appKey = appKey
+	c.logger(fmt.Sprintf("appkey: %s", c.appKey))
+
+	return nil
+}
+
+// url returns a potentially randomized endpoint to send data to.
+// The URL must be randomized on every call; otherwise the cache across all
+// wharf endpoints will be skewed (i.e. only a single node will know about the
+// droplet -> user ID lookups), and when a restart/failure finally happens a
+// different wharf endpoint will be picked and it won't have anything in its
+// cache.
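+//
+// For example, an untrusted client in region "nyc3" with droplet ID 123 would
+// send to https://nyc3.sonar.digitalocean.com/v1/metrics/droplet_id/123
+// (illustrative values).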
+func (c *HTTPClient) url() string { + if c.trusted { + if len(c.wharfEndpoints) == 0 { + panic("trusted app with no wharf endpoints; shouldnt happen") + } + if c.appName == "" { + panic("appname not defined; shouldnt happen") + } + endpoint := c.wharfEndpoints[rand.Intn(len(c.wharfEndpoints))] + return fmt.Sprintf("%s/v1/metrics/trusted/%s", endpoint, c.appName) + } + + endpoint := fmt.Sprintf("https://%s.sonar.digitalocean.com", c.region) + if len(c.wharfEndpoints) > 0 { + endpoint = c.wharfEndpoints[rand.Intn(len(c.wharfEndpoints))] + } + return fmt.Sprintf("%s/v1/metrics/droplet_id/%s", endpoint, c.dropletID) +} + +// WaitDuration returns the duration before the next batch of metrics will be accepted +func (c *HTTPClient) WaitDuration() time.Duration { + d := time.Since(c.lastFlushAttempt) + if d < c.waitInterval { + return c.waitInterval - d + } + return 0 +} + +// AddMetric adds a metric to the batch +func (c *HTTPClient) AddMetric(def *Definition, value float64, labels ...string) error { + return c.addMetricWithMSEpochTime(def, 0, value, labels...) +} + +// AddMetricWithTime adds a metric to the batch +func (c *HTTPClient) AddMetricWithTime(def *Definition, t time.Time, value float64, labels ...string) error { + ms := t.UTC().UnixNano() / int64(time.Millisecond) + return c.addMetricWithMSEpochTime(def, ms, value, labels...) +} + +func (c *HTTPClient) addMetricWithMSEpochTime(def *Definition, ms int64, value float64, labels ...string) error { + isZeroTime := bool(ms == 0) + if c.buf == nil { + c.buf = new(bytes.Buffer) + c.w = snappy.NewBufferedWriter(c.buf) + c.lastSend = map[string]int64{} + c.isZeroTime = isZeroTime + } else { + if isZeroTime != c.isZeroTime { + panic("client support for AddMetrics and AddMetricWithTime is mutually exclusive") + } + } + lfm, err := GetLFM(def, labels) + if err != nil { + return err + } + + if !isZeroTime { + // ensure sufficient time between reported metric values + if lastSend, ok := c.lastSend[lfm]; ok && (time.Duration(ms-lastSend)*time.Millisecond) < c.waitInterval { + return ErrSendTooFrequent + } + c.lastSend[lfm] = ms + } + + writer := structuredstream.NewWriter(c.w) + writer.WriteUint16PrefixedString(lfm) + writer.Write(int64(ms)) + writer.Write(float64(value)) + return writer.Error() +} + +func (c *HTTPClient) clearBufferedMetrics() { + c.buf = nil + + // clean lastSend (potential memory leak otherwise) + nowMS := time.Now().UTC().UnixNano() / int64(time.Millisecond) + for lfm, t := range c.lastSend { + if (nowMS - t) > 60*60*1000 { + delete(c.lastSend, lfm) + } + } +} + +// ResetWaitTimer causes the wait duration timer to reset +func (c *HTTPClient) ResetWaitTimer() { + c.lastFlushAttempt = time.Now() +} + +// Flush sends the batch of metrics to wharf +func (c *HTTPClient) Flush() error { + now := time.Now() + if now.Sub(c.lastFlushAttempt) < c.waitInterval { + return ErrFlushTooFrequent + } + c.lastFlushAttempt = now + + if c.numConsecutiveFailures > 3 { + timeSinceLastConnection := now.Sub(c.lastFlushConnection) + requiredWait := time.Minute * time.Duration(c.numConsecutiveFailures+rand.Intn(3)) + if requiredWait > maxWaitInterval { + requiredWait = maxWaitInterval + } + if timeSinceLastConnection < requiredWait { + return ErrCircuitBreaker + } + } + + if c.buf == nil { + return nil + } + + c.lastFlushConnection = now + + if c.bootstrapRequired || c.numConsecutiveFailures > 60 { + if err := c.bootstrapFromMetadata(); err != nil { + c.numConsecutiveFailures++ + return err + } + c.bootstrapRequired = false + } + + err := c.w.Flush() + if 
err != nil { + return err + } + + url := c.url() + c.logger(fmt.Sprintf("sending metrics to %s", url)) + req, err := http.NewRequest("POST", url, bytes.NewBuffer(c.buf.Bytes())) + if err != nil { + c.numConsecutiveFailures++ + if c.isZeroTime { + c.clearBufferedMetrics() + } + return err + } + + req.Header.Add(userAgentHeader, c.userAgent) + if c.wharfEndpointSSLHostname != "" { + req.Host = c.wharfEndpointSSLHostname + } + req.Header.Set(contentTypeHeader, binaryContentType) + req.Header.Add(authKeyHeader, c.appKey) + + resp, err := c.httpClient.Do(req) + if err != nil { + c.numConsecutiveFailures++ + if c.isZeroTime { + c.clearBufferedMetrics() + } + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusAccepted { + c.numConsecutiveFailures++ + if c.isZeroTime { + c.clearBufferedMetrics() + } + return &UnexpectedHTTPStatusError{StatusCode: resp.StatusCode} + } + + sendInterval, err := strconv.Atoi(resp.Header.Get(pushIntervalHeaderKey)) + if err != nil { + c.waitInterval = defaultWaitInterval + } else { + c.waitInterval = time.Duration(sendInterval) * time.Second + } + c.numConsecutiveFailures = 0 + c.clearBufferedMetrics() + return nil +} + +// GetWaitInterval returns the wait interval between metrics +func (c *HTTPClient) GetWaitInterval() time.Duration { + return c.waitInterval +} + +// GetDropletID returns the droplet ID +func (c *HTTPClient) GetDropletID() (string, error) { + return c.httpGet(fmt.Sprintf("%s/v1/id", c.metadataEndpoint), "") +} + +// GetRegion returns the region +func (c *HTTPClient) GetRegion() (string, error) { + return c.httpGet(fmt.Sprintf("%s/v1/region", c.metadataEndpoint), "") +} + +// GetAuthToken returns an auth token +func (c *HTTPClient) GetAuthToken() (string, error) { + return c.httpGet(fmt.Sprintf("%s/v1/auth-token", c.metadataEndpoint), "") +} + +// GetAppKey returns the appkey +func (c *HTTPClient) GetAppKey(authToken string) (string, error) { + body, err := c.httpGet(fmt.Sprintf("%s/v1/appkey/droplet-auth-token", c.radarEndpoint), authToken) + if err != nil { + return "", err + } + + var appKey string + err = json.Unmarshal([]byte(body), &appKey) + if err != nil { + return "", err + } + + return appKey, nil +} + +func (c *HTTPClient) httpGet(url, authToken string) (string, error) { + c.logger(fmt.Sprintf("HTTP GET %s", url)) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", err + } + if authToken != "" { + authValue := "DOMETADATA " + authToken + req.Header.Add("Authorization", authValue) + c.logger(fmt.Sprintf("Authorization: %s", authValue)) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + c.logger(fmt.Sprintf("got status code %d while fetching %s (auth token: %s)", resp.StatusCode, url, authToken)) + return "", &UnexpectedHTTPStatusError{StatusCode: resp.StatusCode} + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(body), nil +} diff --git a/pkg/clients/tsclient/errors.go b/pkg/clients/tsclient/errors.go new file mode 100644 index 00000000..d8c3fda3 --- /dev/null +++ b/pkg/clients/tsclient/errors.go @@ -0,0 +1,29 @@ +package tsclient + +import "fmt" + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. 
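+//
+// Callers can recover the status code with a type assertion, e.g.:
+//
+//	if hs, ok := err.(*UnexpectedHTTPStatusError); ok && hs.StatusCode == 429 {
+//		// rate limited; back off before the next Flush
+//	}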
+type UnexpectedHTTPStatusError struct {
+	StatusCode int
+}
+
+// Error returns the error string
+func (e *UnexpectedHTTPStatusError) Error() string {
+	return fmt.Sprintf("received unexpected HTTP status: %d", e.StatusCode)
+}
+
+// ErrSendTooFrequent happens if the client attempts to send metrics faster
+// than what the server requested
+var ErrSendTooFrequent = fmt.Errorf("metrics sent faster than server requested")
+
+// ErrFlushTooFrequent happens if the client attempts to flush metrics faster
+// than what the server requested
+var ErrFlushTooFrequent = fmt.Errorf("metrics flushed faster than server requested")
+
+// ErrCircuitBreaker happens when there are many back-to-back failures sending
+// metrics; the client deliberately fails in order to reduce network load on
+// the server
+var ErrCircuitBreaker = fmt.Errorf("circuit breaker is open; deliberately failing due to exponential backoff")
+
+// ErrLabelMissmatch happens if the number of supplied labels is incorrect
+var ErrLabelMissmatch = fmt.Errorf("unexpected number of labels")
diff --git a/pkg/clients/tsclient/metric_definition.go b/pkg/clients/tsclient/metric_definition.go
new file mode 100644
index 00000000..7e2007e1
--- /dev/null
+++ b/pkg/clients/tsclient/metric_definition.go
@@ -0,0 +1,131 @@
+package tsclient
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type labelDefType int
+
+const (
+	dynamicLabel labelDefType = iota
+	commonLabel
+)
+
+type definitionLabel struct {
+	name      string
+	labelType labelDefType
+
+	// only when labelType is commonLabel
+	commonValue string
+
+	// only when labelType is dynamicLabel
+	i int
+}
+
+// DefinitionOpts holds the configurable options of a metric definition
+type DefinitionOpts struct {
+	// CommonLabels is a set of static key-value labels
+	CommonLabels map[string]string
+
+	// MeasuredLabelKeys is a list of label keys whose values will be specified
+	// at run-time
+	MeasuredLabelKeys []string
+}
+
+// Definition holds the description of a metric.
+type Definition struct {
+	name         string
+	sortedLabels []definitionLabel
+}
+
+// DefinitionOpt is an option initializer for metric registration.
+type DefinitionOpt func(*DefinitionOpts)
+
+// WithCommonLabels includes common labels
+func WithCommonLabels(labels map[string]string) DefinitionOpt {
+	return func(o *DefinitionOpts) {
+		for k, v := range labels {
+			o.CommonLabels[k] = v
+		}
+	}
+}
+
+// WithMeasuredLabels includes labels whose values are supplied at measurement time
+func WithMeasuredLabels(labelKeys ...string) DefinitionOpt {
+	return func(o *DefinitionOpts) {
+		o.MeasuredLabelKeys = append(o.MeasuredLabelKeys, labelKeys...)
+	}
+}
+
+// NewDefinition returns a new definition
+func NewDefinition(name string, opts ...DefinitionOpt) *Definition {
+	def := &DefinitionOpts{
+		CommonLabels:      map[string]string{},
+		MeasuredLabelKeys: []string{},
+	}
+	for _, opt := range opts {
+		opt(def)
+	}
+
+	seen := map[string]bool{}
+	sortedLabels := []definitionLabel{}
+	for k, v := range def.CommonLabels {
+		if _, ok := seen[k]; ok {
+			panic(fmt.Sprintf("duplicate key: %q", k))
+		}
+		seen[k] = true
+		sortedLabels = append(sortedLabels, definitionLabel{
+			labelType:   commonLabel,
+			name:        k,
+			commonValue: v,
+		})
+	}
+	for i, x := range def.MeasuredLabelKeys {
+		if _, ok := seen[x]; ok {
+			panic(fmt.Sprintf("duplicate key: %q", x))
+		}
+		seen[x] = true
+		sortedLabels = append(sortedLabels, definitionLabel{
+			labelType: dynamicLabel,
+			name:      x,
+			i:         i,
+		})
+	}
+	sort.Slice(sortedLabels, func(i, j int) bool { return sortedLabels[i].name < sortedLabels[j].name })
+
+	return &Definition{
+		name:         name,
+		sortedLabels: sortedLabels,
+	}
+}
+
+// NewDefinitionFromMap returns a new definition with a common label for each
+// entry of the given map; the map must also contain a "__name__" key
+func NewDefinitionFromMap(m map[string]string) *Definition {
+	name, ok := m["__name__"]
+	if !ok {
+		panic("missing __name__ key")
+	}
+	delete(m, "__name__")
+
+	return NewDefinition(name, WithCommonLabels(m))
+}
+
+// GetLFM returns an LFM corresponding to a definition
+func GetLFM(def *Definition, labels []string) (string, error) {
+	lfm := []string{def.name}
+	for _, x := range def.sortedLabels {
+		lfm = append(lfm, x.name)
+		switch x.labelType {
+		case commonLabel:
+			lfm = append(lfm, x.commonValue)
+		case dynamicLabel:
+			if x.i >= len(labels) {
+				return "", ErrLabelMissmatch
+			}
+			lfm = append(lfm, labels[x.i])
+		}
+	}
+	return strings.Join(lfm, "\x00"), nil
+}
diff --git a/pkg/clients/tsclient/structuredstream/structured_stream_reader.go b/pkg/clients/tsclient/structuredstream/structured_stream_reader.go
new file mode 100644
index 00000000..400ca76b
--- /dev/null
+++ b/pkg/clients/tsclient/structuredstream/structured_stream_reader.go
@@ -0,0 +1,138 @@
+package structuredstream
+
+import (
+	"encoding/binary"
+	"io"
+	"time"
+)
+
+// Reader wraps a reader with calls for reading binary data types.
+// If any error is encountered, all subsequent calls become no-ops;
+// check for errors with a separate call to Error.
+type Reader struct {
+	r         io.Reader
+	err       error
+	byteOrder binary.ByteOrder
+}
+
+// NewReader returns a new reader
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:         r,
+		err:       nil,
+		byteOrder: binary.LittleEndian,
+	}
+}
+
+// Read takes a pointer to a data type (e.g. uint16, int64, []byte), reads the
+// binary representation from the wrapped reader into it, and advances the
+// reader offset to the next value.
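+//
+// A minimal sketch:
+//
+//	r := NewReader(src) // src is any io.Reader (assumption)
+//	var v uint16
+//	r.Read(&v) // reads two little-endian bytes into v
+//	if err := r.Error(); err != nil {
+//		// handle the accumulated error
+//	}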
+func (s *Reader) Read(x interface{}) {
+	if s.err == nil {
+		s.err = binary.Read(s.r, s.byteOrder, x)
+	}
+}
+
+// ReadInt8 returns an int8
+func (s *Reader) ReadInt8() int8 {
+	var x int8
+	s.Read(&x)
+	return x
+}
+
+// ReadUint8 returns a uint8
+func (s *Reader) ReadUint8() uint8 {
+	var x uint8
+	s.Read(&x)
+	return x
+}
+
+// ReadInt16 returns an int16
+func (s *Reader) ReadInt16() int16 {
+	var x int16
+	s.Read(&x)
+	return x
+}
+
+// ReadUint16 returns a uint16
+func (s *Reader) ReadUint16() uint16 {
+	var x uint16
+	s.Read(&x)
+	return x
+}
+
+// ReadInt32 returns an int32
+func (s *Reader) ReadInt32() int32 {
+	var x int32
+	s.Read(&x)
+	return x
+}
+
+// ReadUint32 returns a uint32
+func (s *Reader) ReadUint32() uint32 {
+	var x uint32
+	s.Read(&x)
+	return x
+}
+
+// ReadInt64 returns an int64
+func (s *Reader) ReadInt64() int64 {
+	var x int64
+	s.Read(&x)
+	return x
+}
+
+// ReadUint64 returns a uint64
+func (s *Reader) ReadUint64() uint64 {
+	var x uint64
+	s.Read(&x)
+	return x
+}
+
+// ReadFloat64 returns a float64
+func (s *Reader) ReadFloat64() float64 {
+	var x float64
+	s.Read(&x)
+	return x
+}
+
+// ReadBytes reads and returns l bytes
+func (s *Reader) ReadBytes(l int) []byte {
+	buf := make([]byte, l)
+	if s.err != nil {
+		return buf
+	}
+	// record a short read as an error rather than panicking so that the
+	// error-accumulation contract of Reader holds
+	_, s.err = io.ReadFull(s.r, buf)
+	return buf
+}
+
+// ReadUint16PrefixedBytes first reads a uint16, then reads that many following bytes
+func (s *Reader) ReadUint16PrefixedBytes() []byte {
+	l := s.ReadUint16()
+	if s.err == nil {
+		x := s.ReadBytes(int(l))
+		return x
+	}
+	return nil
+}
+
+// ReadUint16PrefixedString first reads a uint16, then reads that many following chars
+func (s *Reader) ReadUint16PrefixedString() string {
+	return string(s.ReadUint16PrefixedBytes())
+}
+
+// ReadUnixTime64UTC reads an int64 representing Unix epoch seconds and converts it to a time.Time in UTC
+func (s *Reader) ReadUnixTime64UTC() time.Time {
+	var x int64
+	s.Read(&x)
+
+	// can't use time.Unix which assumes timezone is local
+	t := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Second * time.Duration(x))
+	return t
+}
+
+// Error returns the last encountered error
+func (s *Reader) Error() error {
+	return s.err
+}
diff --git a/pkg/clients/tsclient/structuredstream/structured_stream_writer.go b/pkg/clients/tsclient/structuredstream/structured_stream_writer.go
new file mode 100644
index 00000000..8591d6e8
--- /dev/null
+++ b/pkg/clients/tsclient/structuredstream/structured_stream_writer.go
@@ -0,0 +1,57 @@
+package structuredstream
+
+import (
+	"encoding/binary"
+	"io"
+	"time"
+)
+
+// Writer wraps a writer with calls for writing binary data types.
+// If any error is encountered, all subsequent calls become no-ops;
+// check for errors with a separate call to Error.
+type Writer struct {
+	w         io.Writer
+	err       error
+	byteOrder binary.ByteOrder
+}
+
+// NewWriter returns a new writer
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:         w,
+		err:       nil,
+		byteOrder: binary.LittleEndian,
+	}
+}
+
+// Write writes a given type to the stream
+func (s *Writer) Write(x interface{}) {
+	if s.err == nil {
+		s.err = binary.Write(s.w, s.byteOrder, x)
+	}
+}
+
+// WriteUint16PrefixedBytes writes a uint16 specifying the length of the bytes buffer, followed by the payload
+func (s *Writer) WriteUint16PrefixedBytes(x []byte) {
+	l := len(x)
+	if l > 0xFFFF {
+		panic("overflow")
+	}
+	s.Write(uint16(l))
+	s.Write(x)
+}
+
+// WriteUint16PrefixedString writes a uint16 specifying the length of the string, followed by the actual string
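+// It is the inverse of Reader.ReadUint16PrefixedString.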
+func (s *Writer) WriteUint16PrefixedString(x string) { + s.WriteUint16PrefixedBytes([]byte(x)) +} + +// WriteUnixTime64UTC writes a time as unix epoch in UTC; sub-second accuracy is truncated +func (s *Writer) WriteUnixTime64UTC(x time.Time) { + s.Write(int64(x.UTC().Unix())) +} + +// Error returns any errors the occurred since the writer was first constructed +func (s *Writer) Error() error { + return s.err +} diff --git a/pkg/collector/node.go b/pkg/collector/node.go new file mode 100644 index 00000000..f0438870 --- /dev/null +++ b/pkg/collector/node.go @@ -0,0 +1,52 @@ +package collector + +import ( + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/node_exporter/collector" +) + +// NewNodeCollector creates a new prometheus NodeCollector +func NewNodeCollector() (*NodeCollector, error) { + c, err := collector.NewNodeCollector() + if err != nil { + return nil, errors.Wrap(err, "failed to create NodeCollector") + } + + return &NodeCollector{ + collectFunc: c.Collect, + describeFunc: c.Describe, + collectorsFunc: func() map[string]collector.Collector { + return c.Collectors + }, + }, nil +} + +// NodeCollector is a collector that collects data using +// prometheus/node_exporter. Since prometheus returns an internal type we have +// to wrap it with our own type +type NodeCollector struct { + collectFunc func(ch chan<- prometheus.Metric) + describeFunc func(ch chan<- *prometheus.Desc) + collectorsFunc func() map[string]collector.Collector +} + +// Collectors returns the list of collectors registered +func (n *NodeCollector) Collectors() map[string]collector.Collector { + return n.collectorsFunc() +} + +// Name returns the name of this collector +func (n *NodeCollector) Name() string { + return "do-agent" +} + +// Collect collects metrics using prometheus/node_exporter +func (n *NodeCollector) Collect(ch chan<- prometheus.Metric) { + n.collectFunc(ch) +} + +// Describe describes the metrics collected using prometheus/node_exporter +func (n *NodeCollector) Describe(ch chan<- *prometheus.Desc) { + n.describeFunc(ch) +} diff --git a/pkg/collector/scraper.go b/pkg/collector/scraper.go new file mode 100644 index 00000000..fc566f50 --- /dev/null +++ b/pkg/collector/scraper.go @@ -0,0 +1,249 @@ +package collector + +import ( + "bufio" + "compress/gzip" + "context" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/digitalocean/do-agent/internal/log" + "github.com/digitalocean/do-agent/pkg/clients" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +// NewScraper creates a new scraper to scrape metrics from the provided host +func NewScraper(name, host string, timeout time.Duration) (*Scraper, error) { + host = strings.TrimRight(host, "/") + req, err := http.NewRequest("GET", fmt.Sprintf("%s/metrics", host), nil) + if err != nil { + return nil, errors.Wrap(err, "failed to create http request") + } + req.Header.Add("Accept", `text/plain;version=0.0.4;q=1,*/*;q=0.1`) + req.Header.Add("Accept-Encoding", "gzip") + req.Header.Set("User-Agent", "Prometheus/2.3.0") + req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", fmt.Sprintf("%f", timeout.Seconds())) + + return &Scraper{ + req: req, + name: name, + timeout: timeout, + client: clients.NewHTTP(timeout), + scrapeDurationDesc: prometheus.NewDesc( + prometheus.BuildFQName(name, "scrape", "collector_duration_seconds"), + fmt.Sprintf("%s: Duration of a collector scrape.", 
name), + []string{"collector"}, + nil, + ), + scrapeSuccessDesc: prometheus.NewDesc( + prometheus.BuildFQName(name, "scrape", "collector_success"), + fmt.Sprintf("%s: Whether a collector succeeded.", name), + []string{"collector"}, + nil, + ), + }, nil +} + +// Scraper is a remote metric scraper that scrapes HTTP endpoints +type Scraper struct { + timeout time.Duration + req *http.Request + client *http.Client + name string + scrapeDurationDesc *prometheus.Desc + scrapeSuccessDesc *prometheus.Desc +} + +// readStream makes an HTTP request to the remote and returns the response body +// upon successful response +func (s *Scraper) readStream(ctx context.Context) (r io.ReadCloser, outerr error) { + // close the reader if we return an error + defer func() { + if outerr == nil || r == nil { + return + } + if err := r.Close(); err != nil { + // This should not happen, but if it does it'll be nice + // to know why we have a bunch of unclosed messages + log.Error("failed to close stream on error: %+v", errors.WithStack(err)) + } + }() + + resp, err := s.client.Do(s.req.WithContext(ctx)) + if err != nil { + return nil, errors.Wrap(err, "HTTP request failed") + } + + if resp.StatusCode != http.StatusOK { + return nil, errors.Errorf("server returned bad HTTP status %s", resp.Status) + } + + if resp.Header.Get("Content-Encoding") != "gzip" { + return resp.Body, nil + } + + reader, err := gzip.NewReader(bufio.NewReader(resp.Body)) + return reader, errors.Wrap(err, "failed to create gzip reader") +} + +// Describe describes this collector +func (s *Scraper) Describe(ch chan<- *prometheus.Desc) { + ch <- s.scrapeDurationDesc + ch <- s.scrapeSuccessDesc +} + +// Collect collectrs metrics from the remote endpoint and reports them to ch +func (s *Scraper) Collect(ch chan<- prometheus.Metric) { + var failed bool + defer func(start time.Time) { + dur := time.Since(start).Seconds() + var success float64 + if !failed { + success = 1 + } + ch <- prometheus.MustNewConstMetric(s.scrapeDurationDesc, prometheus.GaugeValue, dur, s.Name()) + ch <- prometheus.MustNewConstMetric(s.scrapeSuccessDesc, prometheus.GaugeValue, success, s.Name()) + }(time.Now()) + + ctx, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + if err := s.scrape(ctx, ch); err != nil { + failed = true + log.Error("collection failed for %q: %v", s.Name(), err) + } +} + +func (s *Scraper) scrape(ctx context.Context, ch chan<- prometheus.Metric) (outerr error) { + stream, err := s.readStream(ctx) + if err != nil { + return err + } + defer stream.Close() + + parsed, err := new(expfmt.TextParser).TextToMetricFamilies(stream) + if err != nil { + return errors.Wrapf(err, "parsing message failed") + } + + for _, mf := range parsed { + convertMetricFamily(mf, ch) + } + + return nil +} + +// Name returns the name of this scraper +func (s *Scraper) Name() string { + return s.name +} + +// convertMetricFamily converts the dto metrics parsed from the expfmt package +// into the prometheus.Metrics required to pass over the channel +// +// this was copied from github.com/prometheus/node_exporter +// see https://github.com/prometheus/node_exporter/blob/f56e8fcdf48ead56f1f149dbf1301ac028ef589b/collector/textfile.go#L63 +// for more details +func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) { + var valType prometheus.ValueType + var val float64 + + allLabelNames := map[string]struct{}{} + for _, metric := range metricFamily.Metric { + labels := metric.GetLabel() + for _, label := range labels { + if 
_, ok := allLabelNames[label.GetName()]; !ok { + allLabelNames[label.GetName()] = struct{}{} + } + } + } + + for _, metric := range metricFamily.Metric { + labels := metric.GetLabel() + var names []string + var values []string + for _, label := range labels { + names = append(names, label.GetName()) + values = append(values, label.GetValue()) + } + + for k := range allLabelNames { + present := false + for _, name := range names { + if k == name { + present = true + break + } + } + if !present { + names = append(names, k) + values = append(values, "") + } + } + + metricType := metricFamily.GetType() + switch metricType { + case dto.MetricType_COUNTER: + valType = prometheus.CounterValue + val = metric.Counter.GetValue() + + case dto.MetricType_GAUGE: + valType = prometheus.GaugeValue + val = metric.Gauge.GetValue() + + case dto.MetricType_UNTYPED: + valType = prometheus.UntypedValue + val = metric.Untyped.GetValue() + + case dto.MetricType_SUMMARY: + quantiles := map[float64]float64{} + for _, q := range metric.Summary.Quantile { + quantiles[q.GetQuantile()] = q.GetValue() + } + ch <- prometheus.MustNewConstSummary( + prometheus.NewDesc( + *metricFamily.Name, + metricFamily.GetHelp(), + names, nil, + ), + metric.Summary.GetSampleCount(), + metric.Summary.GetSampleSum(), + quantiles, values..., + ) + case dto.MetricType_HISTOGRAM: + buckets := map[float64]uint64{} + for _, b := range metric.Histogram.Bucket { + buckets[b.GetUpperBound()] = b.GetCumulativeCount() + } + ch <- prometheus.MustNewConstHistogram( + prometheus.NewDesc( + *metricFamily.Name, + metricFamily.GetHelp(), + names, nil, + ), + metric.Histogram.GetSampleCount(), + metric.Histogram.GetSampleSum(), + buckets, values..., + ) + default: + log.Error("unknown metric type %q", metricType.String()) + continue + } + if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED { + ch <- prometheus.MustNewConstMetric( + prometheus.NewDesc( + *metricFamily.Name, + metricFamily.GetHelp(), + names, nil, + ), + valType, val, values..., + ) + } + } +} diff --git a/pkg/decorate/chain.go b/pkg/decorate/chain.go new file mode 100644 index 00000000..fffc681e --- /dev/null +++ b/pkg/decorate/chain.go @@ -0,0 +1,24 @@ +package decorate + +import dto "github.com/prometheus/client_model/go" + +// Decorator decorates a list of metric families +type Decorator interface { + Decorate([]*dto.MetricFamily) + Name() string +} + +// Chain of decorators to be applied to the metric family +type Chain []Decorator + +// Decorate the metric family +func (c Chain) Decorate(mfs []*dto.MetricFamily) { + for _, d := range c { + d.Decorate(mfs) + } +} + +// Name is the name of the decorator +func (c Chain) Name() string { + return "Chain" +} diff --git a/pkg/decorate/compat/cpu.go b/pkg/decorate/compat/cpu.go new file mode 100644 index 00000000..def434d4 --- /dev/null +++ b/pkg/decorate/compat/cpu.go @@ -0,0 +1,43 @@ +package compat + +import ( + "fmt" + "strconv" + "strings" + + "github.com/digitalocean/do-agent/internal/log" + dto "github.com/prometheus/client_model/go" +) + +// CPU converts node_exporter cpu labels from 0-indexed to 1-indexed with prefix +type CPU struct{} + +// Name is the name of this decorator +func (c CPU) Name() string { + return fmt.Sprintf("%T", c) +} + +// Decorate executes the decorator against the give metrics +func (CPU) Decorate(mfs []*dto.MetricFamily) { + for _, mf := range mfs { + if !strings.EqualFold(mf.GetName(), "node_cpu_seconds_total") { + continue + } + + mf.Name = 
sptr("sonar_cpu") + for _, met := range mf.GetMetric() { + for _, l := range met.GetLabel() { + if !strings.EqualFold(l.GetName(), "cpu") { + continue + } + num, err := strconv.Atoi(l.GetValue()) + if err != nil { + log.Error("failed to parse cpu number: %+v", l) + continue + } + + l.Value = sptr(fmt.Sprintf("cpu%d", num)) + } + } + } +} diff --git a/pkg/decorate/compat/cpu_test.go b/pkg/decorate/compat/cpu_test.go new file mode 100644 index 00000000..cd5002b6 --- /dev/null +++ b/pkg/decorate/compat/cpu_test.go @@ -0,0 +1,166 @@ +package compat + +import ( + "fmt" + "testing" + + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" +) + +const nodeExporterCPUName = "node_cpu_seconds_total" + +func TestCPUChangesNames(t *testing.T) { + const expected = "sonar_cpu" + + mfs := []*dto.MetricFamily{{Name: sptr(nodeExporterCPUName)}} + CPU{}.Decorate(mfs) + + assert.Equal(t, expected, mfs[0].GetName()) +} + +func TestCPUChangesLabelValues(t *testing.T) { + for i := 0; i < 4; i++ { + dec := CPU{} + + cpu := i + expected := fmt.Sprintf("cpu%d", cpu) + t.Run(expected, func(t *testing.T) { + v := 1.0 + metric := dto.Metric{ + Gauge: &dto.Gauge{Value: &v}, + Label: []*dto.LabelPair{ + { + Name: sptr("cpu"), + Value: sptr(fmt.Sprint(cpu)), + }, + }, + } + mfs := []*dto.MetricFamily{ + { + Type: &counterMetricType, + Name: sptr(nodeExporterCPUName), + Metric: []*dto.Metric{&metric}, + }, + } + dec.Decorate(mfs) + assert.EqualValues(t, expected, metric.Label[0].GetValue()) + }) + } +} + +func TestCPUUpdatesAllLabels(t *testing.T) { + mfs := []*dto.MetricFamily{} + + for i := 0; i < 4; i++ { + v := 1.0 + m := dto.Metric{ + Gauge: &dto.Gauge{Value: &v}, + Label: []*dto.LabelPair{ + { + Name: sptr("cpu"), + Value: sptr(fmt.Sprint(i)), + }, + }, + } + mfs = append(mfs, &dto.MetricFamily{ + Type: &counterMetricType, + Name: sptr(nodeExporterCPUName), + Metric: []*dto.Metric{&m}, + }) + } + + CPU{}.Decorate(mfs) + + for i, mf := range mfs { + expected := fmt.Sprintf("cpu%d", i) + assert.EqualValues(t, expected, mf.GetMetric()[0].Label[0].GetValue()) + } +} + +func TestCPUDoesNotChangeOtherLabelValues(t *testing.T) { + dec := CPU{} + + const expected = "0" + + v := 1.0 + metric := dto.Metric{ + Gauge: &dto.Gauge{Value: &v}, + Label: []*dto.LabelPair{ + { + Name: sptr("notcpu"), + Value: sptr(expected), + }, + }, + } + mfs := []*dto.MetricFamily{ + { + Type: &counterMetricType, + Name: sptr(nodeExporterCPUName), + Metric: []*dto.Metric{&metric}, + }, + } + dec.Decorate(mfs) + assert.EqualValues(t, expected, metric.Label[0].GetValue()) +} + +func TestCPUDoesNotChangeOtherMetrics(t *testing.T) { + dec := CPU{} + + const expected = "0" + + v := 1.0 + metric := dto.Metric{ + Gauge: &dto.Gauge{Value: &v}, + Label: []*dto.LabelPair{ + { + Name: sptr("cpu"), + Value: sptr(expected), + }, + }, + } + mfs := []*dto.MetricFamily{ + { + Type: &counterMetricType, + Name: sptr("something else"), + Metric: []*dto.Metric{&metric}, + }, + } + dec.Decorate(mfs) + assert.EqualValues(t, expected, metric.Label[0].GetValue()) +} + +func TestCPUSkipsWhenFailsParsingCPUNumber(t *testing.T) { + dec := CPU{} + + v := 1.0 + metric := dto.Metric{ + Gauge: &dto.Gauge{Value: &v}, + Label: []*dto.LabelPair{ + { + Name: sptr("cpu"), + // this should be a number + Value: sptr("not a number"), + }, + { + Name: sptr("cpu"), + Value: sptr("1"), + }, + }, + } + mfs := []*dto.MetricFamily{ + { + Type: &counterMetricType, + Name: sptr(nodeExporterCPUName), + Metric: []*dto.Metric{&metric}, + }, + } + dec.Decorate(mfs) + 
+ assert.EqualValues(t, "not a number", metric.Label[0].GetValue()) + assert.EqualValues(t, "cpu1", metric.Label[1].GetValue()) +} + +func TestCPUHasName(t *testing.T) { + assert.Equal(t, "compat.CPU", CPU{}.Name()) +} diff --git a/pkg/decorate/compat/disk.go b/pkg/decorate/compat/disk.go new file mode 100644 index 00000000..575226f3 --- /dev/null +++ b/pkg/decorate/compat/disk.go @@ -0,0 +1,43 @@ +package compat + +import ( + "fmt" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const diskSectorSize = float64(512) + +// Disk converts node_exporter disk metrics from bytes to sectors +type Disk struct{} + +// Name is the name of this decorator +func (d Disk) Name() string { + return fmt.Sprintf("%T", d) +} + +// Decorate converts bytes to sectors +func (Disk) Decorate(mfs []*dto.MetricFamily) { + for _, mf := range mfs { + n := strings.ToLower(mf.GetName()) + switch n { + case "node_disk_read_bytes_total": + mf.Name = sptr("sonar_disk_sectors_read") + for _, met := range mf.GetMetric() { + met.Counter.Value = bytesToSector(met.Counter.Value) + } + case "node_disk_written_bytes_total": + mf.Name = sptr("sonar_disk_sectors_written") + for _, met := range mf.GetMetric() { + met.Counter.Value = bytesToSector(met.Counter.Value) + } + } + } +} + +func bytesToSector(val *float64) *float64 { + v := *val + v = v / diskSectorSize + return &v +} diff --git a/pkg/decorate/compat/disk_test.go b/pkg/decorate/compat/disk_test.go new file mode 100644 index 00000000..3ee27683 --- /dev/null +++ b/pkg/decorate/compat/disk_test.go @@ -0,0 +1,64 @@ +package compat + +import ( + "testing" + + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var counterMetricType = dto.MetricType(0) + +func TestDiskChangesNames(t *testing.T) { + m := map[string]string{ + "node_disk_read_bytes_total": "sonar_disk_sectors_read", + "node_disk_written_bytes_total": "sonar_disk_sectors_written", + } + + dec := Disk{} + for old, new := range m { + t.Run(old, func(t *testing.T) { + mfs := []*dto.MetricFamily{ + {Name: &old}, + } + dec.Decorate(mfs) + + assert.Equal(t, new, mfs[0].GetName()) + }) + } +} + +func TestDiskConvertsBytesToSectors(t *testing.T) { + names := []string{ + "node_disk_read_bytes_total", + "node_disk_written_bytes_total", + } + + for _, name := range names { + // make sure to reset num for every test since it's a pointer + num := 63219712.0 + exp := num / diskSectorSize + + dec := Disk{} + metric := dto.Metric{ + Counter: &dto.Counter{Value: &num}, + } + + t.Run(name, func(t *testing.T) { + mfs := []*dto.MetricFamily{ + { + Type: &counterMetricType, + Name: &name, + Metric: []*dto.Metric{&metric}, + }, + } + dec.Decorate(mfs) + require.EqualValues(t, exp, metric.Counter.GetValue()) + }) + } +} + +func TestDiskHasName(t *testing.T) { + assert.Equal(t, "compat.Disk", Disk{}.Name()) +} diff --git a/pkg/decorate/compat/names.go b/pkg/decorate/compat/names.go new file mode 100644 index 00000000..eb11ba35 --- /dev/null +++ b/pkg/decorate/compat/names.go @@ -0,0 +1,47 @@ +package compat + +import ( + "fmt" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// nameConversions is a list of metrics which differ only in name +var nameConversions = map[string]string{ + "node_network_receive_bytes_total": "sonar_network_receive_bytes", + "node_network_transmit_bytes_total": "sonar_network_transmit_bytes", + "node_memory_memtotal_bytes": "sonar_memory_total", + "node_memory_memfree_bytes": "sonar_memory_free", + 
"node_memory_cached_bytes": "sonar_memory_cached", + "node_memory_swapcached_bytes": "sonar_memory_swap_cached", + "node_memory_swapfree_bytes": "sonar_memory_swap_free", + "node_memory_swaptotal_bytes": "sonar_memory_swap_total", + "node_filesystem_size_bytes": "sonar_filesystem_size", + "node_filesystem_free_bytes": "sonar_filesystem_free", + "node_load1": "sonar_load1", + "node_load5": "sonar_load5", + "node_load15": "sonar_load15", +} + +// Names converts node_exporter metric names to sonar names +type Names struct{} + +// Name is the name of this decorator +func (n Names) Name() string { + return fmt.Sprintf("%T", n) +} + +// Decorate decorates the provided metrics for compatibility +func (Names) Decorate(mfs []*dto.MetricFamily) { + for _, mf := range mfs { + n := strings.ToLower(mf.GetName()) + if newName, ok := nameConversions[n]; ok { + mf.Name = &newName + } + } +} + +func sptr(s string) *string { + return &s +} diff --git a/pkg/decorate/compat/names_test.go b/pkg/decorate/compat/names_test.go new file mode 100644 index 00000000..5b35dafe --- /dev/null +++ b/pkg/decorate/compat/names_test.go @@ -0,0 +1,39 @@ +package compat + +import ( + "strings" + "testing" + + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" +) + +func TestCompatConvertsLabels(t *testing.T) { + for old, new := range nameConversions { + t.Run(old, func(t *testing.T) { + mfs := []*dto.MetricFamily{ + {Name: &old}, + } + Names{}.Decorate(mfs) + + assert.Equal(t, new, mfs[0].GetName()) + }) + } +} + +func TestCompatIsCaseInsensitive(t *testing.T) { + for old, new := range nameConversions { + t.Run(old, func(t *testing.T) { + mfs := []*dto.MetricFamily{ + {Name: sptr(strings.ToUpper(old))}, + } + Names{}.Decorate(mfs) + + assert.Equal(t, new, mfs[0].GetName()) + }) + } +} + +func TestNamesHasName(t *testing.T) { + assert.Equal(t, "compat.Names", Names{}.Name()) +} diff --git a/pkg/decorate/lowercase.go b/pkg/decorate/lowercase.go new file mode 100644 index 00000000..7d0396db --- /dev/null +++ b/pkg/decorate/lowercase.go @@ -0,0 +1,25 @@ +package decorate + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LowercaseNames decorates metrics to be have all lowercase label names +type LowercaseNames struct{} + +// Decorate decorates the provided metrics for compatibility +func (LowercaseNames) Decorate(mfs []*dto.MetricFamily) { + // names come back with varying cases like some_TCP_connection + // and we want consistency so we lowercase them + for _, fam := range mfs { + lower := strings.ToLower(fam.GetName()) + fam.Name = &lower + } +} + +// Name is the name of this decorator +func (LowercaseNames) Name() string { + return "LowercaseNames" +} diff --git a/pkg/decorate/lowercase_test.go b/pkg/decorate/lowercase_test.go new file mode 100644 index 00000000..36f55913 --- /dev/null +++ b/pkg/decorate/lowercase_test.go @@ -0,0 +1,22 @@ +package decorate + +import ( + "strings" + "testing" + + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" +) + +func TestLowercaseNamesChangesLabels(t *testing.T) { + d := LowercaseNames{} + + actual := "JKLKJSFDJKLjkasdfjklasdf" + expected := strings.ToLower(actual) + + items := []*dto.MetricFamily{ + {Name: &actual}, + } + d.Decorate(items) + assert.Equal(t, expected, items[0].GetName()) +} diff --git a/pkg/writer/file.go b/pkg/writer/file.go new file mode 100644 index 00000000..b6464f36 --- /dev/null +++ b/pkg/writer/file.go @@ -0,0 +1,40 @@ +package writer + +import ( + "fmt" + "io" + "sync" + 
+ dto "github.com/prometheus/client_model/go" +) + +// File writes metrics to an io.Writer +type File struct { + w io.Writer + m *sync.Mutex +} + +// NewFile creates a new File writer with the provided writer +func NewFile(w io.Writer) *File { + return &File{ + w: w, + m: new(sync.Mutex), + } +} + +// Write writes metrics to the file +func (w *File) Write(mets []*dto.MetricFamily) error { + w.m.Lock() + defer w.m.Unlock() + for _, mf := range mets { + for _, met := range mf.Metric { + fmt.Fprintf(w.w, "[%s]: %s: %s\n", mf.GetType(), mf.GetName(), met.String()) + } + } + return nil +} + +// Name is the name of this writer +func (w *File) Name() string { + return "file" +} diff --git a/pkg/writer/sonar.go b/pkg/writer/sonar.go new file mode 100644 index 00000000..90bff03d --- /dev/null +++ b/pkg/writer/sonar.go @@ -0,0 +1,68 @@ +package writer + +import ( + "github.com/digitalocean/do-agent/internal/log" + "github.com/digitalocean/do-agent/pkg/clients/tsclient" + dto "github.com/prometheus/client_model/go" +) + +// Sonar writes metrics to DigitalOcean sonar +type Sonar struct { + client tsclient.Client + firstWriteSent bool +} + +// NewSonar creates a new Sonar writer +func NewSonar(client tsclient.Client) *Sonar { + return &Sonar{ + client: client, + firstWriteSent: false, + } +} + +// Write writes the metrics to Sonar and returns the amount of time to wait +// before the next write +func (s *Sonar) Write(mets []*dto.MetricFamily) error { + for _, mf := range mets { + for _, metric := range mf.Metric { + var value float64 + switch *mf.Type { + case dto.MetricType_GAUGE: + value = *metric.Gauge.Value + case dto.MetricType_COUNTER: + value = *metric.Counter.Value + case dto.MetricType_UNTYPED: + value = *metric.Untyped.Value + default: + // FIXME -- expand this to support other types + continue + } + + labels := map[string]string{} + for _, label := range metric.Label { + labels[*label.Name] = *label.Value + } + + err := s.client.AddMetric( + tsclient.NewDefinition(*mf.Name, tsclient.WithCommonLabels(labels)), + value) + if err != nil { + log.Error("Failed to add metric %q: %+v", mf.GetName(), err) + } + + } + + } + err := s.client.Flush() + httpError, ok := err.(*tsclient.UnexpectedHTTPStatusError) + if !s.firstWriteSent && ok && httpError.StatusCode == 429 { + err = nil + } + s.firstWriteSent = true + return err +} + +// Name is the name of this writer +func (s *Sonar) Name() string { + return "sonar" +} diff --git a/plugins/collector.go b/plugins/collector.go deleted file mode 100644 index be55a072..00000000 --- a/plugins/collector.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package plugins - -import ( - "encoding/json" - - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" -) - -type initResult struct { - // Definitions indexed by name. - Definitions map[string]*metrics.Definition `json:"definitions"` -} - -type metricsResult struct { - // Metric values indexed by name. 
- Metrics map[string]*metricValue `json:"metrics"` -} - -type metricValue struct { - Value float64 `json:"value"` - LabelValues []string `json:"label_values,omitempty"` -} - -// RegisterPluginDir adds a collector to retrieve metrics from external -// plugins. -func RegisterPluginDir(r metrics.Registry, dirPath string) { - h := NewExternalPluginHandler(dirPath) - - refs := make(map[string]metrics.MetricRef) - - for _, result := range h.ExecuteAll("config") { - if len(result.Stderr) > 0 { - log.Errorf("plugin error %q: %s", result.PluginPath, result.Stderr) - } - - var res initResult - err := json.Unmarshal(result.Output, &res) - if err != nil { - log.Errorf("unable to parse plugin %q: %s", result.PluginPath, err) - h.RemovePlugin(result.PluginPath) - continue - } - if len(res.Definitions) == 0 { - log.Debugf("no metric definitions in %q", result.PluginPath) - h.RemovePlugin(result.PluginPath) - continue - } - - for name, d := range res.Definitions { - ref := r.Register(name, metrics.AsType(d.Type), - metrics.WithCommonLabels(d.CommonLabels), - metrics.WithMeasuredLabels(d.MeasuredLabelKeys...)) - refs[name] = ref - } - } - - r.AddCollector(func(reporter metrics.Reporter) { - for _, result := range h.ExecuteAll() { - if len(result.Stderr) > 0 { - log.Errorf("plugin error %q: %s", result.PluginPath, result.Stderr) - } - - var res metricsResult - err := json.Unmarshal(result.Output, &res) - if err != nil { - log.Errorf("unable to parse plugin %q: %s", result.PluginPath, err) - continue - } - - for name, m := range res.Metrics { - ref, ok := refs[name] - if !ok { - log.Debugf("undefined metric from plugin %q: %s", - result.PluginPath, name) - continue - } - - reporter.Update(ref, m.Value, m.LabelValues...) - } - } - }) -} diff --git a/plugins/external.go b/plugins/external.go deleted file mode 100644 index 45269d70..00000000 --- a/plugins/external.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package plugins - -import ( - "bytes" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/digitalocean/do-agent/log" -) - -// NewExternalPluginHandler creates a new handlers for external plugin support. -func NewExternalPluginHandler(root string) *ExternalPluginHandler { - h := &ExternalPluginHandler{ - plugins: make(map[string]*externalPlugin), - } - - filepath.Walk(root, func(p string, f os.FileInfo, err error) error { - if err != nil { - return err - } - mode := f.Mode() - if f.IsDir() || !mode.IsRegular() || (mode&0111 == 0) { - return nil - } - - abs, err := filepath.Abs(p) - if err != nil { - log.Debugf("unable to get plugin path for %q: %s", p, err) - return nil - } - - // Found an executable file. - h.plugins[abs] = &externalPlugin{} - return nil - }) - return h -} - -// ExternalPluginHandler is used to manage external plugins. -// External plugins are separate programs which can be used by do-agent to -// collect metrics on its behalf. 
-type ExternalPluginHandler struct { - plugins map[string]*externalPlugin -} - -type externalPlugin struct { -} - -// ExecResult holds the result of a plugin run. -type ExecResult struct { - PluginPath string - Output []byte - Stderr string - Error error -} - -// ExecuteAll runs a command on all plugins, reporting results from successful -// runs. -// Returns a mapping from plugin path to result. -func (e *ExternalPluginHandler) ExecuteAll(args ...string) []*ExecResult { - results := make([]*ExecResult, 0, len(e.plugins)) - for binPath := range e.plugins { - r := e.Execute(binPath, args...) - if r.Error != nil { - log.Errorf("unable to execute plugin %q: %s", binPath, r.Error) - continue - } - results = append(results, e.Execute(binPath, args...)) - } - return results -} - -// RemovePlugin drops the plugin reference to the given path. -// This does not touch files on disk, it just removes the in-memory reference -// so that it will not be run in future calls to ExecuteAll. -func (e *ExternalPluginHandler) RemovePlugin(binPath string) { - delete(e.plugins, binPath) -} - -// Execute runs an external plugin and returns the results. -func (e *ExternalPluginHandler) Execute(binPath string, args ...string) *ExecResult { - cmd := exec.Command(binPath) - if len(args) > 0 { - cmd.Args = append(cmd.Args, args...) - } - var stderrBuf bytes.Buffer - cmd.Stderr = &stderrBuf - out, err := cmd.Output() - - return &ExecResult{ - PluginPath: binPath, - Output: out, - Stderr: strings.TrimSpace(string(stderrBuf.Bytes())), - Error: err, - } -} diff --git a/plugins/plugin_test.go b/plugins/plugin_test.go deleted file mode 100644 index 20e7dfa0..00000000 --- a/plugins/plugin_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package plugins - -import ( - "strings" - "testing" - - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" -) - -func TestExternalPluginHandler(t *testing.T) { - handler := NewExternalPluginHandler(".") - results := handler.ExecuteAll("config") - var found bool - for _, res := range results { - t.Logf("reading result from %q", res.PluginPath) - t.Logf("-- output: %q", string(res.Output)) - if strings.Contains(string(res.Output), "definitions") { - found = true - } - } - if !found { - t.Errorf("no plugin info found") - } -} - -func TestExternalPluginHandlerMissingDir(t *testing.T) { - handler := NewExternalPluginHandler("/should_not_exist/i_hope") - if len(handler.plugins) != 0 { - t.Logf("expected 0 plugins, found: %v", handler.plugins) - } -} - -func TestExternalPluginError(t *testing.T) { - h := NewExternalPluginHandler(".") - var found bool - for _, res := range h.ExecuteAll() { - if len(res.Stderr) > 0 { - t.Logf("found plugin error: %q", string(res.Stderr)) - if string(res.Stderr) != "intentional error" { - t.Errorf("unexpected error: %q", res.Stderr) - } - found = true - } - } - if !found { - t.Errorf("expected plugin error not found") - } -} - -type mockReporter struct { - updates []*update -} - -type update struct { - id metrics.MetricRef - value float64 - labels []string -} - -func (m *mockReporter) Update(id metrics.MetricRef, value float64, labels ...string) { - m.updates = append(m.updates, &update{ - id: id, - value: value, - labels: labels, - }) -} - -func TestPluginRegistry(t *testing.T) { - registry := metrics.NewRegistry() - RegisterPluginDir(registry, ".") - - reporter := &mockReporter{} - registry.Report(reporter) - - var found bool - for _, up := range reporter.updates { - def, ok := up.id.(*metrics.Definition) - if !ok { - t.Errorf("unable to get metrics definition") - continue - } - log.Debugf("found metric %q with value %f", def.Name, up.value) - if def.Name == "test" { - found = true - } - } - if !found { - t.Errorf("test plugin metric not found in result set") - } -} diff --git a/plugins/reporter.go b/plugins/reporter.go deleted file mode 100644 index 86c8482f..00000000 --- a/plugins/reporter.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package plugins - -import ( - "bytes" - "encoding/json" - "io" - - "github.com/digitalocean/do-agent/log" - "github.com/digitalocean/do-agent/metrics" -) - -// NewExportReporter returns a new reporter for use in plugins when exporting -// metrics. -func NewExportReporter() *ExportReporter { - return &ExportReporter{ - defs: &initResult{ - Definitions: make(map[string]*metrics.Definition), - }, - metrics: &metricsResult{ - Metrics: make(map[string]*metricValue), - }, - } -} - -// ExportReporter is a metrics.Reporter implementation which can serialize -// results for a collection plugin. 
-type ExportReporter struct {
- defs *initResult
- metrics *metricsResult
-}
-
-var _ metrics.Reporter = &ExportReporter{}
-
-// Update handles a metric update.
-func (r *ExportReporter) Update(id metrics.MetricRef, value float64,
- labelValues ...string) {
-
- def, ok := id.(*metrics.Definition)
- if !ok {
- log.Debugf("unknown metric: %v", id)
- return
- }
- if _, ok := r.defs.Definitions[def.Name]; !ok {
- r.defs.Definitions[def.Name] = def
- }
-
- r.metrics.Metrics[def.Name] = &metricValue{
- Value: value,
- LabelValues: labelValues,
- }
-}
-
-// Write serializes either the metric definitions (writeConfig true) or the
-// collected metric values (writeConfig false) as indented JSON to w.
-func (r *ExportReporter) Write(w io.Writer, writeConfig bool) error {
- var data interface{}
- if writeConfig {
- data = r.defs
- } else {
- data = r.metrics
- }
-
- buf, err := json.MarshalIndent(data, "", " ")
- if err != nil {
- return err
- }
-
- _, err = io.Copy(w, bytes.NewBuffer(buf))
- return err
-} diff --git a/plugins/reporter_test.go b/plugins/reporter_test.go deleted file mode 100644 index cb2349ac..00000000 --- a/plugins/reporter_test.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package plugins
-
-import (
- "bytes"
- "strings"
- "testing"
-
- "github.com/digitalocean/do-agent/metrics"
-)
-
-func TestExportReporter(t *testing.T) {
- registry := metrics.NewRegistry()
- testRef := registry.Register("test")
- registry.AddCollector(func(r metrics.Reporter) {
- r.Update(testRef, 3.1415)
- })
-
- // Check that the metric gets produced on the output.
- // This does not validate the format.
- reporter := NewExportReporter()
- registry.Report(reporter)
-
- var out bytes.Buffer
- err := reporter.Write(&out, true)
- if err != nil {
- t.Errorf("config write failure: %s", err)
- }
- t.Logf("plugin config: %q", out.String())
- if !strings.Contains(out.String(), "definitions") {
- t.Errorf("config output invalid: %q", out.String())
- }
-
- out.Reset()
- err = reporter.Write(&out, false)
- if err != nil {
- t.Errorf("value write failure: %s", err)
- }
- t.Logf("plugin value: %q", out.String())
- if !strings.Contains(out.String(), "3.1415") {
- t.Errorf("value output invalid: %q", out.String())
- }
-} diff --git a/plugins/test-error.sh b/plugins/test-error.sh deleted file mode 100755 index 85c8c46e..00000000 --- a/plugins/test-error.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh
-
-# Copyright 2016 DigitalOcean
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
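-# Print a fixed message on stderr; TestExternalPluginError expects plugin
-# stderr to be captured verbatim as "intentional error".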
-
->&2 echo "intentional error" diff --git a/plugins/test.sh b/plugins/test.sh deleted file mode 100755 index 6daa20e6..00000000 --- a/plugins/test.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/sh
-
-# Copyright 2016 DigitalOcean
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# If "config" is passed as the argument, then send back the metric definition(s).
-case $1 in
- config)
- cat <<'EOM'
-{
- "protocol": "do-agent:1",
- "definitions": {
- "test": {
- "type": 1,
- "labels": {
- "user": "foo"
- }
- }
- }
-}
-EOM
- exit 0;;
-esac
-
-# Otherwise send the metric value(s).
-cat <<'EOM'
-{
- "metrics": {
- "test": {
- "value": 42.0
- }
- }
-}
-EOM
-
-
diff --git a/procfs/diskstats.go b/procfs/diskstats.go deleted file mode 100644 index e66f48dd..00000000 --- a/procfs/diskstats.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-const diskPathSuffix = "diskstats"
-
-// Disk contains the data exposed by the /proc/diskstats pseudo-file
-// system file since Linux 2.5.69.
-type Disk struct {
- //major number
- MajorNumber uint64
- //minor number
- MinorNumber uint64
- //device name
- DeviceName string
- //reads completed successfully
- ReadsCompleted uint64
- //reads merged
- ReadsMerged uint64
- //sectors read
- SectorsRead uint64
- //time spent reading (ms)
- TimeSpentReading uint64
- //writes completed
- WritesCompleted uint64
- //writes merged
- WritesMerged uint64
- //sectors written
- SectorsWritten uint64
- //time spent writing (ms)
- TimeSpendWriting uint64
- //I/Os currently in progress
- IOInProgress uint64
- //time spent doing I/Os (ms)
- TimeSpentDoingIO uint64
- //weighted time spent doing I/Os (ms)
- WeightedTimeSpentDoingIO uint64
-}
-
-// Disker is a collection of Disk metrics exposed by the
-// procfs.
-type Disker interface {
- NewDisk() ([]Disk, error)
-}
-
-// diskPath returns the relative procfs location.
-func diskPath() string {
- return fmt.Sprintf("%s/%s", ProcPath, diskPathSuffix)
-}
-
-// NewDisk collects data from the /proc/diskstats pseudo-file system
-// and converts it into a slice of Disk structures.
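-// Each line of /proc/diskstats carries at least 14 whitespace-separated
-// fields: major, minor, and device name, followed by the I/O counters
-// parsed by parseDisk below.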
-func NewDisk() ([]Disk, error) { - f, err := os.Open(diskPath()) - if err != nil { - err = fmt.Errorf("Unable to collect disk metrics from %s - error: %s", diskPath(), err) - return []Disk{}, err - } - defer f.Close() - return readDisk(f) -} - -func readDisk(f io.Reader) ([]Disk, error) { - scanner := bufio.NewScanner(f) - - var disks []Disk - - for scanner.Scan() { - line := scanner.Text() - - disk, err := parseDisk(line) - if err != nil { - return []Disk{}, err - } - disks = append(disks, disk) - } - return disks, scanner.Err() -} - -// parseDisk parses a string and returns a Disk if the string is in -// the expected format. -func parseDisk(line string) (Disk, error) { - lineArray := strings.Fields(line) - - if len(lineArray) < 14 { - err := fmt.Errorf("Unsupported %s format: %s", diskPath(), line) - return Disk{}, err - } - - disk := Disk{} - - disk.MajorNumber, _ = strconv.ParseUint(lineArray[0], 10, 64) - disk.MinorNumber, _ = strconv.ParseUint(lineArray[1], 10, 64) - disk.DeviceName = lineArray[2] - disk.ReadsCompleted, _ = strconv.ParseUint(lineArray[3], 10, 64) - disk.ReadsMerged, _ = strconv.ParseUint(lineArray[4], 10, 64) - disk.SectorsRead, _ = strconv.ParseUint(lineArray[5], 10, 64) - disk.TimeSpentReading, _ = strconv.ParseUint(lineArray[6], 10, 64) - disk.WritesCompleted, _ = strconv.ParseUint(lineArray[7], 10, 64) - disk.WritesMerged, _ = strconv.ParseUint(lineArray[8], 10, 64) - disk.SectorsWritten, _ = strconv.ParseUint(lineArray[9], 10, 64) - disk.TimeSpendWriting, _ = strconv.ParseUint(lineArray[10], 10, 64) - disk.IOInProgress, _ = strconv.ParseUint(lineArray[11], 10, 64) - disk.TimeSpentDoingIO, _ = strconv.ParseUint(lineArray[12], 10, 64) - disk.WeightedTimeSpentDoingIO, _ = strconv.ParseUint(lineArray[13], 10, 64) - - return disk, nil -} diff --git a/procfs/diskstats_test.go b/procfs/diskstats_test.go deleted file mode 100644 index f0ce31ff..00000000 --- a/procfs/diskstats_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "reflect" - "strings" - "testing" -) - -var diskTestValues = ` 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 253 0 vda 36472 5 1433914 9388 620084 597722 65646312 2187924 0 373096 2196720 - 253 1 vda1 36283 0 1432362 9368 620084 597722 65646312 2187924 0 373080 2196696 -` - -func TestReadDisk(t *testing.T) { - d, err := readDisk(strings.NewReader(diskTestValues)) - if err != nil { - t.Error("Unable to read test values") - } - - expectedLen := 26 - if len(d) != expectedLen { - t.Errorf("Expected %d disk items, actual was %d", expectedLen, len(d)) - } -} - -func TestParseDiskValues(t *testing.T) { - const testLine = " 253 0 vda 36472 5 1433914 9388 620084 597722 65646312 2187924 0 373096 2196720" - - d, err := parseDisk(testLine) - if err != nil { - t.Errorf("Unexpected error occurred while parsing \"%s\" error=%s", testLine, err) - } - - dr := reflect.ValueOf(d) - - var diskTestValues = []struct { - n string - expected uint64 - }{ - {"MajorNumber", 253}, - {"MinorNumber", 0}, - {"ReadsCompleted", 36472}, - {"ReadsMerged", 5}, - {"SectorsRead", 1433914}, - {"TimeSpentReading", 9388}, - {"WritesCompleted", 620084}, - {"WritesMerged", 597722}, - {"SectorsWritten", 65646312}, - {"TimeSpendWriting", 2187924}, - {"IOInProgress", 0}, - {"TimeSpentDoingIO", 373096}, - {"WeightedTimeSpentDoingIO", 2196720}, - } - - for _, dt := range diskTestValues { - actual := reflect.Indirect(dr).FieldByName(dt.n).Uint() - if actual != dt.expected { - t.Errorf("Disk.%s:: expected %d, actual %d", dt.n, dt.expected, actual) - } - } - - expectedDeviceName := "vda" - if d.DeviceName != expectedDeviceName { - t.Errorf("Disk.DeviceName: expected %s, actual %s", expectedDeviceName, d.DeviceName) - } -} - -func TestParseDiskFail(t *testing.T) { - const testFailLine = " 253 0 vda 36472 5 1433914 9388 620084 597722 65646312 2187924 0 373096" - - _, err := parseDisk(testFailLine) - if err == nil { - t.Errorf("Expected error did not occur while parsing \"%s\", there aren't enough fields", testFailLine) - } -} diff --git a/procfs/loadavg.go b/procfs/loadavg.go deleted file mode 100644 index 25ac3a75..00000000 --- a/procfs/loadavg.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-const loadPathSuffix = "loadavg"
-
-// Load contains the data exposed by the /proc/loadavg pseudo-file
-// system file.
-type Load struct {
- Load1 float64 //number of jobs in the run queue or waiting averaged over 1 minute
- Load5 float64 //number of jobs in the run queue or waiting averaged over 5 minutes
- Load15 float64 //number of jobs in the run queue or waiting averaged over 15 minutes
- RunningProcs uint64 //Count of currently running processes
- TotalProcs uint64 //Count of total processes
- LastPIDUsed uint64 //Last process id used
-}
-
-// Loader is a collection of Load metrics exposed by the
-// procfs.
-type Loader interface {
- NewLoad() (Load, error)
-}
-
-// loadPath returns the relative procfs location.
-func loadPath() string {
- return fmt.Sprintf("%s/%s", ProcPath, loadPathSuffix)
-}
-
-// NewLoad collects data from the /proc/loadavg pseudo-file system file
-// and converts it into a Load structure.
-func NewLoad() (Load, error) {
- f, err := os.Open(loadPath())
- if err != nil {
- err = fmt.Errorf("Unable to collect load metrics from %s - error: %s", loadPath(), err)
- return Load{}, err
- }
- defer f.Close()
-
- return readLoad(f)
-}
-
-func readLoad(f io.Reader) (Load, error) {
- scanner := bufio.NewScanner(f)
-
- scanner.Scan()
- line := scanner.Text()
-
- return parseLoad(line)
-}
-
-// parseLoad parses a string and returns a Load if the string is in
-// the expected format.
-func parseLoad(line string) (Load, error) {
- lineArray := strings.Fields(line)
-
- if len(lineArray) < 5 {
- err := fmt.Errorf("Unsupported %s format: %s", loadPath(), line)
- return Load{}, err
- }
-
- load := Load{}
-
- load.Load1, _ = strconv.ParseFloat(lineArray[0], 64)
- load.Load5, _ = strconv.ParseFloat(lineArray[1], 64)
- load.Load15, _ = strconv.ParseFloat(lineArray[2], 64)
-
- procsArray := strings.Split(lineArray[3], "/")
- if len(procsArray) < 2 {
- return Load{}, fmt.Errorf("Unsupported %s format: %s", loadPath(), line)
- }
-
- load.RunningProcs, _ = strconv.ParseUint(procsArray[0], 10, 64)
- load.TotalProcs, _ = strconv.ParseUint(procsArray[1], 10, 64)
- load.LastPIDUsed, _ = strconv.ParseUint(lineArray[4], 10, 64)
-
- return load, nil
-} diff --git a/procfs/loadavg_test.go b/procfs/loadavg_test.go deleted file mode 100644 index e64e6829..00000000 --- a/procfs/loadavg_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package procfs - -import ( - "reflect" - "strings" - "testing" -) - -var loadTestValues = `0.04 0.03 0.05 1/84 1269` - -func TestReadLoad(t *testing.T) { - l, err := readLoad(strings.NewReader(loadTestValues)) - if err != nil { - t.Error("Unable to read test values") - } - - if l.Load1 != 0.04 || l.Load5 != 0.03 || l.Load15 != 0.05 { - t.Errorf("Expected values not found in Load=%+v", l) - } -} - -func TestParseLoadValues(t *testing.T) { - const testLine = "0.04 0.03 0.05 1/84 1269" - - l, err := parseLoad(testLine) - if err != nil { - t.Errorf("Unexpected error occurred while parsing \"%s\" error=%s", testLine, err) - } - - lr := reflect.ValueOf(l) - - var loadTestValues = []struct { - n string - expected float64 - }{ - {"Load1", 0.04}, - {"Load5", 0.03}, - {"Load15", 0.05}, - } - - var loadProcTestValues = []struct { - n string - expected uint64 - }{ - {"RunningProcs", 1}, - {"TotalProcs", 84}, - {"LastPIDUsed", 1269}, - } - - for _, lt := range loadTestValues { - actual := reflect.Indirect(lr).FieldByName(lt.n).Float() - if actual != lt.expected { - t.Errorf("Load.%s: expected %f, actual %f", lt.n, lt.expected, actual) - } - } - - for _, lpt := range loadProcTestValues { - actual := reflect.Indirect(lr).FieldByName(lpt.n).Uint() - if actual != lpt.expected { - t.Errorf("Load.%s: expected %d, actual %d", lpt.n, lpt.expected, actual) - } - } -} - -func TestParseLoad(t *testing.T) { - const testFailLine = "0.04 0.03 0.05 1/84" - - _, err := parseLoad(testFailLine) - if err == nil { - t.Errorf("Expected error did not occur while parsing \"%s\", there aren't enough fields", testFailLine) - } -} diff --git a/procfs/meminfo.go b/procfs/meminfo.go deleted file mode 100644 index 5f763912..00000000 --- a/procfs/meminfo.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" - - "github.com/digitalocean/do-agent/log" -) - -const memoryPathSuffix = "meminfo" - -// Memory contains the data exposed by the /proc/meminfo pseudo-file -// system file in kb. 
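-// Field names mirror the keys reported by /proc/meminfo; getMeminfoFieldMap
-// below maps each key onto the matching struct field.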
-type Memory struct {
- MemTotal float64 // total physical ram kb
- MemFree float64 // unused physical ram kb
- MemAvailable float64
- Buffers float64 // physical ram used for buffers kb
- Cached float64 // physical ram used as cache
- SwapCached float64 // swap size used as cache
- Active float64
- Inactive float64
- ActiveAnon float64
- InactiveAnon float64
- ActiveFile float64
- InactiveFile float64
- Unevictable float64
- Mlocked float64
- SwapTotal float64
- SwapFree float64
- Dirty float64
- Writeback float64
- AnonPages float64
- Mapped float64
- Shmem float64
- Slab float64
- SReclaimable float64
- SUnreclaim float64
- KernelStack float64
- PageTables float64
- NFSUnstable float64
- Bounce float64
- WritebackTmp float64
- CommitLimit float64
- CommittedAS float64
- VmallocTotal float64
- VmallocUsed float64
- VmallocChunk float64
- HardwareCorrupted float64
- AnonHugePages float64
- CmaTotal float64
- CmaFree float64
- HugePagesTotal float64
- HugePagesFree float64
- HugePagesRsvd float64
- HugePagesSurp float64
- Hugepagesize float64
- DirectMap4k float64
- DirectMap2M float64
- DirectMap1G float64
-}
-
-type memoryLine struct {
- field string
- value float64
-}
-
-type meminfoFieldMap map[string]*float64
-
-// Memoryer is a collection of memory metrics exposed by the
-// procfs.
-type Memoryer interface {
- NewMemory() (Memory, error)
-}
-
-// memoryPath returns the relative procfs location.
-func memoryPath() string {
- return fmt.Sprintf("%s/%s", ProcPath, memoryPathSuffix)
-}
-
-// NewMemory collects data from the /proc/meminfo system file and
-// converts it into a Memory structure.
-func NewMemory() (Memory, error) {
- f, err := os.Open(memoryPath())
- if err != nil {
- err = fmt.Errorf("Unable to collect memory metrics from %s - error: %s", memoryPath(), err)
- return Memory{}, err
- }
- defer f.Close()
-
- return readMemory(f)
-}
-
-func readMemory(f io.Reader) (Memory, error) {
- scanner := bufio.NewScanner(f)
- memory := Memory{}
- memoryMap := getMeminfoFieldMap(&memory)
-
- for scanner.Scan() {
- line := scanner.Text()
-
- ml, err := parseMemory(line)
- if err != nil {
- return memory, err
- }
-
- if memItem, ok := memoryMap[ml.field]; ok {
- *memItem = ml.value
- } else {
- log.Debugf("meminfo field not recognized: %s", ml.field)
- }
- }
- return memory, scanner.Err()
-}
-
-// parseMemory parses a single line from /proc/meminfo and
-// creates a memoryLine struct from it. An error is returned if it
-// fails to parse the line or to convert the value into a float64.
-func parseMemory(line string) (memoryLine, error) { - lineArray := strings.Fields(line) - if len(lineArray) < 2 { - err := fmt.Errorf("meminfo line contains less than two fields: %s", line) - return memoryLine{}, err - } - - ml := memoryLine{} - - ml.field = lineArray[0][0 : len(lineArray[0])-1] - - value, err := strconv.ParseFloat(lineArray[1], 64) - if err != nil { - err = fmt.Errorf("unable to convert meminfo value to float64: %s", line) - return memoryLine{}, err - } - ml.value = value - return ml, err -} - -func getMeminfoFieldMap(memory *Memory) meminfoFieldMap { - memoryMap := map[string]*float64{ - "MemTotal": &memory.MemTotal, - "MemFree": &memory.MemFree, - "MemAvailable": &memory.MemAvailable, - "Buffers": &memory.Buffers, - "Cached": &memory.Cached, - "SwapCached": &memory.SwapCached, - "Active": &memory.Active, - "Inactive": &memory.Inactive, - "Active(anon)": &memory.ActiveAnon, - "Inactive(anon)": &memory.InactiveAnon, - "Active(file)": &memory.ActiveFile, - "Inactive(file)": &memory.InactiveFile, - "Unevictable": &memory.Unevictable, - "Mlocked": &memory.Mlocked, - "SwapTotal": &memory.SwapTotal, - "SwapFree": &memory.SwapFree, - "Dirty": &memory.Dirty, - "Writeback": &memory.Writeback, - "AnonPages": &memory.AnonPages, - "Mapped": &memory.Mapped, - "Shmem": &memory.Shmem, - "Slab": &memory.Slab, - "SReclaimable": &memory.SReclaimable, - "SUnreclaim": &memory.SUnreclaim, - "KernelStack": &memory.KernelStack, - "PageTables": &memory.PageTables, - "NFS_Unstable": &memory.NFSUnstable, - "Bounce": &memory.Bounce, - "WritebackTmp": &memory.WritebackTmp, - "CommitLimit": &memory.CommitLimit, - "Committed_AS": &memory.CommittedAS, - "VmallocTotal": &memory.VmallocTotal, - "VmallocUsed": &memory.VmallocUsed, - "VmallocChunk": &memory.VmallocChunk, - "HardwareCorrupted": &memory.HardwareCorrupted, - "AnonHugePages": &memory.AnonHugePages, - "CmaTotal": &memory.CmaTotal, - "CmaFree": &memory.CmaFree, - "HugePages_Total": &memory.HugePagesTotal, - "HugePages_Free": &memory.HugePagesFree, - "HugePages_Rsvd": &memory.HugePagesRsvd, - "HugePages_Surp": &memory.HugePagesSurp, - "Hugepagesize": &memory.Hugepagesize, - "DirectMap4k": &memory.DirectMap4k, - "DirectMap2M": &memory.DirectMap2M, - "DirectMap1G": &memory.DirectMap1G, - } - return memoryMap -} diff --git a/procfs/meminfo_test.go b/procfs/meminfo_test.go deleted file mode 100644 index 765163ec..00000000 --- a/procfs/meminfo_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "reflect" - "strings" - "testing" -) - -const testMemoryValues = `MemTotal: 374256 kB -MemFree: 273784 kB -MemAvailable: 1 kB -Buffers: 10112 kB -Cached: 38816 kB -SwapCached: 0 kB -Active: 33920 kB -Inactive: 31516 kB -Active(anon): 16532 kB -Inactive(anon): 564 kB -Active(file): 17388 kB -Inactive(file): 30952 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 786428 kB -SwapFree: 786428 kB -Dirty: 0 kB -Writeback: 0 kB -AnonPages: 16480 kB -Mapped: 6652 kB -Shmem: 592 kB -Slab: 19628 kB -SReclaimable: 9360 kB -SUnreclaim: 10268 kB -KernelStack: 696 kB -PageTables: 1824 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 973556 kB -Committed_AS: 55892 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 9136 kB -VmallocChunk: 34359725884 kB -HardwareCorrupted: 0 kB -AnonHugePages: 0 kB -CmaTotal: 2 kB -CmaFree: 3 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 57280 kB -DirectMap2M: 335872 kB -` - -func TestNewMemory(t *testing.T) { - m, err := readMemory(strings.NewReader(testMemoryValues)) - if err != nil { - t.Errorf("Unable to read test values") - } - - // Spot checking - if m.MemTotal != 374256 { - t.Errorf("Expected values not found in Memory=%+v", m) - } - - if m.VmallocChunk != 34359725884 { - t.Errorf("Expected values not found in Memory=%+v", m) - } -} - -func TestParseMemoryValues(t *testing.T) { - m, err := readMemory(strings.NewReader(testMemoryValues)) - if err != nil { - t.Errorf("Unable to open test file %s", memoryPath()) - } - - mr := reflect.ValueOf(m) - - var memoryTestValues = []struct { - n string - expected float64 - }{ - {"MemTotal", 374256.0}, - {"MemFree", 273784.0}, - {"Buffers", 10112.0}, - {"Cached", 38816.0}, - {"SwapCached", 0.0}, - {"Active", 33920.0}, - {"Inactive", 31516.0}, - {"ActiveAnon", 16532.0}, - {"InactiveAnon", 564.0}, - {"ActiveFile", 17388.0}, - {"InactiveFile", 30952.0}, - {"Unevictable", 0.0}, - {"Mlocked", 0.0}, - {"SwapTotal", 786428.0}, - {"SwapFree", 786428.0}, - {"Dirty", 0.0}, - {"Writeback", 0.0}, - {"AnonPages", 16480.0}, - {"Mapped", 6652.0}, - {"Shmem", 592.0}, - {"Slab", 19628.0}, - {"SReclaimable", 9360.0}, - {"SUnreclaim", 10268.0}, - {"KernelStack", 696.0}, - {"PageTables", 1824.0}, - {"NFSUnstable", 0.0}, - {"Bounce", 0.0}, - {"WritebackTmp", 0.0}, - {"CommitLimit", 973556.0}, - {"CommittedAS", 55892.0}, - {"VmallocTotal", 34359738367.0}, - {"VmallocUsed", 9136.0}, - {"VmallocChunk", 34359725884.0}, - {"HardwareCorrupted", 0.0}, - {"AnonHugePages", 0.0}, - {"HugePagesTotal", 0.0}, - {"HugePagesFree", 0.0}, - {"HugePagesRsvd", 0.0}, - {"HugePagesSurp", 0.0}, - {"Hugepagesize", 2048.0}, - {"DirectMap4k", 57280.0}, - {"DirectMap2M", 335872.0}, - {"DirectMap1G", 0.0}, - {"MemAvailable", 1.0}, - {"CmaFree", 3.0}, - {"CmaTotal", 2.0}, - } - - for _, mt := range memoryTestValues { - actual := reflect.Indirect(mr).FieldByName(mt.n).Float() - if actual != mt.expected { - t.Errorf("Memory.%s: expected %f, actual %f", mt.n, mt.expected, actual) - } - } -} - -func TestParseMemory(t *testing.T) { - const testLine1 = "MemTotal: 374256 kB" - const testLine2 = "VmallocChunk: 34359725884 kB" - const testLine3 = "HugePages_Total: 0" - const testFailLine = "HugePages_Total:" - - ml, err := parseMemory(testLine1) - if ml.field != "MemTotal" || ml.value != 374256 || err != nil { - t.Errorf("Unexpected error parsing line=%s", testLine1) - } - - ml, err = parseMemory(testLine2) - if ml.field != "VmallocChunk" || ml.value != 34359725884 || 
err != nil { - t.Errorf("Unexpected error parsing line=%s", testLine2) - } - - ml, err = parseMemory(testLine3) - if ml.field != "HugePages_Total" || ml.value != 0 || err != nil { - t.Errorf("Unexpected error parsing line=%s", testLine3) - } - - _, err = parseMemory(testFailLine) - if err == nil { - t.Errorf("error should be present for line=%s", testFailLine) - } -} - -func TestMeminfoFieldMap(t *testing.T) { - memory := Memory{} - memoryMap := getMeminfoFieldMap(&memory) - - expectedPtr := memoryMap["MemTotal"] - if &memory.MemTotal != expectedPtr { - t.Errorf("pointers should be equal: actual=%p expected=%p", &memory.MemTotal, expectedPtr) - } - - expectedPtr = memoryMap["Bounce"] - if &memory.Bounce != expectedPtr { - t.Errorf("pointers should be equal: actual=%p expected=%p", &memory.Bounce, expectedPtr) - } -} diff --git a/procfs/mounts.go b/procfs/mounts.go deleted file mode 100644 index 08c07ffd..00000000 --- a/procfs/mounts.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -const mountPathSuffix = "mounts" - -// Mount contains the data exposed by the /proc/mounts pseudo-file -// system file. -type Mount struct { - Device string - MountPoint string - FSType string -} - -// Mounter is a collection of mount metrics exposed by the -// procfs. -type Mounter interface { - NewMount() ([]Mount, error) -} - -// mountPath returns the relative procfs location. -func mountPath() string { - return fmt.Sprintf("%s/%s", ProcPath, mountPathSuffix) -} - -// NewMount collects data from the /proc/mounts system file and -// converts it into a slice of Mounts. -func NewMount() ([]Mount, error) { - f, err := os.Open(mountPath()) - if err != nil { - err = fmt.Errorf("Unable to collect mount metrics from %s - error: %s", mountPath(), err) - return []Mount{}, err - } - defer f.Close() - - return readMount(f) -} - -func readMount(f io.Reader) ([]Mount, error) { - scanner := bufio.NewScanner(f) - - var mounts []Mount - - for scanner.Scan() { - line := scanner.Text() - - mount, err := parseMount(line) - if err != nil { - return []Mount{}, err - } - mounts = append(mounts, mount) - } - return mounts, nil -} - -// parseMount parses a string and returns a Mount if the string is in -// the expected format. 
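-// A /proc/mounts entry has six fields: device, mount point, filesystem
-// type, mount options, and the dump and pass placeholders; only the first
-// three are kept.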
-func parseMount(line string) (Mount, error) {
- lineArray := strings.Fields(line)
-
- if len(lineArray) != 6 {
- err := fmt.Errorf("Unsupported %s format: %s", mountPath(), line)
- return Mount{}, err
- }
-
- return Mount{
- Device: lineArray[0],
- MountPoint: lineArray[1],
- FSType: lineArray[2],
- }, nil
-} diff --git a/procfs/mounts_test.go b/procfs/mounts_test.go deleted file mode 100644 index 79d17ce6..00000000 --- a/procfs/mounts_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "strings"
- "testing"
-)
-
-const testMountValues = `rootfs / rootfs rw 0 0
-sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
-proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
-udev /dev devtmpfs rw,relatime,size=178072k,nr_inodes=44518,mode=755 0 0
-devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
-tmpfs /run tmpfs rw,nosuid,relatime,size=74852k,mode=755 0 0
-/dev/mapper/precise64-root / ext4 rw,relatime,errors=remount-ro,user_xattr,barrier=1,data=ordered 0 0
-none /sys/fs/fuse/connections fusectl rw,relatime 0 0
-none /sys/kernel/debug debugfs rw,relatime 0 0
-none /sys/kernel/security securityfs rw,relatime 0 0
-none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
-none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0
-/dev/sda1 /boot ext2 rw,relatime,errors=continue 0 0
-rpc_pipefs /run/rpc_pipefs rpc_pipefs rw,relatime 0 0
-none /vagrant vboxsf rw,nodev,relatime 0 0
-`
-
-func TestNewMount(t *testing.T) {
- m, err := readMount(strings.NewReader(testMountValues))
- if err != nil {
- t.Errorf("Unable to read test values")
- }
-
- // Spot checking
- if m[0].Device != "rootfs" {
- t.Errorf("device not set properly: expected=%s actual=%s", "rootfs", m[0].Device)
- }
-
- if m[1].MountPoint != "/sys" {
- t.Errorf("mount point not set properly: expected=%s actual=%s", "/sys", m[1].MountPoint)
- }
-
- if m[2].FSType != "proc" {
- t.Errorf("file system type not set properly: expected=%s actual=%s", "proc", m[2].FSType)
- }
-}
-
-func TestParseMount(t *testing.T) {
- const testLine = "/dev/yyy1 /boot ext2 rw,relatime,errors=continue 0 0"
-
- m, err := parseMount(testLine)
- if err != nil {
- t.Errorf("error should not be present for line=%s", testLine)
- }
-
- if m.Device != "/dev/yyy1" {
- t.Errorf("device not set properly: expected=%s actual=%s", "/dev/yyy1", m.Device)
- }
-
- if m.MountPoint != "/boot" {
- t.Errorf("mount point not set properly: expected=%s actual=%s", "/boot", m.MountPoint)
- }
-
- if m.FSType != "ext2" {
- t.Errorf("file system type not set properly: expected=%s actual=%s", "ext2", m.FSType)
- }
-}
-
-func TestParseMountFail(t *testing.T) {
- const testLine = "/dev/zzz1 /boot"
-
- _, err := parseMount(testLine)
- if err == nil {
- t.Errorf("error should be present for line=%s", testLine)
- }
-} diff --git a/procfs/netdev.go b/procfs/netdev.go deleted file mode 100644 index 52e3d0da..00000000 ---
a/procfs/netdev.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-const networkPathSuffix = "net/dev"
-
-// Network contains the data exposed by the /proc/net/dev pseudo-file
-// system file.
-type Network struct {
- Interface string
- RXBytes uint64
- RXPackets uint64
- RXErrs uint64
- RXDrop uint64
- RXFifo uint64
- RXFrame uint64
- RXCompressed uint64
- RXMulticast uint64
- TXBytes uint64
- TXPackets uint64
- TXErrs uint64
- TXDrop uint64
- TXFifo uint64
- TXColls uint64
- TXCarrier uint64
- TXCompressed uint64
-}
-
-// Networker is a collection of network metrics exposed by the
-// procfs.
-type Networker interface {
- NewNetwork() ([]Network, error)
-}
-
-// networkPath returns the relative procfs location.
-func networkPath() string {
- return fmt.Sprintf("%s/%s", ProcPath, networkPathSuffix)
-}
-
-// NewNetwork collects data from the /proc/net/dev pseudo-file system
-// file and converts it into a Network struct.
-func NewNetwork() ([]Network, error) {
- f, err := os.Open(networkPath())
- if err != nil {
- err = fmt.Errorf("Unable to collect network metrics from %s - error: %s", networkPath(), err)
- return []Network{}, err
- }
- defer f.Close()
-
- return readNetwork(f)
-}
-
-func readNetwork(f io.Reader) ([]Network, error) {
- scanner := bufio.NewScanner(f)
-
- var networks []Network
-
- // Ignore the two header lines
- scanner.Scan()
- scanner.Scan()
- for scanner.Scan() {
- line := scanner.Text()
-
- network, err := parseNetwork(line)
- if err != nil {
- return []Network{}, err
- }
- networks = append(networks, network)
- }
- return networks, scanner.Err()
-}
-
-// parseNetwork parses a string and returns a Network if the string is
-// in the expected format.
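-// The interface name may abut the first counter (e.g. "eth0:2345774233"),
-// so fields are split on both ':' and ' ' rather than on whitespace alone.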
-func parseNetwork(line string) (Network, error) { - fields := strings.FieldsFunc(line, func(c rune) bool { - cStr := string(c) - return cStr == " " || cStr == ":" - }) - - if len(fields) != 17 { - return Network{}, errors.New("Field mismatch error while parsing: " + networkPath()) - } - - network := Network{} - network.Interface = fields[0] - network.RXBytes, _ = strconv.ParseUint(fields[1], 10, 64) - network.RXPackets, _ = strconv.ParseUint(fields[2], 10, 64) - network.RXErrs, _ = strconv.ParseUint(fields[3], 10, 64) - network.RXDrop, _ = strconv.ParseUint(fields[4], 10, 64) - network.RXFifo, _ = strconv.ParseUint(fields[5], 10, 64) - network.RXFrame, _ = strconv.ParseUint(fields[6], 10, 64) - network.RXCompressed, _ = strconv.ParseUint(fields[7], 10, 64) - network.RXMulticast, _ = strconv.ParseUint(fields[8], 10, 64) - network.TXBytes, _ = strconv.ParseUint(fields[9], 10, 64) - network.TXPackets, _ = strconv.ParseUint(fields[10], 10, 64) - network.TXErrs, _ = strconv.ParseUint(fields[11], 10, 64) - network.TXDrop, _ = strconv.ParseUint(fields[12], 10, 64) - network.TXFifo, _ = strconv.ParseUint(fields[13], 10, 64) - network.TXColls, _ = strconv.ParseUint(fields[14], 10, 64) - network.TXCarrier, _ = strconv.ParseUint(fields[15], 10, 64) - network.TXCompressed, _ = strconv.ParseUint(fields[16], 10, 64) - - return network, nil -} diff --git a/procfs/netdev_test.go b/procfs/netdev_test.go deleted file mode 100644 index f443b2d1..00000000 --- a/procfs/netdev_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "reflect" - "strings" - "testing" -) - -const testNetworkValues = `Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 258319 2330 0 0 0 0 0 0 186144 1875 0 0 0 0 0 0 -` - -const testNetworkValuesModSpace = `Inter-| Receive | Transmit -face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -eth0:2345774233 3330352 0 0 0 0 0 0 1345496555 2356444 0 0 0 2 0 0 -` - -func TestNewNetwork(t *testing.T) { - testCases := []struct { - label string - values string - wantLen int - }{ - {"baseline", testNetworkValues, 2}, - {"modSpacing", testNetworkValuesModSpace, 2}, - } - - for _, tc := range testCases { - t.Run(tc.label, func(t *testing.T) { - n, err := readNetwork(strings.NewReader(tc.values)) - if err != nil { - t.Fatal("unable to read test values") - } - - if tc.wantLen != len(n) { - t.Errorf("got %d; want %d", len(n), tc.wantLen) - } - }) - } -} - -func TestParseNetworkValues(t *testing.T) { - const line = " lo: 107 1 2 3 4 5 6 7 8 89 9 10 11 12 13 33" - const lineModSpace = "eth0:1247774233 1260352 0 0 0 0 0 0 1345496819 2356759 0 0 0 0 0 0" - - type testCase struct { - n string - expected uint64 - } - - testCases := []struct { - label string - line string - netInt string - values []testCase - }{ - { - "baseline", - line, - "lo", - []testCase{ - {"RXBytes", 107}, - {"RXPackets", 1}, - {"RXErrs", 2}, - {"RXDrop", 3}, - {"RXFifo", 4}, - {"RXFrame", 5}, - {"RXCompressed", 6}, - {"RXMulticast", 7}, - {"TXBytes", 8}, - {"TXPackets", 89}, - {"TXErrs", 9}, - {"TXDrop", 10}, - {"TXFifo", 11}, - {"TXColls", 12}, - {"TXCarrier", 13}, - {"TXCompressed", 33}, - }, - }, - { - "baseline", - lineModSpace, - "eth0", - []testCase{ - {"RXBytes", 1247774233}, - {"RXPackets", 1260352}, - {"RXErrs", 0}, - {"RXDrop", 0}, - {"RXFifo", 0}, - {"RXFrame", 0}, - {"RXCompressed", 0}, - {"RXMulticast", 0}, - {"TXBytes", 1345496819}, - {"TXPackets", 2356759}, - {"TXErrs", 0}, - {"TXDrop", 0}, - {"TXFifo", 0}, - {"TXColls", 0}, - {"TXCarrier", 0}, - {"TXCompressed", 0}, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.label, func(t *testing.T) { - - net, err := parseNetwork(tc.line) - if err != nil { - t.Fatal("Unable to parse") - } - - netr := reflect.ValueOf(net) - - for _, nt := range tc.values { - actual := reflect.Indirect(netr).FieldByName(nt.n).Uint() - if actual != nt.expected { - t.Errorf("want %d; got %d", nt.expected, actual) - } - } - - if net.Interface != tc.netInt { - t.Errorf("want %s; got %s", tc.netInt, net.Interface) - } - }) - } -} - -func TestParseNetworkFail(t *testing.T) { - const testFailLine = " lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" - - _, err := parseNetwork(testFailLine) - if err == nil { - t.Error("expected error did not occur") - } -} diff --git a/procfs/osrelease.go b/procfs/osrelease.go deleted file mode 100644 index 34506693..00000000 --- a/procfs/osrelease.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
-)
-
-const osReleaseSuffix = "sys/kernel/osrelease"
-
-// OSRelease contains the data exposed by the /proc/sys/kernel/osrelease pseudo-file
-// system file.
-type OSRelease string
-
-// OSReleaser is a collection of os release metrics exposed by the
-// procfs.
-type OSReleaser interface {
- NewOSRelease() (OSRelease, error)
-}
-
-// osReleasePath returns the relative procfs location.
-func osReleasePath() string {
- return fmt.Sprintf("%s/%s", ProcPath, osReleaseSuffix)
-}
-
-// NewOSRelease collects data from the /proc/sys/kernel/osrelease pseudo-file system file
-// and converts it into an OSRelease.
-func NewOSRelease() (OSRelease, error) {
- f, err := os.Open(osReleasePath())
- if err != nil {
- err = fmt.Errorf("Unable to collect kernel version from %s - error: %s", osReleasePath(), err)
- return "", err
- }
- defer f.Close()
-
- return readOSRelease(f)
-}
-
-func readOSRelease(f io.Reader) (OSRelease, error) {
- scanner := bufio.NewScanner(f)
-
- scanner.Scan()
- release := scanner.Text()
- return OSRelease(release), scanner.Err()
-} diff --git a/procfs/proc.go b/procfs/proc.go deleted file mode 100644 index bb4f4aaf..00000000 --- a/procfs/proc.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "sync"
-
- "github.com/prometheus/procfs"
-)
-
-// ProcProc contains the data exposed by various proc files in the
-// pseudo-file system.
-type ProcProc struct {
- PID int
- CPUUtilization float64 //fraction of total CPU time (0.0 ~ 1.0)
- ResidentMemory int //value in bytes
- VirtualMemory int //value in bytes
- Comm string
- CmdLine []string
-}
-
-// Procer is a collection of process metrics exposed by the
-// procfs.
-type Procer interface {
- NewProcProc() ([]ProcProc, error)
-}
-
-var state struct {
- lock sync.Mutex
- cpuTally map[int]uint64
- totalCPUTime uint64
-}
-
-// NewProcProc collects data from various proc pseudo-file system files
-// and converts it into a ProcProc structure.
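-// CPU utilization is computed as a delta: per-process CPU time and the
-// aggregate CPU time are remembered between calls, so the first call
-// reports zero utilization for every process.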
-func NewProcProc() ([]ProcProc, error) { - allProcs, err := procfs.AllProcs() - if err != nil { - return []ProcProc{}, err - } - - output := []ProcProc{} - newCPUTally := map[int]uint64{} - newTotalCPUTime, err := totalCPUTime() - - if err != nil { - return []ProcProc{}, err - } - - for _, proc := range allProcs { - cli, err := proc.CmdLine() - if err != nil || len(cli) == 0 { - continue - } - - comm, err := proc.Comm() - if err != nil { - continue - } - - stat, err := proc.NewStat() - if err != nil { - continue - } - - var utilization float64 - newProcCPUTime := uint64(stat.UTime + stat.STime) - newCPUTally[proc.PID] = newProcCPUTime - if _, exists := state.cpuTally[proc.PID]; exists { - utilization = float64(newProcCPUTime-state.cpuTally[proc.PID]) / float64(newTotalCPUTime-state.totalCPUTime) - } - - output = append(output, ProcProc{ - CmdLine: cli, - PID: proc.PID, - Comm: comm, - VirtualMemory: stat.VirtualMemory(), - ResidentMemory: stat.ResidentMemory(), - CPUUtilization: utilization, - }) - } - - state.lock.Lock() - defer state.lock.Unlock() - state.cpuTally = newCPUTally - state.totalCPUTime = newTotalCPUTime - - return output, nil -} - -func totalCPUTime() (uint64, error) { - stats, err := NewStat() - if err != nil { - return 0, err - } - - var aggregateCPU *CPU - for _, stat := range stats.CPUS { - if stat.CPU == "cpu" { - aggregateCPU = &stat - } - } - - if aggregateCPU == nil { - return 0, fmt.Errorf("could not find 'cpu' line in /proc/stat") - } - - return aggregateCPU.TotalTime(), nil -} diff --git a/procfs/stat.go b/procfs/stat.go deleted file mode 100644 index 039d81a1..00000000 --- a/procfs/stat.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const statPathSuffix = "stat" - -// CPU contains the data exposed by the /proc/stat pseudo-file system -// file for cpus. -type CPU struct { - CPU string - User uint64 - Nice uint64 - System uint64 - Idle uint64 - Iowait uint64 // since Linux 2.5.41 - Irq uint64 // since Linux 2.6.0-test4 - Softirq uint64 // since Linux 2.6.0-test4 - Steal uint64 // since Linux 2.6.11 - Guest uint64 // since Linux 2.6.24 - GuestNice uint64 // since Linux 2.6.33 -} - -// statPath returns the relative procfs location. -func statPath() string { - return fmt.Sprintf("%s/%s", ProcPath, statPathSuffix) -} - -// TotalTime (in jiffies) executed by this CPU -func (c CPU) TotalTime() uint64 { - return c.User + c.Nice + c.System + c.Idle + c.Iowait + c.Irq + c.Softirq + c.Steal + c.Guest + c.GuestNice -} - -// Stat contains the data exposed by the /proc/stat pseudo-file system -// file. -type Stat struct { - CPUS []CPU - Interrupt uint64 - ContextSwitch uint64 - Processes uint64 - ProcessesRunning uint64 - ProcessesBlocked uint64 -} - -// Stater is a collection of CPU and scheduler metrics exposed by the -// procfs. 
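-// NewStat is implemented by this package (below); the interface exists so
-// callers can substitute their own source of Stat data, matching the other
-// procfs interfaces in this package.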
-type Stater interface { - NewStat() (Stat, error) -} - -// NewStat collects data from the /proc/stat pseudo-file system file -// and converts it into a stat struct. -func NewStat() (Stat, error) { - f, err := os.Open(statPath()) - if err != nil { - err = fmt.Errorf("Unable to collect stat metrics from %s - error: %s", statPath(), err) - return Stat{}, err - } - defer f.Close() - - return readStat(f) -} - -func readStat(f io.Reader) (Stat, error) { - var stat Stat - - scanner := bufio.NewScanner(f) - - for scanner.Scan() { - line := scanner.Text() - - if strings.HasPrefix(line, "cpu") { - cpu, err := parseCPU(line) - if err != nil { - return stat, err - } - stat.CPUS = append(stat.CPUS, cpu) - } else { - err := parseStat(line, &stat) - if err != nil { - return stat, err - } - } - } - return stat, nil - -} - -// parseCPU parses a string and returns a CPU if the string is in the -// expected format. -func parseCPU(line string) (CPU, error) { - lineArray := strings.Fields(line) - - if len(lineArray) < 5 { - err := fmt.Errorf("Unsupported %s format: %s", statPath(), line) - return CPU{}, err - } - - for len(lineArray) < 11 { - lineArray = append(lineArray, "0") - } - - user, _ := strconv.ParseUint(lineArray[1], 10, 64) - nice, _ := strconv.ParseUint(lineArray[2], 10, 64) - system, _ := strconv.ParseUint(lineArray[3], 10, 64) - idle, _ := strconv.ParseUint(lineArray[4], 10, 64) - iowait, _ := strconv.ParseUint(lineArray[5], 10, 64) - irq, _ := strconv.ParseUint(lineArray[6], 10, 64) - softirq, _ := strconv.ParseUint(lineArray[7], 10, 64) - steal, _ := strconv.ParseUint(lineArray[8], 10, 64) - guest, _ := strconv.ParseUint(lineArray[9], 10, 64) - guestNice, _ := strconv.ParseUint(lineArray[10], 10, 64) - - metric := CPU{ - CPU: lineArray[0], - User: user, - Nice: nice, - System: system, - Idle: idle, - Iowait: iowait, - Irq: irq, - Softirq: softirq, - Steal: steal, - Guest: guest, - GuestNice: guestNice, - } - return metric, nil -} - -// parseStat parses a string and returns a Stat if the string is in -// the expected format. -func parseStat(line string, statMetric *Stat) error { - lineArray := strings.Fields(line) - if len(lineArray) < 2 { - err := fmt.Errorf("Invalid line format: \"%s\"", line) - return err - } - - switch lineArray[0] { - case "intr": - statMetric.Interrupt, _ = strconv.ParseUint(lineArray[1], 10, 64) - case "ctxt": - statMetric.ContextSwitch, _ = strconv.ParseUint(lineArray[1], 10, 64) - case "processes": - statMetric.Processes, _ = strconv.ParseUint(lineArray[1], 10, 64) - case "procs_running": - statMetric.ProcessesRunning, _ = strconv.ParseUint(lineArray[1], 10, 64) - case "procs_blocked": - statMetric.ProcessesBlocked, _ = strconv.ParseUint(lineArray[1], 10, 64) - } - //Default omitted due to unsupported fields. - return nil -} diff --git a/procfs/stat_test.go b/procfs/stat_test.go deleted file mode 100644 index 15fc6d36..00000000 --- a/procfs/stat_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "reflect" - "strings" - "testing" -) - -const testStatValues = `cpu 433 1 653 4451143 183 0 130 0 0 0 -cpu0 185 0 345 2224578 102 0 106 0 0 0 -cpu1 248 1 308 2226565 80 0 23 0 0 0 -intr 339569 44 9 0 0 0 0 0 0 0 0 0 0 133 0 20059 20881 0 0 0 15068 2271 6850 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 450500 -btime 1447251166 -processes 1693 -procs_running 1 -procs_blocked 0 -softirq 274487 0 86947 4456 14978 25431 0 3 83095 239 59338 -` - -func TestNewStat(t *testing.T) { - s, err := readStat(strings.NewReader(testStatValues)) - if err != nil { - t.Errorf("Unable to read test values") - t.Error(err) - } - - expectedLen := 3 - if len(s.CPUS) != expectedLen { - t.Errorf("Expected %d cpus items, actual was %d", expectedLen, len(s.CPUS)) - } - - expectedInterrupt := uint64(339569) - if s.Interrupt != expectedInterrupt { - t.Errorf("Expected %d cpus items, actual was %d", expectedInterrupt, s.Interrupt) - } -} - -func TestParseCPUValues(t *testing.T) { - const testLine = "cpu0 185 1 345 2224578 102 2 106 3 4 5" - - c, err := parseCPU(testLine) - if err != nil { - t.Errorf("Unexpected error occurred while parsing \"%s\" error=%s", testLine, err) - } - - cr := reflect.ValueOf(c) - - var cpuTestValues = []struct { - n string - expected uint64 - }{ - {"User", 185}, - {"Nice", 1}, - {"System", 345}, - {"Idle", 2224578}, - {"Iowait", 102}, - {"Irq", 2}, - {"Softirq", 106}, - {"Steal", 3}, - {"Guest", 4}, - {"GuestNice", 5}, - } - - for _, ct := range cpuTestValues { - actual := reflect.Indirect(cr).FieldByName(ct.n).Uint() - if actual != ct.expected { - t.Errorf("CPU.%s: expected %d, actual %d", ct.n, ct.expected, actual) - } - } - - expectedCPU := "cpu0" - if c.CPU != expectedCPU { - t.Errorf("CPU.CPU: expected %s, actual %s", expectedCPU, c.CPU) - } -} - -func TestParseCPUFail(t *testing.T) { - const testFailLine = "cpu1 248 1 308" - - _, err := parseCPU(testFailLine) - if err == nil { - t.Errorf("Expected error did not occur while parsing \"%s\", there aren't enough fields", testFailLine) - } -} - -func TestParseStat(t *testing.T) { - const testLine1 = "ctxt 450500" - const testLine2 = "procs_running 1" - const testFailLine = "btime" - - s := Stat{} - - err := parseStat(testLine1, &s) - if err != nil { - t.Errorf("Unexpected error occurred while parsing \"%s\" error=%s", testLine1, err) - } - - expectedCTXT := uint64(450500) - if s.ContextSwitch != expectedCTXT { - t.Errorf("Expected context switches %d, actual was %d", expectedCTXT, 
s.ContextSwitch)
- }
-
- err = parseStat(testLine2, &s)
- if err != nil {
- t.Errorf("Unexpected error occurred while parsing \"%s\" error=%s", testLine2, err)
- }
-
- expectedProcessesRunning := uint64(1)
- if s.ProcessesRunning != expectedProcessesRunning {
- t.Errorf("Expected running processes %d, actual was %d", expectedProcessesRunning, s.ProcessesRunning)
- }
-
- err = parseStat(testFailLine, &s)
- if err == nil {
- t.Errorf("Expected error did not occur while parsing \"%s\", there aren't enough fields", testFailLine)
- }
-} diff --git a/scripts/deploy.sh b/scripts/deploy.sh new file mode 100755 index 00000000..67bac66d --- /dev/null +++ b/scripts/deploy.sh @@ -0,0 +1,498 @@ +#!/usr/bin/env bash
+set -ueo pipefail
+#set -x
+
+ME=$(basename "$0")
+PROJECT_ROOT="$(git rev-parse --show-toplevel)"
+GPG_PRIVATE_KEY="$PROJECT_ROOT/sonar-agent.key"
+DOCKER_IMAGE="docker.io/digitalocean/do-agent"
+VERSION=${VERSION:-$(cat target/VERSION || true)}
+
+CI_LOG_URL=""
+if [ -n "${CI_BASE_URL:-}" ]; then
+ CI_LOG_URL="${CI_BASE_URL}/tab/build/detail/${GO_PIPELINE_NAME}/${GO_PIPELINE_COUNTER}/${GO_STAGE_NAME}/${GO_STAGE_COUNTER}/${GO_JOB_NAME}"
+fi
+
+# display usage for this script
+function usage() {
+ cat <<-EOF
+ NAME:
+
+ $ME
+
+ SYNOPSIS:
+
+ $ME <command>
+
+ DESCRIPTION:
+
+ The purpose of this script is to publish build artifacts
+ to Github, docker.io, and apt/yum repositories.
+
+ ENVIRONMENT:
+
+ VERSION (required)
+ The version to publish
+
+ GITHUB_AUTH_USER, GITHUB_AUTH_TOKEN (required)
+ Github access credentials
+
+ DOCKER_USER, DOCKER_PASSWORD (required)
+ Docker hub access credentials
+
+ SPACES_ACCESS_KEY_ID, SPACES_SECRET_ACCESS_KEY (required)
+ DigitalOcean Spaces access credentials
+
+ SLACK_WEBHOOK_URL (optional)
+ Webhook URL to send notifications. Enables Slack
+ notifications
+
+ COMMANDS:
+
+ github
+ push target/ assets to github
+
+ docker
+ push docker builds to hub.docker.io
+
+ spaces
+ publish artifacts to DigitalOcean Spaces
+
+ all
+ deploy all builds
+
+ EOF
+}
+
+function main() {
+ cmd=${1:-}
+
+ case "$cmd" in
+ github)
+ check_version
+ check_target_files
+ check_released
+ deploy_github
+ ;;
+ spaces)
+ check_version
+ check_target_files
+ check_released
+ deploy_spaces
+ ;;
+ docker)
+ check_version
+ check_target_files
+ check_released
+ deploy_docker
+ ;;
+ promote)
+ check_version
+ check_released
+ promote_github
+ promote_spaces
+ promote_docker
+ ;;
+ all)
+ check_version
+ check_target_files
+ check_released
+ deploy_spaces
+ deploy_github
+ deploy_docker
+ ;;
+ help|--help|-h)
+ usage
+ exit 0
+ ;;
+ *)
+ abort "Unknown command '$cmd'. See $ME --help for help"
+ ;;
+ esac
+}
+
+# verify the VERSION env var
+function check_version() {
+ [[ "${VERSION:-}" =~ [0-9]+\.[0-9]+\.[0-9]+ ]] || \
+ abort "VERSION is required and should be semver format (e.g. 1.2.34)"
+}
+
+# if a release with the VERSION tag is already published then we cannot deploy
+# this version over the previous release.
+function check_released() {
+ if [[ "${FORCE_RELEASE:-}" =~ ^(y|yes|1|true)$ ]]; then
+ echo
+ echo "WARNING! forcing a release of $VERSION"
+ echo
+ return 0
+ fi
+
+ status=$(curl -LISsL "https://insights.nyc3.digitaloceanspaces.com/yum-beta/x86_64/do-agent.${VERSION}.amd64.rpm" \
+ | grep 'HTTP/1.1')
+
+ case "$status" in
+ *'HTTP/1.1 404 Not Found'*)
+ return 0
+ ;;
+ *'HTTP/1.1 200 OK'*)
+ abort "'$VERSION' has already been released. Add a new git tag or pass FORCE_RELEASE=1."
+ ;;
+ *)
+ abort "Failed to check if a stable version already exists. Try again?
got -> $status" + ;; + esac +} + +function verify_gpg_key() { + stat "$GPG_PRIVATE_KEY" > /dev/null || abort "$GPG_PRIVATE_KEY is required" +} + +function deploy_spaces() { + pull_spaces + + anounce "Copying apt and yum packages" + + target_files | grep -P '\.deb$' | while IFS= read -r file; do + cp -Luv "$file" repos/apt/pool/beta/main/d/do-agent/ + done + + target_files | grep -P '\.rpm$' | while IFS= read -r file; do + dest=repos/yum-beta/x86_64/ + [[ "$file" =~ "i386" ]] && \ + dest=repos/yum-beta/i386/ + cp -Luv "$file" "$dest" + done + + rebuild_apt_packages + rebuild_yum_packages + + push_spaces +} + +function rebuild_apt_packages() { + verify_gpg_key + anounce "Rebuilding apt package indexes" + docker run \ + --rm -i \ + --net=host \ + -v "${PROJECT_ROOT}/repos/apt:/work/apt" \ + -v "${PROJECT_ROOT}/sonar-agent.key:/work/sonar-agent.key:ro" \ + -w /work \ + "docker.internal.digitalocean.com/eng-insights/agent-packager-apt" \ + || abort "Failed to rebuild apt package indexes" +} + +function rebuild_yum_packages() { + verify_gpg_key + anounce "Rebuilding yum package indexes" + docker run \ + --rm -i \ + --net=host \ + -v "${PROJECT_ROOT}/repos/yum:/work/yum" \ + -v "${PROJECT_ROOT}/repos/yum-beta:/work/yum-beta" \ + -v "${PROJECT_ROOT}/sonar-agent.key:/work/sonar-agent.key:ro" \ + -w /work \ + "docker.internal.digitalocean.com/eng-insights/agent-packager-yum" \ + || abort "Failed to rebuild yum package indexes" +} + +function pull_spaces() { + anounce "Pulling remote packages" + aws s3 \ + --endpoint-url https://nyc3.digitaloceanspaces.com \ + sync \ + s3://insights/ \ + ./repos/ \ + --quiet \ + --delete \ + --acl public-read +} + +function push_spaces() { + anounce "Pushing package changes" + aws s3 \ + --endpoint-url https://nyc3.digitaloceanspaces.com \ + sync \ + ./repos/ \ + s3://insights/ \ + --acl public-read +} + +# interact with the awscli via docker +function aws() { + docker run \ + --rm -i \ + -e "AWS_ACCESS_KEY_ID=${SPACES_ACCESS_KEY_ID}" \ + -e "AWS_SECRET_ACCESS_KEY=${SPACES_SECRET_ACCESS_KEY}" \ + -e "AWS_DEFAULT_REGION=nyc3" \ + -v "$PROJECT_ROOT:/project" \ + -w /project \ + -u "$(id -u)" \ + mesosphere/aws-cli \ + "$@" +} + + +# deploy the compiled binaries and packages to github releases +function deploy_github() { + if ! create_github_release ; then + abort "Github deploy failed" + fi + + upload_url=$(github_asset_upload_url) + + for file in $(target_files); do + name=$(basename "$file") + + echo "Uploading $name to github" + github_curl \ + -X "POST" \ + -H "Content-Type: $(content_type_for "$file")" \ + --data-binary "@${file}" \ + "$upload_url?name=$name" \ + | jq -r '. 
| "Success: \(.name)"' & + done + wait +} + +function promote_spaces() { + pull_spaces + + anounce "Copying deb and rpm packages to main channels" + cp -Luv "$PROJECT_ROOT/repos/apt/pool/beta/main/d/do-agent/do-agent_${VERSION}_.deb" "$PROJECT_ROOT/repos/apt/pool/main/main/d/do-agent/" + cp -Luv "$PROJECT_ROOT/repos/yum-beta/i386/do-agent.${VERSION}.i386.rpm" "$PROJECT_ROOT/repos/yum/i386/" + cp -Luv "$PROJECT_ROOT/repos/yum-beta/x86_64/do-agent.${VERSION}.amd64.rpm" "$PROJECT_ROOT/repos/yum/x86_64/" + + rebuild_apt_packages + rebuild_yum_packages + + push_spaces +} + +function promote_github() { + anounce "Removing prerelease flag from github release" + + github_curl \ + -D /dev/stderr \ + -X PATCH \ + --data-binary '{"prerelease":false}' \ + "$(github_release_url)" +} + +function promote_docker() { + anounce "Tagging docker $VERSION-rc as $VERSION" + + docker_login + local rc="$DOCKER_IMAGE:$VERSION-rc" + IFS=. read -r major minor _ <<<"$VERSION" + + docker pull "$rc" + + tags="latest $major $major.$minor $VERSION" + for tag in $tags; do + docker tag "$rc" "$DOCKER_IMAGE:$tag" + docker push "$DOCKER_IMAGE:$tag" + done +} + +# print the content type header for the provided file +function content_type_for() { + file=${1:-} + [ -z "$file" ] && abort "Usage: ${FUNCNAME[0]} " + case $file in + *.deb) echo "application/vnd.debian.binary-package" ;; + *.rpm) echo "application/x-rpm" ;; + *.tar.gz) echo "application/gzip" ;; + *) echo "application/octet-stream" + esac +} + +# get the asset upload URL for VERSION +function github_asset_upload_url() { + if base=$(github_release_url); then + echo "${base/api/uploads}/assets" + else + return 1 + fi +} + +# get the base release url for VERSION +function github_release_url() { + github_curl \ + "https://api.github.com/repos/digitalocean/do-agent/releases/tags/$VERSION" \ + | jq -r '. | "https://api.github.com/repos/digitalocean/do-agent/releases/\(.id)"' \ + | grep . +} + + +function rm_old_assets() { + assets=$(github_curl \ + "https://api.github.com/repos/digitalocean/do-agent/releases/tags/$VERSION" \ + | jq -r '.assets[].url') + for asset in $assets; do + echo "Removing old asset $asset" + github_curl \ + -X DELETE \ + "$asset" & + wait + done +} + +# create a github release for VERSION +function create_github_release() { + if github_release_url; then + echo "Github release exists $VERSION" + # we cannot upload the same asset twice so we have to delete + # the old assets before we can commense with uploads + rm_old_assets || abort "failed to purge Github release assets" + return 0 + fi + + echo "Creating Github release $VERSION" + + data=$(cat <<-EOF + { "tag_name": "$VERSION", "prerelease": true, "target_commitish": "beta" } + EOF + ) + echo "$data" + github_curl \ + -o /dev/null \ + -X POST \ + -H 'Content-Type: application/json' \ + -d "$data" \ + https://api.github.com/repos/digitalocean/do-agent/releases +} + +function docker_login() { + # gocd has an old version of docker that does not have --pasword-stdin + docker login -u "$DOCKER_USER" -p "$DOCKER_PASSWORD" +} + +# build and push the RC docker hub image. This image is considered unstable +# and should only be used for testing purposes +function deploy_docker() { + anounce "Pushing docker images" + docker_login + + docker build . 
-t "$DOCKER_IMAGE:unstable" + tags="${VERSION}-rc" + + for tag in $tags; do + docker tag "$DOCKER_IMAGE:unstable" "$DOCKER_IMAGE:$tag" + done + + for tag in $tags unstable; do + docker push "$DOCKER_IMAGE:$tag" + done +} + +# list the artifacts within the target/ directory +function target_files() { + find target/pkg -type f -iname "do-agent[._]${VERSION}[._]*" | grep . +} + +function check_target_files() { + target_files || abort "No packages for $VERSION were found in target/. Did you forget to run make?" +} + +# call CURL with github authentication +function github_curl() { + # if user and token are empty then bash will exit because of unbound vars + curl -SsL \ + --fail \ + -u "${GITHUB_AUTH_USER}:${GITHUB_AUTH_TOKEN}" \ + "$@" +} + +# abort with an error message +function abort() { + read -r line func file <<< "$(caller 0)" + echo "ERROR in $file:$func:$line: $1" > /dev/stderr + exit 1 +} + +# print something to STDOUT with formatting +# Usage: anounce "Some message" +# +# Examples: +# anounce "Begin execution of something" +# anounce "All is well" +# +function anounce() { + msg=${1:-} + [ -z "$msg" ] && abort "Usage: ${FUNCNAME[0]} " + echo ":::::::::::::::::::::::::::::::::::::::::::::::::: $msg ::::::::::::::::::::::::::::::::::::::::::::::::::" > /dev/stderr +} + + +# send a slack notification or fallback to STDERR +# Usage: notify [link] +# +# Examples: +# notify 0 "Deployed to Github failed!" +# notify "true" "Success!" "https://github.com/" +# +function notify() { + success=${1:-} msg=${2:-} link=${3:-} + + [ -z "${SLACK_WEBHOOK_URL:-}" ] && return 0 + + color="green" + [[ "$success" =~ ^(false|0|no)$ ]] && color="red" + + payload=$(cat <<-EOF + { + "attachments": [ + { + "fallback": "${msg}", + "color": "${color}", + "title": "${msg}", + "title_link": "${link}", + "text": "${msg}", + "fields": [ + { + "title": "App", + "value": "do-agent", + "short": true + }, + { + "title": "Version", + "value": "${VERSION}", + "short": true + }, + { + "title": "User", + "value": "${USER}", + "short": true + }, + { + "title": "Source", + "value": "$(hostname -s)", + "short": true + } + ] + } + ] + } + EOF + ) + + curl -sS -X POST \ + --fail \ + --data-binary "$payload" \ + "${SLACK_WEBHOOK_URL}" > /dev/null + + # always pass to prevent pipefailures + return 0 +} + +function notify_exit() { + if [ "$1" != "0" ]; then + notify 0 "Deploy failed" "${CI_LOG_URL:-}" + else + notify 1 "Deploy succeeded" + fi +} +trap 'notify_exit $?' 
diff --git a/scripts/install.sh b/scripts/install.sh
new file mode 100755
index 00000000..bdcbc261
--- /dev/null
+++ b/scripts/install.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+set -ueo pipefail
+
+# REPO="do-agent"
+# TODO use metadata api to determine beta
+REPO="do-agent-beta"
+dist=""
+
+function main() {
+	check_dist
+	kind=$( { [[ "$dist" =~ debian|ubuntu ]] && echo "deb"; } \
+		|| echo "rpm")
+
+	require_package curl
+
+	curl -s "https://packagecloud.io/install/repositories/digitalocean-insights/${REPO}/script.$kind.sh" \
+		| sudo bash
+
+	install_package do-agent
+}
+
+function install_package() {
+	[ -z "${1:-}" ] && \
+		abort "Usage: ${FUNCNAME[0]} <package>"
+
+	case "$dist" in
+		debian|ubuntu)
+			apt-get install -q -y "$1"
+			;;
+		centos|fedora)
+			yum -q -y install "$1"
+			;;
+		*)
+			not_supported
+			;;
+	esac
+}
+
+function update_package_info() {
+	case "$dist" in
+		debian|ubuntu)
+			apt-get update
+			;;
+		centos|fedora)
+			return
+			;;
+		*)
+			not_supported
+			;;
+	esac
+}
+
+
+function check_dist() {
+	if [ -f /etc/os-release ]; then
+		dist=$(awk -F= '$1 == "ID" {gsub("\"", ""); print$2}' /etc/os-release)
+	elif [ -f /etc/redhat-release ]; then
+		dist=$(awk '{print tolower($1)}' /etc/redhat-release)
+	else
+		not_supported
+	fi
+}
+
+function not_supported() {
+	abort "unsupported distribution. If you feel this is an error contact support@digitalocean.com"
+}
+
+function require_package() {
+	[ -z "${1:-}" ] && abort "Usage: ${FUNCNAME[0]} <package>"
+
+	pkg="$1"
+
+	if ! command -v "$pkg" &> /dev/null; then
+		update_package_info
+		install_package "$pkg"
+	fi
+}
+
+function abort() {
+	read -r line func file <<< "$(caller 0)"
+	echo "ERROR in $file:$func:$line: $1" > /dev/stderr
+	exit 1
+}
+
+# leave this last to prevent any partial executions
+main
diff --git a/scripts/licensecheck.sh b/scripts/licensecheck.sh
deleted file mode 100755
index 3f7621cc..00000000
--- a/scripts/licensecheck.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-# Verify that the correct license block is present in all Go source
-# files.
-read -r -d '' EXPECTED <
diff --git a/scripts/uat.sh b/scripts/uat.sh
new file mode 100755
--- /dev/null
+++ b/scripts/uat.sh
+#!/usr/bin/env bash
+#
+# Purpose: Provide simple UAT tasks for creating/updating/deleting droplets
+# configured with do-agent. To add another method to this script simply
+# create a new function called 'function command_<name>'. It will automatically
+# get picked up as a new command.
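+#
+# Example session (token value hypothetical; AUTH_TOKEN is required by the
+# request() helper below):
+#
+#   AUTH_TOKEN=xxxx ./scripts/uat.sh create   # one droplet per SUPPORTED_IMAGE
+#   AUTH_TOKEN=xxxx ./scripts/uat.sh status   # verify do-agent runs on each
+#   AUTH_TOKEN=xxxx ./scripts/uat.sh delete   # destroy all tagged droplets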
+ +set -ue + +# team context in the URL of the browser +CONTEXT=14661f +OS=$(uname | tr '[:upper:]' '[:lower:]') +TAG=do-agent-uat-${USER} +SUPPORTED_IMAGES="centos-6-x32 centos-6-x64 centos-7-x64 debian-8-x32 debian-8-x64 \ + debian-9-x64 fedora-27-x64 fedora-28-x64 ubuntu-14-04-x32 ubuntu-14-04-x64 \ + ubuntu-16-04-x32 ubuntu-16-04-x64 ubuntu-18-04-x64 ubuntu-18-10-x64" + +JONES_SSH_FINGERPRINT="a1:bc:00:38:56:1f:d2:b1:8e:0d:4f:9c:f0:dd:66:6d" +THOR_SSH_FINGERPRINT="c6:c6:01:e8:71:0a:58:02:2c:b3:e5:95:0e:b1:46:06" +EVAN_SSH_FINGERPRINT="b9:40:22:bd:fb:d8:fa:fa:4e:11:d9:8e:58:e9:41:73" +SNYDER_SSH_FINGERPRINT="47:31:9b:8b:87:a7:2d:26:79:17:87:83:53:65:d4:b4" + +# disabling literal '\n' error in shellcheck since that is the expected +# behavior because it will be added to the JSON request body and +# executed/expanded on the server +# shellcheck disable=SC1117 +USER_DATA_DEB="#!/usr/bin/env bash \n\ +[ -z \`command -v curl\` ] && apt-get -qq update && apt-get install -q -y curl \n\ +curl -sL https://insights.nyc3.cdn.digitaloceanspaces.com/do-agent-install.sh | sudo bash" + +# shellcheck disable=SC1117 +USER_DATA_RPM="#!/usr/bin/env bash \n\ +[ -z \`command -v curl\` ] && yum -y install curl \n\ +curl -sL https://insights.nyc3.cdn.digitaloceanspaces.com/do-agent-install.sh | sudo bash" + + +function main() { + cmd=${1:-} + [ -z "$cmd" ] && usage && exit 0 + shift + fn=command_$cmd + # disable requirement to quote 'fn' which would break this code + # shellcheck disable=SC2086 + if [ "$(type -t ${fn})" = function ]; then + ${fn} "$@" + else + usage + exit 1 + fi +} + +# show the help text +function command_help() { + usage +} + +# show script usage. This parses every function named command_ from this script +# and displays it as a possible command +function usage() { + echo + echo "Usage: $0 [command]" + echo + echo "Possible commands: " + grep -P '^function command_' "$0" \ + | sed 's,function command_,,g' \ + | sed 's,() {,,g' \ + | sort \ + | xargs -n1 -I{} echo " {}" + echo +} + +# delete all droplets tagged with $TAG +function command_delete() { + confirm "Are you sure you want to delete all droplets with the tag ${TAG}?" \ + || (echo "Aborted" && return 1) + + echo "Deleting..." + request DELETE "/droplets?tag_name=$TAG" \ + | jq . +} + +# list all droplet IP addresses tagged with $TAG +function command_list_ips() { + list_ips +} + +# list all droplet IDs tagged with $TAG +function command_list_ids() { + list_ids +} + +# list all droplets with all of their formatted metadata +function command_list() { + list | jq . 
+}
+
+# open the browser to show the list of droplets associated with $TAG
+function command_browse() {
+	launch "https://cloud.digitalocean.com/tags/$TAG?i=${CONTEXT}"
+}
+
+# open the graphs page of every droplet in the browser
+function command_graphs() {
+	urls=$(command_list_ids | xargs -n1 -I{} echo https://cloud.digitalocean.com/droplets/{}/graphs?i=${CONTEXT} | tee /dev/stderr)
+	if confirm "Open these urls?"; then
+		for u in $urls; do
+			launch "$u"
+		done
+	else
+		echo "Aborting"
+	fi
+}
+
+# create a droplet for every SUPPORTED_IMAGE and automatically install do-agent
+# using either apt or yum
+function command_create() {
+	for i in $SUPPORTED_IMAGES; do
+		create_image "$i" &
+	done
+	wait
+
+	if confirm "Open the tag list page?"; then
+		launch "https://cloud.digitalocean.com/tags/$TAG?i=${CONTEXT}"
+	fi
+}
+
+# ssh to all droplets and check the do-agent service status to verify
+# that it is indeed running
+function command_status() {
+	command_exec "if command -v systemctl &>/dev/null; then \
+		systemctl is-active do-agent; \
+	else \
+		initctl status do-agent; \
+	fi"
+}
+
+# ssh to all droplets and run yum/apt update to upgrade to the latest published
+# version of do-agent
+function command_update() {
+	command_exec "/bin/bash /opt/digitalocean/do-agent/scripts/update.sh"
+}
+
+# ssh to all droplets and execute a command
+function command_exec() {
+	[ -z "$*" ] && abort "Usage: $0 exec <command>"
+	exec_ips "$(list_ips)" "$*"
+}
+
+# ssh to all debian-based droplets (ubuntu/debian) and execute a command
+function command_exec_deb() {
+	exec_deb "$*"
+}
+
+# ssh to all rpm-based droplets (centos/fedora) and execute a command
+function command_exec_rpm() {
+	exec_rpm "$*"
+}
+
+# list droplet IP addresses for deb based distros
+function command_list_ips_deb() {
+	list_ips_deb
+}
+
+# list droplet IP addresses for rpm based distros
+function command_list_ips_rpm() {
+	list_ips_rpm
+}
+
+# execute a command against a list of IP addresses
+# Usage: exec_ips <ips> <command>
+# Example: exec_ips "$(list_ips_rpm)" "yum update DO agent"
+function exec_ips() {
+	{ [ -z "${1:-}" ] || [ -z "${2:-}" ]; } \
+		&& abort "Usage: ${FUNCNAME[0]} <ips> <command>"
+
+	ips=$1
+	shift
+	script="hostname -s; { $*; }"
+	echo "Dispatching..."
+	for ip in $ips; do
+		# shellcheck disable=SC2029
+		echo "$(echo
+			echo -n "$(tput setaf 2)>>>> $ip: $(tput sgr 0)"
+			ssh -o "StrictHostKeyChecking no" \
+				-o "LogLevel=ERROR" \
+				"root@${ip}" "${script}" 2>/dev/stdout || true
+		)" &
+	done
+	wait
+}
+
+# ssh to each droplet one after another. After each connection you will be
+# connected to the next in the list unless you press CTRL-C
+function command_ssh() {
+	for ip in $(list_ips); do
+		echo -n ">>>> $ip: "
+		# shellcheck disable=SC2029
+		ssh -o "StrictHostKeyChecking no" "root@${ip}"
+		sleep 0.2
+	done
+}
+
+# show version information about the remotely installed do-agent packages
+function command_versions() {
+	exec_deb 'apt-cache policy do-agent | head -n3'
+	exec_rpm 'yum --cacheonly list do-agent'
+}
+
+# list droplet IDs, names, and statuses, highlighting new vs active
+function command_create_status() {
+	list \
+		| jq -r '.droplets[] | "\(.id) [\(.name)] \(.status)"' \
+		| GREP_COLOR='1;31' grep -P --color=yes 'new|$' \
+		| GREP_COLOR='1;32' grep -P --color=yes 'active|$'
+}
+
+# scp a file to every host
+function command_scp() {
+	src=${1:-}; dest=${2:-}
+	if [ -z "$src" ] || [ -z "$dest" ]; then
+		abort "Usage: $0 scp <src> <dest>"
+	fi
+
+	for ip in $(list_ips); do
+		# shellcheck disable=SC2029
+		scp "$src" root@"${ip}":"$dest" &
+	done
+	wait
+}
+
+
+# ssh to all debian-based droplets (ubuntu/debian) and execute a command
+function exec_deb() {
+	[ -z "$*" ] && abort "Usage: $0 exec_deb <command>"
+	exec_ips "$(list_ips_deb)" "$*"
+}
+
+# ssh to all rpm-based droplets (centos/fedora) and execute a command
+function exec_rpm() {
+	[ -z "$*" ] && abort "Usage: $0 exec_rpm <command>"
+	exec_ips "$(list_ips_rpm)" "$*"
+}
+
+# list all droplets without formatting
+function list() {
+	request GET "/droplets?tag_name=$TAG"
+}
+
+function list_ips() {
+	list | jq -r '.droplets[].networks.v4[] | select(.type=="public") | .ip_address'
+}
+
+function list_ids() {
+	list | jq -r '.droplets[].id'
+}
+
+function list_ips_deb() {
+	list | \
+		jq -r '.droplets[]
+			| select(
+				.image.distribution=="Debian"
+				or
+				.image.distribution=="Ubuntu"
+			)
+			| .networks.v4[]
+			| select(.type=="public")
+			| .ip_address'
+}
+
+function list_ips_rpm() {
+	list | \
+		jq -r '.droplets[]
+			| select(
+				.image.distribution=="CentOS"
+				or
+				.image.distribution=="Fedora"
+			)
+			| .networks.v4[]
+			| select(.type=="public")
+			| .ip_address'
+}
+
+# create a droplet with the provided image
+function create_image() {
+	image=$1
+	if [ -z "$image" ]; then
+		abort "Usage: ${FUNCNAME[0]} <image>"
+	else
+		echo "Creating image $image..."
+	fi
+
+	user_data=${USER_DATA_RPM}
+	[[ "$image" =~ debian|ubuntu ]] && user_data=${USER_DATA_DEB}
+
+	body=$(mktemp)
+	cat <<EOF > "$body"
+  {
+    "name": "$image",
+    "region": "nyc3",
+    "size": "s-1vcpu-1gb",
+    "image": "$image",
+    "ssh_keys": [
+      "${JONES_SSH_FINGERPRINT}",
+      "${THOR_SSH_FINGERPRINT}",
+      "${EVAN_SSH_FINGERPRINT}",
+      "${SNYDER_SSH_FINGERPRINT}"
+    ],
+    "backups": false,
+    "ipv6": false,
+    "user_data": "${user_data}",
+    "tags": [ "${TAG}" ]
+  }
+EOF
+
+	request POST "/droplets" "@${body}" \
+		| jq -r '.droplet | "Created: \(.id): \(.name)"'
+
+}
+
+
+# Make an HTTP request to the API. The DATA param is optional
+#
+# Usage: request [METHOD] [PATH] [DATA]
+#
+# Examples:
+#    request "GET" "/droplets"
+#    request "POST" "/droplets" "@some-file.json"
+#    request "POST" "/droplets" '{"some": "data"}'
+#    request "DELETE" "/droplets/1234567"
+function request() {
+	[ -z "${AUTH_TOKEN:-}" ] && abort "AUTH_TOKEN is not set"
+
+	METHOD=${1:-}
+	URL=${2:-}
+	DATA=${3:-}
+
+	[ -z "$METHOD" ] && abort "Usage: ${FUNCNAME[0]} [METHOD] [PATH] [DATA]"
+
+	if [[ ! "$URL" =~ ^/ ]] || [[ "$URL" =~ /v2 ]]; then
+		abort "URL param should be a relative path not including v2 (e.g. /droplets). Got '$URL'"
+	fi
+
+
+	curl -SsL \
+		-X "$METHOD" \
+		-H "Content-Type: application/json" \
+		-H "Authorization: Bearer ${AUTH_TOKEN}" \
+		-d "$DATA" \
+		"https://api.digitalocean.com/v2$URL"
+}
+
+# ask the user for input
+function ask() {
+	question=${1:-}
+	[ -z "$question" ] && abort "Usage: ${FUNCNAME[0]} <question>"
+	read -p "$question " -n 1 -r
+	echo -n "$REPLY"
+}
+
+# ask the user for a yes or no answer. Returns 0 for yes or 1 for no.
+function confirm() {
+	question="$1 (y/n)"
+	yn=$(ask "$question")
+	echo
+	[[ $yn =~ ^[Yy]$ ]] && return 0
+	return 1
+}
+
+# launch a uri with the system's default application (browser)
+function launch() {
+	uri=${1:-}
+	[ -z "$uri" ] && abort "Usage: ${FUNCNAME[0]} <uri>"
+
+	if [[ "$OS" =~ linux ]]; then
+		xdg-open "$uri"
+	else
+		open "$uri"
+	fi
+}
+
+# abort with an error message
+function abort() {
+	read -r line func file <<< "$(caller 0)"
+	echo "ERROR in $file:$func:$line: $1" > /dev/stderr
+	exit 1
+}
+
+# never put anything below this line. This is to prevent any partial execution
+# if curl ever interrupts the download prematurely. In that case, this script
+# will not execute since this is the last line in the script.
+err_report() { echo "Error on line $1"; }
+trap 'err_report $LINENO' ERR
+main "$@"
diff --git a/scripts/update.sh b/scripts/update.sh
new file mode 100755
index 00000000..a9286f3e
--- /dev/null
+++ b/scripts/update.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# vim: noexpandtab
+
+main() {
+	if command -v apt-get &>/dev/null; then
+		apt-get update -qq
+		apt-get install -q -y --only-upgrade do-agent
+	elif command -v yum &>/dev/null; then
+		yum -q -y update do-agent
+	fi
+}
+
+main
diff --git a/update/errors.go b/update/errors.go
deleted file mode 100644
index 9df9c031..00000000
--- a/update/errors.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 DigitalOcean
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package update - -import ( - "errors" - "fmt" -) - -var ( - // ErrRootKeyParseFailed invalid root key format error - ErrRootKeyParseFailed = errors.New("invalid root key format") - - // ErrReadFileFailed read file error - ErrReadFileFailed = errors.New("unable to read file") - - // ErrWriteFileFailed write file error - ErrWriteFileFailed = errors.New("unable to write to file") - - // ErrExecuteBinary inability to execute binary error - ErrExecuteBinary = errors.New("unable to execute binary") - - // ErrBackupBinary binary backup error - ErrBackupBinary = errors.New("unable to backup binary") - - // ErrInvalidVersionFormat version parseing error - ErrInvalidVersionFormat = errors.New("unable to parse version") - - // ErrUnableToRetrieveTargets target list retrieval failure - ErrUnableToRetrieveTargets = errors.New("unable to retrieve target file list") - - // ErrUpdateNotAvailable could not find any updates - ErrUpdateNotAvailable = errors.New("no updates available") -) - -// ErrUnableToCreateLocalStore creating repository error -type ErrUnableToCreateLocalStore struct { - Path string -} - -func (e ErrUnableToCreateLocalStore) Error() string { - return fmt.Sprintf("Update: Unable to create repository file: %s", e.Path) -} - -// ErrUnableToQueryRemoteStore querying repository error -type ErrUnableToQueryRemoteStore struct { - StoreURL string -} - -func (e ErrUnableToQueryRemoteStore) Error() string { - return fmt.Sprintf("Update: Unable to query repository file: %s", e.StoreURL) -} - -// ErrUnableToCreateTempfile creating temp file error -type ErrUnableToCreateTempfile struct { - Path string -} - -func (e ErrUnableToCreateTempfile) Error() string { - return fmt.Sprintf("Update: Unable to create temp file: %s", e.Path) -} - -// ErrUnableToInitializeRepo initializing repository error -type ErrUnableToInitializeRepo struct { - Reason string -} - -func (e ErrUnableToInitializeRepo) Error() string { - return fmt.Sprintf("Update: Error initializing repository: %s", e.Reason) -} - -//ErrUnableToUpdateRepo updating repository error -type ErrUnableToUpdateRepo struct { - Reason string -} - -func (e ErrUnableToUpdateRepo) Error() string { - return fmt.Sprintf("Update: Error updating repository: %s", e.Reason) -} - -// ErrDownloadingTarget downloading target error -type ErrDownloadingTarget struct { - Reason string -} - -func (e ErrDownloadingTarget) Error() string { - return fmt.Sprintf("Update: Error downloading target: %s", e.Reason) -} - -// ErrUnableToDetermineRunningProcess determining running process error -type ErrUnableToDetermineRunningProcess struct { - Reason string -} - -func (e ErrUnableToDetermineRunningProcess) Error() string { - return fmt.Sprintf("Update: Error determining running process: %s", e.Reason) -} diff --git a/update/files.go b/update/files.go deleted file mode 100644 index 8f13d427..00000000 --- a/update/files.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package update - -import ( - "io" - "io/ioutil" - "os" -) - -// Destination is a required interface for the tuf client download function -type Destination interface { - io.Writer - Delete() error - Name() string -} - -type tempFile struct { - *os.File -} - -// NewTempFile creates a temporary file that implements the -// Deestination interface required by go-tuf. It is a transient file -// that will be used as a temporary buffer for tuf targets. -func NewTempFile(path, prefix string) (Destination, error) { - file, err := ioutil.TempFile(path, prefix) - if err != nil { - return nil, err - } - return &tempFile{file}, nil -} - -func (t tempFile) Delete() error { - t.File.Close() - return os.Remove(t.Name()) -} - -func (t tempFile) Close() error { - return t.Delete() -} diff --git a/update/update.go b/update/update.go deleted file mode 100644 index 75ee6c5b..00000000 --- a/update/update.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package update - -import ( - "errors" - "fmt" - "os" - "runtime" - "strings" - - "github.com/digitalocean/do-agent/config" - - "github.com/flynn/go-tuf/client" - tufdata "github.com/flynn/go-tuf/data" -) - -const ( - rootKey = `[{"keytype":"ed25519","keyval":{"public":"288a52b43e69e1c1a1a5552707aa50900e59cfead3870d2d17f9a49a48d9407c"}}]` - - // RepoURL is the remote TUF repository URL - RepoURL = "https://repos.sonar.digitalocean.com/tuf" - - // RepoLocalStore is the local repository path - RepoLocalStore = "/var/opt/digitalocean/do-agent" - - backupExt = ".bak" -) - -// Updater is an interface that checks for updates of the running -// binary and exec's the new binary once updated. A boolean value is -// provided as a flag to force updates from development versions to -// whatever production version is available in the repositories. -type Updater interface { - FetchLatestAndExec(bool) error - FetchLatest(bool) error -} - -// Update manages the communication with the local repository, the -// remote tuf repository and the file manipulations that happen. -type update struct { - localStorePath string - repositoryURL string - client *client.Client -} - -// NewUpdate returns an Update object which has the components for a tuf client. 
-func NewUpdate(localStorePath, repositoryURL string) Updater { - return &update{ - localStorePath: localStorePath, - repositoryURL: repositoryURL, - } -} - -func (u *update) createTufClient() (*client.Client, error) { - if u.client != nil { - return u.client, nil - } - - localStoreFile := fmt.Sprintf("%s%s", u.localStorePath, "/tufLocalStore") - ls, err := client.FileLocalStore(localStoreFile) - if err != nil { - return nil, ErrUnableToCreateLocalStore{Path: localStoreFile} - } - - rs, err := client.HTTPRemoteStore(u.repositoryURL, nil) - if err != nil { - return nil, ErrUnableToQueryRemoteStore{StoreURL: u.repositoryURL} - } - - tc := client.NewClient(ls, rs) - u.client = tc - return tc, nil -} - -func (u *update) downloadTarget(target string, tmp Destination) error { - err := u.client.Download(target, tmp) - if err != nil { - return ErrDownloadingTarget{Reason: err.Error()} - } - return nil -} - -func (u *update) prepareTufRepository(rootKeys []*tufdata.Key) error { - err := u.client.Init(rootKeys, len(rootKeys)) - if err != nil { - return ErrUnableToInitializeRepo{Reason: err.Error()} - } - - if _, err = u.client.Update(); err != nil && !client.IsLatestSnapshot(err) { - return ErrUnableToUpdateRepo{Reason: err.Error()} - } - return nil -} - -func (u *update) findUpdates(forceUpdate bool) (string, error) { - baseVersion := fmt.Sprintf("/do-agent/do-agent_%s_%s_", runtime.GOOS, runtime.GOARCH) - - targets, err := u.client.Targets() - if err != nil { - return "", ErrUnableToRetrieveTargets - } - - for target := range targets { - if strings.Contains(target, baseVersion) { - newVersion := strings.TrimLeft(target, baseVersion) - if upgradeVersion(config.Version(), newVersion, forceUpdate) { - return target, nil - } - } - } - return "", ErrUpdateNotAvailable -} - -// FetchLatestAndExec fetches the lastest do-agent binary, replaces -// the running binary and calls exec on the new binary. -func (u *update) FetchLatestAndExec(forceUpdate bool) error { - // record bin path before update - binPathOrig, err := currentExecPath() - if err != nil { - return ErrUnableToDetermineRunningProcess{Reason: err.Error()} - } - - binBackupPath := fmt.Sprintf("%s%s", binPathOrig, backupExt) - - // delete any pre-existing backup files - if _, err := os.Stat(binBackupPath); err == nil { - _ = os.Remove(binBackupPath) - } - - if err := u.FetchLatest(forceUpdate); err != nil { - return err - } - - binPath, err := currentExecPath() - if err != nil { - return ErrUnableToDetermineRunningProcess{Reason: err.Error()} - } - - if err := executeBinary(binPathOrig); err == nil { - return nil - } - - // when downloaded binary fails to exec, replace it with the - // backed up binary. Update the running binary path. - if err := os.Rename(binBackupPath, binPath); err != nil { - return errors.New("restoring backed up file") - } - return ErrExecuteBinary -} - -// FetchLatest fetches the lastest do-agent binary and replace the -// running binary. In the event of an error it attempts to rollback to -// the previous version of do-agent. 
-func (u *update) FetchLatest(forceUpdate bool) error { - if _, err := u.createTufClient(); err != nil { - return err - } - - tempFile, err := NewTempFile(u.localStorePath, "temp_tuf") - if err != nil { - return ErrUnableToCreateTempfile{Path: err.Error()} - } - defer tempFile.Delete() - - rootKeys, err := parseKeys(rootKey) - if err != nil { - return err - } - - if err = u.prepareTufRepository(rootKeys); err != nil { - return err - } - - upgradeTarget, err := u.findUpdates(forceUpdate) - if err != nil { - return err - } - - if err := u.downloadTarget(upgradeTarget, tempFile); err != nil { - return err - } - - curFilePath, err := currentExecPath() - if err != nil { - return ErrUnableToDetermineRunningProcess{Reason: err.Error()} - } - - curFilePathBackup := curFilePath + backupExt - - // move current file to backup location - if err := os.Rename(curFilePath, curFilePathBackup); err != nil { - return err - } - - // copy downloaded file to current binary location - if err := copyFile(tempFile.Name(), curFilePath); err == nil { - return nil - } - - return nil -} diff --git a/update/update_test.go b/update/update_test.go deleted file mode 100644 index da515c4e..00000000 --- a/update/update_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package update - -import "testing" - -func TestCreateTufClient(t *testing.T) { - u := &update{ - localStorePath: "/tmp", - repositoryURL: "not a url", - client: nil, - } - - _, err := u.createTufClient() - if err == nil { - t.Error("expected error, received nil") - } - - u2 := &update{ - localStorePath: "/tmp/aalllallalalallal", - repositoryURL: "http://www.digitalocean.com", - client: nil, - } - - _, err2 := u2.createTufClient() - if err2 == nil { - t.Error("expected error, received nil") - } -} diff --git a/update/util.go b/update/util.go deleted file mode 100644 index 4f93ee85..00000000 --- a/update/util.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package update - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strconv" - "strings" - "syscall" - - tufdata "github.com/flynn/go-tuf/data" -) - -const deletedTag = " (deleted)" - -// currentExecPath returns the path of the current running executable -func currentExecPath() (string, error) { - path, err := os.Readlink("/proc/self/exe") - if err != nil { - return "", err - } - path = strings.TrimSuffix(path, deletedTag) - path = strings.TrimPrefix(path, deletedTag) - return path, nil -} - -func parseKeys(rootKeyJSON string) ([]*tufdata.Key, error) { - var rootKeys []*tufdata.Key - - if err := json.Unmarshal([]byte(rootKeyJSON), &rootKeys); err != nil { - return nil, ErrRootKeyParseFailed - } - return rootKeys, nil -} - -func runningTarget() string { - return fmt.Sprintf("/do-agent/do-agent_%s_%s", runtime.GOOS, runtime.GOARCH) -} - -func copyFile(srcPath, dstPath string) error { - buf, err := ioutil.ReadFile(srcPath) - if err != nil { - return ErrReadFileFailed - } - - if err := ioutil.WriteFile(dstPath, buf, 0755); err != nil { - if err == io.ErrShortWrite { - return ErrWriteFileFailed - } - return err - } - return nil -} - -// executeBinary calls exec() on the file located in the path. The binary -// found at that path will replace the current running process. -func executeBinary(path string) error { - args := os.Args - args[0] = path - env := os.Environ() - - err := syscall.Exec(path, args, env) - if err != nil { - return ErrExecuteBinary - } - return nil -} - -// Version represents a semantic version -type Version struct { - major int64 - minor int64 - patch int64 -} - -func (v *Version) String() string { - return fmt.Sprintf("%d.%d.%d", v.major, v.minor, v.patch) -} - -func upgradeVersion(current string, new string, overrideDev bool) bool { - if overrideDev && new != "dev" { - return true - } - - if current == "dev" || new == "dev" { - return false - } - - c, err := parseVersion(current) - if err != nil { - return false - } - n, err := parseVersion(new) - if err != nil { - return false - } - - if c.major < n.major { - return true - } - if c.major == n.major && c.minor < n.minor { - return true - } - if c.major == n.major && c.minor == n.minor && c.patch < n.patch { - return true - } - - return false -} - -func parseVersion(version string) (*Version, error) { - parts := strings.Split(version, ".") - if len(parts) != 3 { - return nil, ErrInvalidVersionFormat - } - - major, err := strconv.ParseInt(parts[0], 10, 64) - if err != nil { - return nil, ErrInvalidVersionFormat - } - minor, err := strconv.ParseInt(parts[1], 10, 64) - if err != nil { - return nil, ErrInvalidVersionFormat - } - patch, err := strconv.ParseInt(parts[2], 10, 64) - if err != nil { - return nil, ErrInvalidVersionFormat - } - - return &Version{ - major: major, - minor: minor, - patch: patch, - }, nil -} diff --git a/update/util_test.go b/update/util_test.go deleted file mode 100644 index 017ee370..00000000 --- a/update/util_test.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 DigitalOcean -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package update - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "testing" -) - -func TestCurrentExecPath(t *testing.T) { - if _, err := os.Stat("/proc/self/exe"); err != nil { - _, err2 := currentExecPath() - if err2 == nil { - t.Error("expected an error, got nil") - } - } -} - -func TestParseKeys(t *testing.T) { - const ( - key1 = "" - ) - _, err := parseKeys(key1) - if err == nil { - t.Error("expected an error, got nil") - } -} - -func TestRunningTarget(t *testing.T) { - te := fmt.Sprintf("/do-agent/do-agent_%s_%s", runtime.GOOS, runtime.GOARCH) - ta := runningTarget() - - if ta != te { - t.Errorf("expected %s, got %s", te, ta) - } -} - -func TestCopyFile(t *testing.T) { - src, err := ioutil.TempFile("/tmp", "agent_test") - if err != nil { - t.Error("unable to create file for test.") - } - defer os.Remove(src.Name()) - - dst, err := ioutil.TempFile("/tmp", "agent_test") - if err != nil { - t.Error("unable to create file for test.") - } - defer os.Remove(dst.Name()) - - if err := ioutil.WriteFile(src.Name(), []byte("hello"), 0555); err != nil { - t.Error("unable to write to test file.") - } - - if err := copyFile(src.Name(), dst.Name()); err != nil { - t.Error("copy file failed") - } - - b, err := ioutil.ReadFile(dst.Name()) - if err != nil { - t.Error("unable to read file contents") - } - - if string(b) != "hello" { - t.Error("contents written do not match contents read") - } -} - -func TestUpgradeVersion(t *testing.T) { - var upgradeValues = []struct { - cVersion string - nVersion string - fUpdate bool - result bool - }{ - {"0.0.0", "1.0.0", false, true}, - {"0.0.0", "0.1.0", false, true}, - {"0.0.0", "0.0.1", false, true}, - {"9.0.0", "0.0.0", false, false}, - {"0.9.0", "0.0.0", false, false}, - {"0.0.9", "0.0.0", false, false}, - {"1.0.0", "1.1.0", false, true}, - {"0.1.0", "0.1.1", false, true}, - {"0.0.1", "0.0.1", false, false}, - {"1.9.9", "0.10.10", false, false}, - {"1.3.2", "1.3.2", false, false}, - {"dev", "1.2.3", false, false}, - {"dev", "1.2.3", true, true}, - {"1.2.4", "dev", false, false}, - {"dev", "dev", false, false}, - {"dev", "dev", true, false}, - } - - for _, tt := range upgradeValues { - r := upgradeVersion(tt.cVersion, tt.nVersion, tt.fUpdate) - if r != tt.result { - t.Errorf("version: %s new_version: %s force: %+v expected %t got %t", tt.cVersion, tt.nVersion, tt.fUpdate, tt.result, r) - } - } -} - -func TestParseVersion(t *testing.T) { - var eMajor, eMinor, ePatch int64 = 1, 2, 3 - - v, err := parseVersion("1.2.3") - if v.major != eMajor { - t.Errorf("expected %d got %d", eMajor, v.major) - } - if v.minor != eMinor { - t.Errorf("expected %d got %d", eMinor, v.minor) - } - if v.patch != ePatch { - t.Errorf("expected %d got %d", ePatch, v.patch) - } - - if err != nil { - t.Error("expected nil error") - } - - _, err = parseVersion("...") - if err == nil { - t.Error("expected error") - } - -} diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/github.com/alecthomas/template/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md new file mode 100644 index 00000000..ef6a8ee3 --- /dev/null +++ b/vendor/github.com/alecthomas/template/README.md @@ -0,0 +1,25 @@ +# Go's `text/template` package with newline elision + +This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline. + +eg. + +``` +{{if true}}\ +hello +{{end}}\ +``` + +Will result in: + +``` +hello\n +``` + +Rather than: + +``` +\n +hello\n +\n +``` diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go new file mode 100644 index 00000000..223c595c --- /dev/null +++ b/vendor/github.com/alecthomas/template/doc.go @@ -0,0 +1,406 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package template implements data-driven templates for generating textual output. + +To generate HTML output, see package html/template, which has the same interface +as this package but automatically secures HTML output against certain attacks. + +Templates are executed by applying them to a data structure. Annotations in the +template refer to elements of the data structure (typically a field of a struct +or a key in a map) to control execution and derive values to be displayed. +Execution of the template walks the structure and sets the cursor, represented +by a period '.' and called "dot", to the value at the current location in the +structure as execution proceeds. + +The input text for a template is UTF-8-encoded text in any format. +"Actions"--data evaluations or control structures--are delimited by +"{{" and "}}"; all text outside actions is copied to the output unchanged. +Actions may not span newlines, although comments can. + +Once parsed, a template may be executed safely in parallel. 
+ +Here is a trivial example that prints "17 items are made of wool". + + type Inventory struct { + Material string + Count uint + } + sweaters := Inventory{"wool", 17} + tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") + if err != nil { panic(err) } + err = tmpl.Execute(os.Stdout, sweaters) + if err != nil { panic(err) } + +More intricate examples appear below. + +Actions + +Here is the list of actions. "Arguments" and "pipelines" are evaluations of +data, defined in detail below. + +*/ +// {{/* a comment */}} +// A comment; discarded. May contain newlines. +// Comments do not nest and must start and end at the +// delimiters, as shown here. +/* + + {{pipeline}} + The default textual representation of the value of the pipeline + is copied to the output. + + {{if pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, T1 is executed. The empty values are false, 0, any + nil pointer or interface value, and any array, slice, map, or + string of length zero. + Dot is unaffected. + + {{if pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, T0 is executed; + otherwise, T1 is executed. Dot is unaffected. + + {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} + To simplify the appearance of if-else chains, the else action + of an if may include another if directly; the effect is exactly + the same as writing + {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} + + {{range pipeline}} T1 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, nothing is output; + otherwise, dot is set to the successive elements of the array, + slice, or map and T1 is executed. If the value is a map and the + keys are of basic type with a defined order ("comparable"), the + elements will be visited in sorted key order. + + {{range pipeline}} T1 {{else}} T0 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, dot is unaffected and + T0 is executed; otherwise, dot is set to the successive elements + of the array, slice, or map and T1 is executed. + + {{template "name"}} + The template with the specified name is executed with nil data. + + {{template "name" pipeline}} + The template with the specified name is executed with dot set + to the value of the pipeline. + + {{with pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, dot is set to the value of the pipeline and T1 is + executed. + + {{with pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, dot is unaffected and T0 + is executed; otherwise, dot is set to the value of the pipeline + and T1 is executed. + +Arguments + +An argument is a simple value, denoted by one of the following. + + - A boolean, string, character, integer, floating-point, imaginary + or complex constant in Go syntax. These behave like Go's untyped + constants, although raw strings may not span newlines. + - The keyword nil, representing an untyped Go nil. + - The character '.' (period): + . + The result is the value of dot. + - A variable name, which is a (possibly empty) alphanumeric string + preceded by a dollar sign, such as + $piOver2 + or + $ + The result is the value of the variable. + Variables are described below. + - The name of a field of the data, which must be a struct, preceded + by a period, such as + .Field + The result is the value of the field. 
Field invocations may be + chained: + .Field1.Field2 + Fields can also be evaluated on variables, including chaining: + $x.Field1.Field2 + - The name of a key of the data, which must be a map, preceded + by a period, such as + .Key + The result is the map element value indexed by the key. + Key invocations may be chained and combined with fields to any + depth: + .Field1.Key1.Field2.Key2 + Although the key must be an alphanumeric identifier, unlike with + field names they do not need to start with an upper case letter. + Keys can also be evaluated on variables, including chaining: + $x.key1.key2 + - The name of a niladic method of the data, preceded by a period, + such as + .Method + The result is the value of invoking the method with dot as the + receiver, dot.Method(). Such a method must have one return value (of + any type) or two return values, the second of which is an error. + If it has two and the returned error is non-nil, execution terminates + and an error is returned to the caller as the value of Execute. + Method invocations may be chained and combined with fields and keys + to any depth: + .Field1.Key1.Method1.Field2.Key2.Method2 + Methods can also be evaluated on variables, including chaining: + $x.Method1.Field + - The name of a niladic function, such as + fun + The result is the value of invoking the function, fun(). The return + types and values behave as in methods. Functions and function + names are described below. + - A parenthesized instance of one the above, for grouping. The result + may be accessed by a field or map key invocation. + print (.F1 arg1) (.F2 arg2) + (.StructValuedMethod "arg").Field + +Arguments may evaluate to any type; if they are pointers the implementation +automatically indirects to the base type when required. +If an evaluation yields a function value, such as a function-valued +field of a struct, the function is not invoked automatically, but it +can be used as a truth value for an if action and the like. To invoke +it, use the call function, defined below. + +A pipeline is a possibly chained sequence of "commands". A command is a simple +value (argument) or a function or method call, possibly with multiple arguments: + + Argument + The result is the value of evaluating the argument. + .Method [Argument...] + The method can be alone or the last element of a chain but, + unlike methods in the middle of a chain, it can take arguments. + The result is the value of calling the method with the + arguments: + dot.Method(Argument1, etc.) + functionName [Argument...] + The result is the value of calling the function associated + with the name: + function(Argument1, etc.) + Functions and function names are described below. + +Pipelines + +A pipeline may be "chained" by separating a sequence of commands with pipeline +characters '|'. In a chained pipeline, the result of the each command is +passed as the last argument of the following command. The output of the final +command in the pipeline is the value of the pipeline. + +The output of a command will be either one value or two values, the second of +which has type error. If that second value is present and evaluates to +non-nil, execution terminates and the error is returned to the caller of +Execute. + +Variables + +A pipeline inside an action may initialize a variable to capture the result. +The initialization has syntax + + $variable := pipeline + +where $variable is the name of the variable. An action that declares a +variable produces no output. 
+ +If a "range" action initializes a variable, the variable is set to the +successive elements of the iteration. Also, a "range" may declare two +variables, separated by a comma: + + range $index, $element := pipeline + +in which case $index and $element are set to the successive values of the +array/slice index or map key and element, respectively. Note that if there is +only one variable, it is assigned the element; this is opposite to the +convention in Go range clauses. + +A variable's scope extends to the "end" action of the control structure ("if", +"with", or "range") in which it is declared, or to the end of the template if +there is no such control structure. A template invocation does not inherit +variables from the point of its invocation. + +When execution begins, $ is set to the data argument passed to Execute, that is, +to the starting value of dot. + +Examples + +Here are some example one-line templates demonstrating pipelines and variables. +All produce the quoted word "output": + + {{"\"output\""}} + A string constant. + {{`"output"`}} + A raw string constant. + {{printf "%q" "output"}} + A function call. + {{"output" | printf "%q"}} + A function call whose final argument comes from the previous + command. + {{printf "%q" (print "out" "put")}} + A parenthesized argument. + {{"put" | printf "%s%s" "out" | printf "%q"}} + A more elaborate call. + {{"output" | printf "%s" | printf "%q"}} + A longer chain. + {{with "output"}}{{printf "%q" .}}{{end}} + A with action using dot. + {{with $x := "output" | printf "%q"}}{{$x}}{{end}} + A with action that creates and uses a variable. + {{with $x := "output"}}{{printf "%q" $x}}{{end}} + A with action that uses the variable in another action. + {{with $x := "output"}}{{$x | printf "%q"}}{{end}} + The same, but pipelined. + +Functions + +During execution functions are found in two function maps: first in the +template, then in the global function map. By default, no functions are defined +in the template but the Funcs method can be used to add them. + +Predefined global functions are named as follows. + + and + Returns the boolean AND of its arguments by returning the + first empty argument or the last argument, that is, + "and x y" behaves as "if x then y else x". All the + arguments are evaluated. + call + Returns the result of calling the first argument, which + must be a function, with the remaining arguments as parameters. + Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where + Y is a func-valued field, map entry, or the like. + The first argument must be the result of an evaluation + that yields a value of function type (as distinct from + a predefined function such as print). The function must + return either one or two result values, the second of which + is of type error. If the arguments don't match the function + or the returned error value is non-nil, execution stops. + html + Returns the escaped HTML equivalent of the textual + representation of its arguments. + index + Returns the result of indexing its first argument by the + following arguments. Thus "index x 1 2 3" is, in Go syntax, + x[1][2][3]. Each indexed item must be a map, slice, or array. + js + Returns the escaped JavaScript equivalent of the textual + representation of its arguments. + len + Returns the integer length of its argument. + not + Returns the boolean negation of its single argument. 
+ or + Returns the boolean OR of its arguments by returning the + first non-empty argument or the last argument, that is, + "or x y" behaves as "if x then x else y". All the + arguments are evaluated. + print + An alias for fmt.Sprint + printf + An alias for fmt.Sprintf + println + An alias for fmt.Sprintln + urlquery + Returns the escaped value of the textual representation of + its arguments in a form suitable for embedding in a URL query. + +The boolean functions take any zero value to be false and a non-zero +value to be true. + +There is also a set of binary comparison operators defined as +functions: + + eq + Returns the boolean truth of arg1 == arg2 + ne + Returns the boolean truth of arg1 != arg2 + lt + Returns the boolean truth of arg1 < arg2 + le + Returns the boolean truth of arg1 <= arg2 + gt + Returns the boolean truth of arg1 > arg2 + ge + Returns the boolean truth of arg1 >= arg2 + +For simpler multi-way equality tests, eq (only) accepts two or more +arguments and compares the second and subsequent to the first, +returning in effect + + arg1==arg2 || arg1==arg3 || arg1==arg4 ... + +(Unlike with || in Go, however, eq is a function call and all the +arguments will be evaluated.) + +The comparison functions work on basic types only (or named basic +types, such as "type Celsius float32"). They implement the Go rules +for comparison of values, except that size and exact type are +ignored, so any integer value, signed or unsigned, may be compared +with any other integer value. (The arithmetic value is compared, +not the bit pattern, so all negative integers are less than all +unsigned integers.) However, as usual, one may not compare an int +with a float32 and so on. + +Associated templates + +Each template is named by a string specified when it is created. Also, each +template is associated with zero or more other templates that it may invoke by +name; such associations are transitive and form a name space of templates. + +A template may use a template invocation to instantiate another associated +template; see the explanation of the "template" action above. The name must be +that of a template associated with the template that contains the invocation. + +Nested template definitions + +When parsing a template, another template may be defined and associated with the +template being parsed. Template definitions must appear at the top level of the +template, much like global variables in a Go program. + +The syntax of such definitions is to surround each template declaration with a +"define" and "end" action. + +The define action names the template being created by providing a string +constant. Here is a simple example: + + `{{define "T1"}}ONE{{end}} + {{define "T2"}}TWO{{end}} + {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} + {{template "T3"}}` + +This defines two templates, T1 and T2, and a third T3 that invokes the other two +when it is executed. Finally it invokes T3. If executed this template will +produce the text + + ONE TWO + +By construction, a template may reside in only one association. If it's +necessary to have a template addressable from multiple associations, the +template definition must be parsed multiple times to create distinct *Template +values, or must be copied with the Clone or AddParseTree method. + +Parse may be called multiple times to assemble the various associated templates; +see the ParseFiles and ParseGlob functions and methods for simple ways to parse +related templates stored in files. 
+ +A template may be executed directly or through ExecuteTemplate, which executes +an associated template identified by name. To invoke our example above, we +might write, + + err := tmpl.Execute(os.Stdout, "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +or to invoke a particular template explicitly by name, + + err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +*/ +package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go new file mode 100644 index 00000000..c3078e5d --- /dev/null +++ b/vendor/github.com/alecthomas/template/exec.go @@ -0,0 +1,845 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "fmt" + "io" + "reflect" + "runtime" + "sort" + "strings" + + "github.com/alecthomas/template/parse" +) + +// state represents the state of an execution. It's not part of the +// template so that multiple executions of the same template +// can execute in parallel. +type state struct { + tmpl *Template + wr io.Writer + node parse.Node // current node, for errors + vars []variable // push-down stack of variable values. +} + +// variable holds the dynamic value of a variable such as $, $x etc. +type variable struct { + name string + value reflect.Value +} + +// push pushes a new variable on the stack. +func (s *state) push(name string, value reflect.Value) { + s.vars = append(s.vars, variable{name, value}) +} + +// mark returns the length of the variable stack. +func (s *state) mark() int { + return len(s.vars) +} + +// pop pops the variable stack up to the mark. +func (s *state) pop(mark int) { + s.vars = s.vars[0:mark] +} + +// setVar overwrites the top-nth variable on the stack. Used by range iterations. +func (s *state) setVar(n int, value reflect.Value) { + s.vars[len(s.vars)-n].value = value +} + +// varValue returns the value of the named variable. +func (s *state) varValue(name string) reflect.Value { + for i := s.mark() - 1; i >= 0; i-- { + if s.vars[i].name == name { + return s.vars[i].value + } + } + s.errorf("undefined variable: %s", name) + return zero +} + +var zero reflect.Value + +// at marks the state to be on node n, for error reporting. +func (s *state) at(node parse.Node) { + s.node = node +} + +// doublePercent returns the string with %'s replaced by %%, if necessary, +// so it can be used safely inside a Printf format string. +func doublePercent(str string) string { + if strings.Contains(str, "%") { + str = strings.Replace(str, "%", "%%", -1) + } + return str +} + +// errorf formats the error and terminates processing. +func (s *state) errorf(format string, args ...interface{}) { + name := doublePercent(s.tmpl.Name()) + if s.node == nil { + format = fmt.Sprintf("template: %s: %s", name, format) + } else { + location, context := s.tmpl.ErrorContext(s.node) + format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) + } + panic(fmt.Errorf(format, args...)) +} + +// errRecover is the handler that turns panics into returns from the top +// level of Parse. 
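+//
+// It is installed with a deferred call; Execute below does
+//
+//	defer errRecover(&err)
+//
+// so a panic raised by s.errorf unwinds into an ordinary error return.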
+func errRecover(errp *error) { + e := recover() + if e != nil { + switch err := e.(type) { + case runtime.Error: + panic(e) + case error: + *errp = err + default: + panic(e) + } + } +} + +// ExecuteTemplate applies the template associated with t that has the given name +// to the specified data object and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { + tmpl := t.tmpl[name] + if tmpl == nil { + return fmt.Errorf("template: no template %q associated with template %q", name, t.name) + } + return tmpl.Execute(wr, data) +} + +// Execute applies a parsed template to the specified data object, +// and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { + defer errRecover(&err) + value := reflect.ValueOf(data) + state := &state{ + tmpl: t, + wr: wr, + vars: []variable{{"$", value}}, + } + t.init() + if t.Tree == nil || t.Root == nil { + var b bytes.Buffer + for name, tmpl := range t.tmpl { + if tmpl.Tree == nil || tmpl.Root == nil { + continue + } + if b.Len() > 0 { + b.WriteString(", ") + } + fmt.Fprintf(&b, "%q", name) + } + var s string + if b.Len() > 0 { + s = "; defined templates are: " + b.String() + } + state.errorf("%q is an incomplete or empty template%s", t.Name(), s) + } + state.walk(value, t.Root) + return +} + +// Walk functions step through the major pieces of the template structure, +// generating output as they go. +func (s *state) walk(dot reflect.Value, node parse.Node) { + s.at(node) + switch node := node.(type) { + case *parse.ActionNode: + // Do not pop variables so they persist until next end. + // Also, if the action declares variables, don't print the result. + val := s.evalPipeline(dot, node.Pipe) + if len(node.Pipe.Decl) == 0 { + s.printValue(node, val) + } + case *parse.IfNode: + s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) + case *parse.ListNode: + for _, node := range node.Nodes { + s.walk(dot, node) + } + case *parse.RangeNode: + s.walkRange(dot, node) + case *parse.TemplateNode: + s.walkTemplate(dot, node) + case *parse.TextNode: + if _, err := s.wr.Write(node.Text); err != nil { + s.errorf("%s", err) + } + case *parse.WithNode: + s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) + default: + s.errorf("unknown node: %s", node) + } +} + +// walkIfOrWith walks an 'if' or 'with' node. The two control structures +// are identical in behavior except that 'with' sets dot. +func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { + defer s.pop(s.mark()) + val := s.evalPipeline(dot, pipe) + truth, ok := isTrue(val) + if !ok { + s.errorf("if/with can't use %v", val) + } + if truth { + if typ == parse.NodeWith { + s.walk(val, list) + } else { + s.walk(dot, list) + } + } else if elseList != nil { + s.walk(dot, elseList) + } +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. 
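+//
+// For example, isTrue of an empty string reports (false, true): the string
+// has a meaningful truth value, and that value is false.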
+func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. + return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. + default: + return + } + return truth, true +} + +func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { + s.at(r) + defer s.pop(s.mark()) + val, _ := indirect(s.evalPipeline(dot, r.Pipe)) + // mark top of stack before any variables in the body are pushed. + mark := s.mark() + oneIteration := func(index, elem reflect.Value) { + // Set top var (lexically the second if there are two) to the element. + if len(r.Pipe.Decl) > 0 { + s.setVar(1, elem) + } + // Set next var (lexically the first if there are two) to the index. + if len(r.Pipe.Decl) > 1 { + s.setVar(2, index) + } + s.walk(elem, r.List) + s.pop(mark) + } + switch val.Kind() { + case reflect.Array, reflect.Slice: + if val.Len() == 0 { + break + } + for i := 0; i < val.Len(); i++ { + oneIteration(reflect.ValueOf(i), val.Index(i)) + } + return + case reflect.Map: + if val.Len() == 0 { + break + } + for _, key := range sortKeys(val.MapKeys()) { + oneIteration(key, val.MapIndex(key)) + } + return + case reflect.Chan: + if val.IsNil() { + break + } + i := 0 + for ; ; i++ { + elem, ok := val.Recv() + if !ok { + break + } + oneIteration(reflect.ValueOf(i), elem) + } + if i == 0 { + break + } + return + case reflect.Invalid: + break // An invalid value is likely a nil map, etc. and acts like an empty map. + default: + s.errorf("range can't iterate over %v", val) + } + if r.ElseList != nil { + s.walk(dot, r.ElseList) + } +} + +func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { + s.at(t) + tmpl := s.tmpl.tmpl[t.Name] + if tmpl == nil { + s.errorf("template %q not defined", t.Name) + } + // Variables declared by the pipeline persist. + dot = s.evalPipeline(dot, t.Pipe) + newState := *s + newState.tmpl = tmpl + // No dynamic scoping: template invocations inherit no variables. + newState.vars = []variable{{"$", dot}} + newState.walk(dot, tmpl.Root) +} + +// Eval functions evaluate pipelines, commands, and their elements and extract +// values from the data structure by examining fields, calling methods, and so on. +// The printing of those values happens only through walk functions. + +// evalPipeline returns the value acquired by evaluating a pipeline. If the +// pipeline has a variable declaration, the variable will be pushed on the +// stack. Callers should therefore pop the stack after they are finished +// executing commands depending on the pipeline value. +func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { + if pipe == nil { + return + } + s.at(pipe) + for _, cmd := range pipe.Cmds { + value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. 
+ // If the object has type interface{}, dig down one level to the thing inside. + if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { + value = reflect.ValueOf(value.Interface()) // lovely! + } + } + for _, variable := range pipe.Decl { + s.push(variable.Ident[0], value) + } + return value +} + +func (s *state) notAFunction(args []parse.Node, final reflect.Value) { + if len(args) > 1 || final.IsValid() { + s.errorf("can't give argument to non-function %s", args[0]) + } +} + +func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { + firstWord := cmd.Args[0] + switch n := firstWord.(type) { + case *parse.FieldNode: + return s.evalFieldNode(dot, n, cmd.Args, final) + case *parse.ChainNode: + return s.evalChainNode(dot, n, cmd.Args, final) + case *parse.IdentifierNode: + // Must be a function. + return s.evalFunction(dot, n, cmd, cmd.Args, final) + case *parse.PipeNode: + // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. + return s.evalPipeline(dot, n) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, cmd.Args, final) + } + s.at(firstWord) + s.notAFunction(cmd.Args, final) + switch word := firstWord.(type) { + case *parse.BoolNode: + return reflect.ValueOf(word.True) + case *parse.DotNode: + return dot + case *parse.NilNode: + s.errorf("nil is not a command") + case *parse.NumberNode: + return s.idealConstant(word) + case *parse.StringNode: + return reflect.ValueOf(word.Text) + } + s.errorf("can't evaluate command %q", firstWord) + panic("not reached") +} + +// idealConstant is called to return the value of a number in a context where +// we don't know the type. In that case, the syntax of the number tells us +// its type, and we use Go rules to resolve. Note there is no such thing as +// a uint ideal constant in this situation - the value must be of int type. +func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { + // These are ideal constants but we don't know the type + // and we have no context. (If it was a method argument, + // we'd know what we need.) The syntax guides us to some extent. + s.at(constant) + switch { + case constant.IsComplex: + return reflect.ValueOf(constant.Complex128) // incontrovertible. + case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: + return reflect.ValueOf(constant.Float64) + case constant.IsInt: + n := int(constant.Int64) + if int64(n) != constant.Int64 { + s.errorf("%s overflows int", constant.Text) + } + return reflect.ValueOf(n) + case constant.IsUint: + s.errorf("%s overflows int", constant.Text) + } + return zero +} + +func isHexConstant(s string) bool { + return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') +} + +func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(field) + return s.evalFieldChain(dot, dot, field, field.Ident, args, final) +} + +func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(chain) + // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. 
+ pipe := s.evalArg(dot, nil, chain.Node) + if len(chain.Field) == 0 { + s.errorf("internal error: no fields in evalChainNode") + } + return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) +} + +func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { + // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. + s.at(variable) + value := s.varValue(variable.Ident[0]) + if len(variable.Ident) == 1 { + s.notAFunction(args, final) + return value + } + return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) +} + +// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. +// dot is the environment in which to evaluate arguments, while +// receiver is the value being walked along the chain. +func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { + n := len(ident) + for i := 0; i < n-1; i++ { + receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) + } + // Now if it's a method, it gets the arguments. + return s.evalField(dot, ident[n-1], node, args, final, receiver) +} + +func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { + s.at(node) + name := node.Ident + function, ok := findFunction(name, s.tmpl) + if !ok { + s.errorf("%q is not a defined function", name) + } + return s.evalCall(dot, function, cmd, name, args, final) +} + +// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). +// The 'final' argument represents the return value from the preceding +// value of the pipeline, if any. +func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { + if !receiver.IsValid() { + return zero + } + typ := receiver.Type() + receiver, _ = indirect(receiver) + // Unless it's an interface, need to get to a value of type *T to guarantee + // we see all methods of T and *T. + ptr := receiver + if ptr.Kind() != reflect.Interface && ptr.CanAddr() { + ptr = ptr.Addr() + } + if method := ptr.MethodByName(fieldName); method.IsValid() { + return s.evalCall(dot, method, node, fieldName, args, final) + } + hasArgs := len(args) > 1 || final.IsValid() + // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. + receiver, isNil := indirect(receiver) + if isNil { + s.errorf("nil pointer evaluating %s.%s", typ, fieldName) + } + switch receiver.Kind() { + case reflect.Struct: + tField, ok := receiver.Type().FieldByName(fieldName) + if ok { + field := receiver.FieldByIndex(tField.Index) + if tField.PkgPath != "" { // field is unexported + s.errorf("%s is an unexported field of struct type %s", fieldName, typ) + } + // If it's a function, we must call it. + if hasArgs { + s.errorf("%s has arguments but cannot be invoked as function", fieldName) + } + return field + } + s.errorf("%s is not a field of struct type %s", fieldName, typ) + case reflect.Map: + // If it's a map, attempt to use the field name as a key. 
+ nameVal := reflect.ValueOf(fieldName) + if nameVal.Type().AssignableTo(receiver.Type().Key()) { + if hasArgs { + s.errorf("%s is not a method but has arguments", fieldName) + } + return receiver.MapIndex(nameVal) + } + } + s.errorf("can't evaluate field %s in type %s", fieldName, typ) + panic("not reached") +} + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so +// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] +// as the function itself. +func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { + if args != nil { + args = args[1:] // Zeroth arg is function name/node; not passed to function. + } + typ := fun.Type() + numIn := len(args) + if final.IsValid() { + numIn++ + } + numFixed := len(args) + if typ.IsVariadic() { + numFixed = typ.NumIn() - 1 // last arg is the variadic one. + if numIn < numFixed { + s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) + } + } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { + s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) + } + if !goodFunc(typ) { + // TODO: This could still be a confusing error; maybe goodFunc should provide info. + s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) + } + // Build the arg list. + argv := make([]reflect.Value, numIn) + // Args must be evaluated. Fixed args first. + i := 0 + for ; i < numFixed && i < len(args); i++ { + argv[i] = s.evalArg(dot, typ.In(i), args[i]) + } + // Now the ... args. + if typ.IsVariadic() { + argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. + for ; i < len(args); i++ { + argv[i] = s.evalArg(dot, argType, args[i]) + } + } + // Add final value if necessary. + if final.IsValid() { + t := typ.In(typ.NumIn() - 1) + if typ.IsVariadic() { + t = t.Elem() + } + argv[i] = s.validateType(final, t) + } + result := fun.Call(argv) + // If we have an error that is not nil, stop execution and return that error to the caller. + if len(result) == 2 && !result[1].IsNil() { + s.at(node) + s.errorf("error calling %s: %s", name, result[1].Interface().(error)) + } + return result[0] +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. +func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// validateType guarantees that the value is valid and assignable to the type. +func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { + if !value.IsValid() { + if typ == nil || canBeNil(typ) { + // An untyped nil interface{}. Accept as a proper nil value. + return reflect.Zero(typ) + } + s.errorf("invalid value; expected %s", typ) + } + if typ != nil && !value.Type().AssignableTo(typ) { + if value.Kind() == reflect.Interface && !value.IsNil() { + value = value.Elem() + if value.Type().AssignableTo(typ) { + return value + } + // fallthrough + } + // Does one dereference or indirection work? We could do more, as we + // do with method receivers, but that gets messy and method receivers + // are much more constrained, so it makes more sense there than here. 
+ // Besides, one is almost always all you need. + switch { + case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): + value = value.Elem() + if !value.IsValid() { + s.errorf("dereference of nil pointer of type %s", typ) + } + case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): + value = value.Addr() + default: + s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) + } + } + return value +} + +func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + switch arg := n.(type) { + case *parse.DotNode: + return s.validateType(dot, typ) + case *parse.NilNode: + if canBeNil(typ) { + return reflect.Zero(typ) + } + s.errorf("cannot assign nil to %s", typ) + case *parse.FieldNode: + return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) + case *parse.VariableNode: + return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) + case *parse.PipeNode: + return s.validateType(s.evalPipeline(dot, arg), typ) + case *parse.IdentifierNode: + return s.evalFunction(dot, arg, arg, nil, zero) + case *parse.ChainNode: + return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) + } + switch typ.Kind() { + case reflect.Bool: + return s.evalBool(typ, n) + case reflect.Complex64, reflect.Complex128: + return s.evalComplex(typ, n) + case reflect.Float32, reflect.Float64: + return s.evalFloat(typ, n) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return s.evalInteger(typ, n) + case reflect.Interface: + if typ.NumMethod() == 0 { + return s.evalEmptyInterface(dot, n) + } + case reflect.String: + return s.evalString(typ, n) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return s.evalUnsignedInteger(typ, n) + } + s.errorf("can't handle %s for arg of type %s", n, typ) + panic("not reached") +} + +func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.BoolNode); ok { + value := reflect.New(typ).Elem() + value.SetBool(n.True) + return value + } + s.errorf("expected bool; found %s", n) + panic("not reached") +} + +func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.StringNode); ok { + value := reflect.New(typ).Elem() + value.SetString(n.Text) + return value + } + s.errorf("expected string; found %s", n) + panic("not reached") +} + +func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsInt { + value := reflect.New(typ).Elem() + value.SetInt(n.Int64) + return value + } + s.errorf("expected integer; found %s", n) + panic("not reached") +} + +func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsUint { + value := reflect.New(typ).Elem() + value.SetUint(n.Uint64) + return value + } + s.errorf("expected unsigned integer; found %s", n) + panic("not reached") +} + +func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { + value := reflect.New(typ).Elem() + value.SetFloat(n.Float64) + return value + } + s.errorf("expected float; found %s", n) + panic("not reached") +} + +func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { + if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { + value := reflect.New(typ).Elem() + 
value.SetComplex(n.Complex128) + return value + } + s.errorf("expected complex; found %s", n) + panic("not reached") +} + +func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { + s.at(n) + switch n := n.(type) { + case *parse.BoolNode: + return reflect.ValueOf(n.True) + case *parse.DotNode: + return dot + case *parse.FieldNode: + return s.evalFieldNode(dot, n, nil, zero) + case *parse.IdentifierNode: + return s.evalFunction(dot, n, n, nil, zero) + case *parse.NilNode: + // NilNode is handled in evalArg, the only place that calls here. + s.errorf("evalEmptyInterface: nil (can't happen)") + case *parse.NumberNode: + return s.idealConstant(n) + case *parse.StringNode: + return reflect.ValueOf(n.Text) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, nil, zero) + case *parse.PipeNode: + return s.evalPipeline(dot, n) + } + s.errorf("can't handle assignment of %s to empty interface argument", n) + panic("not reached") +} + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printValue writes the textual representation of the value to the output of +// the template. +func (s *state) printValue(n parse.Node, v reflect.Value) { + s.at(n) + iface, ok := printableValue(v) + if !ok { + s.errorf("can't print %s of type %s", n, v.Type()) + } + fmt.Fprint(s.wr, iface) +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. + } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// Types to help sort the keys in a map for reproducible output. + +type rvs []reflect.Value + +func (x rvs) Len() int { return len(x) } +func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +type rvInts struct{ rvs } + +func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } + +type rvUints struct{ rvs } + +func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } + +type rvFloats struct{ rvs } + +func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } + +type rvStrings struct{ rvs } + +func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } + +// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. 
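+//
+// For example, integer keys 3, 1, 2 come back as 1, 2, 3, which is what makes
+// a "range" over a map print in a deterministic key order.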
+func sortKeys(v []reflect.Value) []reflect.Value { + if len(v) <= 1 { + return v + } + switch v[0].Kind() { + case reflect.Float32, reflect.Float64: + sort.Sort(rvFloats{v}) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sort.Sort(rvInts{v}) + case reflect.String: + sort.Sort(rvStrings{v}) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + sort.Sort(rvUints{v}) + } + return v +} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go new file mode 100644 index 00000000..39ee5ed6 --- /dev/null +++ b/vendor/github.com/alecthomas/template/funcs.go @@ -0,0 +1,598 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. +type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. +func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// addFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. +func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string, tmpl *Template) (reflect.Value, bool) { + if tmpl != nil && tmpl.common != nil { + if fn := tmpl.execFuncs[name]; fn.IsValid() { + return fn, true + } + } + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. + +// index returns the result of indexing its first argument by the following +// arguments. 
Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. +func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. +func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. +// The function must return 1 result, or 2 results, the second of which is an error. +func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. Clumsy because of variadics. + var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. 
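+//
+// These back the "and", "or" and "not" template functions; for example
+// {{if and .Enabled (not .Hidden)}}...{{end}} (field names here are
+// illustrative).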
+ +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. +func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. +func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. + +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... +func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + if len(arg2) == 0 { + return false, errNoComparison + } + for _, arg := range arg2 { + v2 := reflect.ValueOf(arg) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. + switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind: + truth = v1.Bool() == v2.Bool() + case complexKind: + truth = v1.Complex() == v2.Complex() + case floatKind: + truth = v1.Float() == v2.Float() + case intKind: + truth = v1.Int() == v2.Int() + case stringKind: + truth = v1.String() == v2.String() + case uintKind: + truth = v1.Uint() == v2.Uint() + default: + panic("invalid kind") + } + } + if truth { + return true, nil + } + } + return false, nil +} + +// ne evaluates the comparison a != b. +func ne(arg1, arg2 interface{}) (bool, error) { + // != is the inverse of ==. + equal, err := eq(arg1, arg2) + return !equal, err +} + +// lt evaluates the comparison a < b. 
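+//
+// As with eq, integers of differing signedness are compared by arithmetic
+// value, so lt(-1, uint(0)) reports true.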
+func lt(arg1, arg2 interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	v2 := reflect.ValueOf(arg2)
+	k2, err := basicKind(v2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = v1.Float() < v2.Float()
+		case intKind:
+			truth = v1.Int() < v2.Int()
+		case stringKind:
+			truth = v1.String() < v2.String()
+		case uintKind:
+			truth = v1.Uint() < v2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 interface{}) (bool, error) {
+	// <= is < or ==.
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexAny(s, `'"&<>`) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	HTMLEscape(&b, []byte(s))
+	return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+	jsLowUni = []byte(`\u00`)
+	hex      = []byte("0123456789ABCDEF")
+
+	jsBackslash = []byte(`\\`)
+	jsApos      = []byte(`\'`)
+	jsQuot      = []byte(`\"`)
+	jsLt        = []byte(`\x3C`)
+	jsGt        = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	last := 0
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		if !jsIsSpecial(rune(c)) {
+			// fast path: nothing to do
+			continue
+		}
+		w.Write(b[last:i])
+
+		if c < utf8.RuneSelf {
+			// Quotes, slashes and angle brackets get quoted.
+			// Control characters get written as \u00XX.
+			switch c {
+			case '\\':
+				w.Write(jsBackslash)
+			case '\'':
+				w.Write(jsApos)
+			case '"':
+				w.Write(jsQuot)
+			case '<':
+				w.Write(jsLt)
+			case '>':
+				w.Write(jsGt)
+			default:
+				w.Write(jsLowUni)
+				t, b := c>>4, c&0x0f
+				w.Write(hex[t : t+1])
+				w.Write(hex[b : b+1])
+			}
+		} else {
+			// Unicode rune.
+			r, size := utf8.DecodeRune(b[i:])
+			if unicode.IsPrint(r) {
+				w.Write(b[i : i+size])
+			} else {
+				fmt.Fprintf(w, "\\u%04X", r)
+			}
+			i += size - 1
+		}
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexFunc(s, jsIsSpecial) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	JSEscape(&b, []byte(s))
+	return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+	switch r {
+	case '\\', '\'', '"', '<', '>':
+		return true
+	}
+	return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//	fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+	ok := false
+	var s string
+	// Fast path for simple common case.
+	if len(args) == 1 {
+		s, ok = args[0].(string)
+	}
+	if !ok {
+		for i, arg := range args {
+			a, ok := printableValue(reflect.ValueOf(arg))
+			if ok {
+				args[i] = a
+			} // else let fmt do its thing
+		}
+		s = fmt.Sprint(args...)
+	}
+	return s
+}
diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go
new file mode 100644
index 00000000..3636fb54
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/helper.go
@@ -0,0 +1,108 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions to make constructing templates easier.
+
+package template
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+)
+
+// Functions and methods to parse templates.
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+//	var t = template.Must(template.New("name").Parse("text"))
+func Must(t *Template, err error) *Template {
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// ParseFiles creates a new Template and parses the template definitions from
+// the named files. The returned template's name will have the (base) name and
+// (parsed) contents of the first file. There must be at least one file.
+// If an error occurs, parsing stops and the returned *Template is nil.
+func ParseFiles(filenames ...string) (*Template, error) {
+	return parseFiles(nil, filenames...)
+}
+
+// ParseFiles parses the named files and associates the resulting templates with
+// t. If an error occurs, parsing stops and the returned template is nil;
+// otherwise it is t. There must be at least one file.
+func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
+	return parseFiles(t, filenames...)
+}
+
+// parseFiles is the helper for the method and function. If the argument
+// template is nil, it is created from the first file.
+func parseFiles(t *Template, filenames ...string) (*Template, error) {
+	if len(filenames) == 0 {
+		// Not really a problem, but be consistent.
+		return nil, fmt.Errorf("template: no files named in call to ParseFiles")
+	}
+	for _, filename := range filenames {
+		b, err := ioutil.ReadFile(filename)
+		if err != nil {
+			return nil, err
+		}
+		s := string(b)
+		name := filepath.Base(filename)
+		// First template becomes return value if not already defined,
+		// and we use that one for subsequent New calls to associate
+		// all the templates together. Also, if this file has the same name
+		// as t, this file becomes the contents of t, so
+		//	t, err := New(name).Funcs(xxx).ParseFiles(name)
+		// works. Otherwise we create a new template associated with t.
+		var tmpl *Template
+		if t == nil {
+			t = New(name)
+		}
+		if name == t.Name() {
+			tmpl = t
+		} else {
+			tmpl = t.New(name)
+		}
+		_, err = tmpl.Parse(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return t, nil
+}
+
+// ParseGlob creates a new Template and parses the template definitions from the
+// files identified by the pattern, which must match at least one file. The
+// returned template will have the (base) name and (parsed) contents of the
+// first file matched by the pattern. ParseGlob is equivalent to calling
+// ParseFiles with the list of files matched by the pattern.
+func ParseGlob(pattern string) (*Template, error) {
+	return parseGlob(nil, pattern)
+}
+
+// ParseGlob parses the template definitions in the files identified by the
+// pattern and associates the resulting templates with t. The pattern is
+// processed by filepath.Glob and must match at least one file. ParseGlob is
+// equivalent to calling t.ParseFiles with the list of files matched by the
+// pattern.
+func (t *Template) ParseGlob(pattern string) (*Template, error) {
+	return parseGlob(t, pattern)
+}
+
+// parseGlob is the implementation of the function and method ParseGlob.
+func parseGlob(t *Template, pattern string) (*Template, error) {
+	filenames, err := filepath.Glob(pattern)
+	if err != nil {
+		return nil, err
+	}
+	if len(filenames) == 0 {
+		return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+	}
+	return parseFiles(t, filenames...)
+}
diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go
new file mode 100644
index 00000000..55f1c051
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/parse/lex.go
@@ -0,0 +1,556 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+	typ itemType // The type of this item.
+	pos Pos      // The starting position, in bytes, of this item in the input string.
+	val string   // The value of this item.
+} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case i.typ > itemKeyword: + return fmt.Sprintf("<%s>", i.val) + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. +type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemBool // boolean constant + itemChar // printable ASCII character; grab bag for comma etc. + itemCharConstant // character constant + itemComplex // complex constant (1+2i); imaginary is just a number + itemColonEquals // colon-equals (':=') introducing a declaration + itemEOF + itemField // alphanumeric identifier starting with '.' + itemIdentifier // alphanumeric identifier not starting with '.' + itemLeftDelim // left action delimiter + itemLeftParen // '(' inside action + itemNumber // simple number, including imaginary + itemPipe // pipe symbol + itemRawString // raw quoted string (includes quotes) + itemRightDelim // right action delimiter + itemElideNewline // elide newline after right delim + itemRightParen // ')' inside action + itemSpace // run of spaces separating arguments + itemString // quoted string (includes quotes) + itemText // plain text + itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' + // Keywords appear after all the rest. + itemKeyword // used only to delimit the keywords + itemDot // the cursor, spelled '.' + itemDefine // define keyword + itemElse // else keyword + itemEnd // end keyword + itemIf // if keyword + itemNil // the untyped nil constant, easiest to treat as a keyword + itemRange // range keyword + itemTemplate // template keyword + itemWith // with keyword +) + +var key = map[string]itemType{ + ".": itemDot, + "define": itemDefine, + "else": itemElse, + "end": itemEnd, + "if": itemIf, + "range": itemRange, + "nil": itemNil, + "template": itemTemplate, + "with": itemWith, +} + +const eof = -1 + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. +type lexer struct { + name string // the name of the input; used only for error reports + input string // the string being scanned + leftDelim string // start of action + rightDelim string // end of action + state stateFn // the next lexing function to enter + pos Pos // current position in the input + start Pos // start position of this item + width Pos // width of last rune read from input + lastPos Pos // position of most recent item returned by nextItem + items chan item // channel of scanned items + parenDepth int // nesting depth of ( ) exprs +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if int(l.pos) >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = Pos(w) + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.start, l.input[l.start:l.pos]} + l.start = l.pos +} + +// ignore skips over the pending input before this point. 
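+//
+// lexComment, for example, uses it to discard {{/* ... */}} text, which
+// produces no item.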
+func (l *lexer) ignore() { + l.start = l.pos +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.IndexRune(valid, l.next()) >= 0 { + } + l.backup() +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + item := <-l.items + l.lastPos = item.pos + return item +} + +// lex creates a new scanner for the input string. +func lex(name, input, left, right string) *lexer { + if left == "" { + left = leftDelim + } + if right == "" { + right = rightDelim + } + l := &lexer{ + name: name, + input: input, + leftDelim: left, + rightDelim: right, + items: make(chan item), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexText; l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +const ( + leftDelim = "{{" + rightDelim = "}}" + leftComment = "/*" + rightComment = "*/" +) + +// lexText scans until an opening action delimiter, "{{". +func lexText(l *lexer) stateFn { + for { + if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { + if l.pos > l.start { + l.emit(itemText) + } + return lexLeftDelim + } + if l.next() == eof { + break + } + } + // Correctly reached EOF. + if l.pos > l.start { + l.emit(itemText) + } + l.emit(itemEOF) + return nil +} + +// lexLeftDelim scans the left delimiter, which is known to be present. +func lexLeftDelim(l *lexer) stateFn { + l.pos += Pos(len(l.leftDelim)) + if strings.HasPrefix(l.input[l.pos:], leftComment) { + return lexComment + } + l.emit(itemLeftDelim) + l.parenDepth = 0 + return lexInsideAction +} + +// lexComment scans a comment. The left comment marker is known to be present. +func lexComment(l *lexer) stateFn { + l.pos += Pos(len(leftComment)) + i := strings.Index(l.input[l.pos:], rightComment) + if i < 0 { + return l.errorf("unclosed comment") + } + l.pos += Pos(i + len(rightComment)) + if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + return l.errorf("comment ends before closing delimiter") + + } + l.pos += Pos(len(l.rightDelim)) + l.ignore() + return lexText +} + +// lexRightDelim scans the right delimiter, which is known to be present. +func lexRightDelim(l *lexer) stateFn { + l.pos += Pos(len(l.rightDelim)) + l.emit(itemRightDelim) + if l.peek() == '\\' { + l.pos++ + l.emit(itemElideNewline) + } + return lexText +} + +// lexInsideAction scans the elements inside action delimiters. +func lexInsideAction(l *lexer) stateFn { + // Either number, quoted string, or identifier. + // Spaces separate arguments; runs of spaces turn into itemSpace. + // Pipe symbols separate and are emitted. 
+	if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
+		if l.parenDepth == 0 {
+			return lexRightDelim
+		}
+		return l.errorf("unclosed left paren")
+	}
+	switch r := l.next(); {
+	case r == eof || isEndOfLine(r):
+		return l.errorf("unclosed action")
+	case isSpace(r):
+		return lexSpace
+	case r == ':':
+		if l.next() != '=' {
+			return l.errorf("expected :=")
+		}
+		l.emit(itemColonEquals)
+	case r == '|':
+		l.emit(itemPipe)
+	case r == '"':
+		return lexQuote
+	case r == '`':
+		return lexRawQuote
+	case r == '$':
+		return lexVariable
+	case r == '\'':
+		return lexChar
+	case r == '.':
+		// special look-ahead for ".field" so we don't break l.backup().
+		if l.pos < Pos(len(l.input)) {
+			r := l.input[l.pos]
+			if r < '0' || '9' < r {
+				return lexField
+			}
+		}
+		fallthrough // '.' can start a number.
+	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
+		l.backup()
+		return lexNumber
+	case isAlphaNumeric(r):
+		l.backup()
+		return lexIdentifier
+	case r == '(':
+		l.emit(itemLeftParen)
+		l.parenDepth++
+		return lexInsideAction
+	case r == ')':
+		l.emit(itemRightParen)
+		l.parenDepth--
+		if l.parenDepth < 0 {
+			return l.errorf("unexpected right paren %#U", r)
+		}
+		return lexInsideAction
+	case r <= unicode.MaxASCII && unicode.IsPrint(r):
+		l.emit(itemChar)
+		return lexInsideAction
+	default:
+		return l.errorf("unrecognized character in action: %#U", r)
+	}
+	return lexInsideAction
+}
+
+// lexSpace scans a run of space characters.
+// One space has already been seen.
+func lexSpace(l *lexer) stateFn {
+	for isSpace(l.peek()) {
+		l.next()
+	}
+	l.emit(itemSpace)
+	return lexInsideAction
+}
+
+// lexIdentifier scans an alphanumeric.
+func lexIdentifier(l *lexer) stateFn {
+Loop:
+	for {
+		switch r := l.next(); {
+		case isAlphaNumeric(r):
+			// absorb.
+		default:
+			l.backup()
+			word := l.input[l.start:l.pos]
+			if !l.atTerminator() {
+				return l.errorf("bad character %#U", r)
+			}
+			switch {
+			case key[word] > itemKeyword:
+				l.emit(key[word])
+			case word[0] == '.':
+				l.emit(itemField)
+			case word == "true", word == "false":
+				l.emit(itemBool)
+			default:
+				l.emit(itemIdentifier)
+			}
+			break Loop
+		}
+	}
+	return lexInsideAction
+}
+
+// lexField scans a field: .Alphanumeric.
+// The . has been scanned.
+func lexField(l *lexer) stateFn {
+	return lexFieldOrVariable(l, itemField)
+}
+
+// lexVariable scans a Variable: $Alphanumeric.
+// The $ has been scanned.
+func lexVariable(l *lexer) stateFn {
+	if l.atTerminator() { // Nothing interesting follows -> "$".
+		l.emit(itemVariable)
+		return lexInsideAction
+	}
+	return lexFieldOrVariable(l, itemVariable)
+}
+
+// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
+// The . or $ has been scanned.
+func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
+		if typ == itemVariable {
+			l.emit(itemVariable)
+		} else {
+			l.emit(itemDot)
+		}
+		return lexInsideAction
+	}
+	var r rune
+	for {
+		r = l.next()
+		if !isAlphaNumeric(r) {
+			l.backup()
+			break
+		}
+	}
+	if !l.atTerminator() {
+		return l.errorf("bad character %#U", r)
+	}
+	l.emit(typ)
+	return lexInsideAction
+}
+
+// atTerminator reports whether the input is at a valid termination character to
+// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
+// like "$x+2" not being acceptable without a space, in case we decide one
+// day to implement arithmetic.
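+//
+// For example, in ".X.Y" the second '.' terminates the identifier "X", while
+// lexing "$x+2" fails because '+' is not a terminator.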
+func (l *lexer) atTerminator() bool { + r := l.peek() + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '|', ':', ')', '(': + return true + } + // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will + // succeed but should fail) but only in extremely rare cases caused by willfully + // bad choice of delimiter. + if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { + return true + } + return false +} + +// lexChar scans a character constant. The initial quote is already +// scanned. Syntax checking is done by the parser. +func lexChar(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated character constant") + case '\'': + break Loop + } + } + l.emit(itemCharConstant) + return lexInsideAction +} + +// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This +// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" +// and "089" - but when it's wrong the input is invalid and the parser (via +// strconv) will notice. +func lexNumber(l *lexer) stateFn { + if !l.scanNumber() { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + if sign := l.peek(); sign == '+' || sign == '-' { + // Complex: 1+2i. No spaces, must end in 'i'. + if !l.scanNumber() || l.input[l.pos-1] != 'i' { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(itemComplex) + } else { + l.emit(itemNumber) + } + return lexInsideAction +} + +func (l *lexer) scanNumber() bool { + // Optional leading sign. + l.accept("+-") + // Is it hex? + digits := "0123456789" + if l.accept("0") && l.accept("xX") { + digits = "0123456789abcdefABCDEF" + } + l.acceptRun(digits) + if l.accept(".") { + l.acceptRun(digits) + } + if l.accept("eE") { + l.accept("+-") + l.acceptRun("0123456789") + } + // Is it imaginary? + l.accept("i") + // Next thing mustn't be alphanumeric. + if isAlphaNumeric(l.peek()) { + l.next() + return false + } + return true +} + +// lexQuote scans a quoted string. +func lexQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated quoted string") + case '"': + break Loop + } + } + l.emit(itemString) + return lexInsideAction +} + +// lexRawQuote scans a raw quoted string. +func lexRawQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case eof, '\n': + return l.errorf("unterminated raw quoted string") + case '`': + break Loop + } + } + l.emit(itemRawString) + return lexInsideAction +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go new file mode 100644 index 00000000..55c37f6d --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/node.go @@ -0,0 +1,834 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Parse nodes. + +package parse + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +var textFormat = "%s" // Changed to "%q" in tests for better error messages. + +// A Node is an element in the parse tree. The interface is trivial. +// The interface contains an unexported method so that only +// types local to this package can satisfy it. +type Node interface { + Type() NodeType + String() string + // Copy does a deep copy of the Node and all its components. + // To avoid type assertions, some XxxNodes also have specialized + // CopyXxx methods that return *XxxNode. + Copy() Node + Position() Pos // byte position of start of node in full original input string + // tree returns the containing *Tree. + // It is unexported so all implementations of Node are in this package. + tree() *Tree +} + +// NodeType identifies the type of a parse tree node. +type NodeType int + +// Pos represents a byte position in the original input text from which +// this template was parsed. +type Pos int + +func (p Pos) Position() Pos { + return p +} + +// Type returns itself and provides an easy default implementation +// for embedding in a Node. Embedded in all non-trivial Nodes. +func (t NodeType) Type() NodeType { + return t +} + +const ( + NodeText NodeType = iota // Plain text. + NodeAction // A non-control action such as a field evaluation. + NodeBool // A boolean constant. + NodeChain // A sequence of field accesses. + NodeCommand // An element of a pipeline. + NodeDot // The cursor, dot. + nodeElse // An else action. Not added to tree. + nodeEnd // An end action. Not added to tree. + NodeField // A field or method name. + NodeIdentifier // An identifier; always a function name. + NodeIf // An if action. + NodeList // A list of Nodes. + NodeNil // An untyped nil constant. + NodeNumber // A numerical constant. + NodePipe // A pipeline of commands. + NodeRange // A range action. + NodeString // A string constant. + NodeTemplate // A template invocation action. + NodeVariable // A $ variable. + NodeWith // A with action. +) + +// Nodes. + +// ListNode holds a sequence of nodes. +type ListNode struct { + NodeType + Pos + tr *Tree + Nodes []Node // The element nodes in lexical order. +} + +func (t *Tree) newList(pos Pos) *ListNode { + return &ListNode{tr: t, NodeType: NodeList, Pos: pos} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) tree() *Tree { + return l.tr +} + +func (l *ListNode) String() string { + b := new(bytes.Buffer) + for _, n := range l.Nodes { + fmt.Fprint(b, n) + } + return b.String() +} + +func (l *ListNode) CopyList() *ListNode { + if l == nil { + return l + } + n := l.tr.newList(l.Pos) + for _, elem := range l.Nodes { + n.append(elem.Copy()) + } + return n +} + +func (l *ListNode) Copy() Node { + return l.CopyList() +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Pos + tr *Tree + Text []byte // The text; may span newlines. 
+} + +func (t *Tree) newText(pos Pos, text string) *TextNode { + return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} +} + +func (t *TextNode) String() string { + return fmt.Sprintf(textFormat, t.Text) +} + +func (t *TextNode) tree() *Tree { + return t.tr +} + +func (t *TextNode) Copy() Node { + return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} +} + +// PipeNode holds a pipeline with optional declaration +type PipeNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Decl []*VariableNode // Variable declarations in lexical order. + Cmds []*CommandNode // The commands in lexical order. +} + +func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { + return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} +} + +func (p *PipeNode) append(command *CommandNode) { + p.Cmds = append(p.Cmds, command) +} + +func (p *PipeNode) String() string { + s := "" + if len(p.Decl) > 0 { + for i, v := range p.Decl { + if i > 0 { + s += ", " + } + s += v.String() + } + s += " := " + } + for i, c := range p.Cmds { + if i > 0 { + s += " | " + } + s += c.String() + } + return s +} + +func (p *PipeNode) tree() *Tree { + return p.tr +} + +func (p *PipeNode) CopyPipe() *PipeNode { + if p == nil { + return p + } + var decl []*VariableNode + for _, d := range p.Decl { + decl = append(decl, d.Copy().(*VariableNode)) + } + n := p.tr.newPipeline(p.Pos, p.Line, decl) + for _, c := range p.Cmds { + n.append(c.Copy().(*CommandNode)) + } + return n +} + +func (p *PipeNode) Copy() Node { + return p.CopyPipe() +} + +// ActionNode holds an action (something bounded by delimiters). +// Control actions have their own nodes; ActionNode represents simple +// ones such as field evaluations and parenthesized pipelines. +type ActionNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline in the action. +} + +func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { + return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} +} + +func (a *ActionNode) String() string { + return fmt.Sprintf("{{%s}}", a.Pipe) + +} + +func (a *ActionNode) tree() *Tree { + return a.tr +} + +func (a *ActionNode) Copy() Node { + return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) + +} + +// CommandNode holds a command (a pipeline inside an evaluating action). +type CommandNode struct { + NodeType + Pos + tr *Tree + Args []Node // Arguments in lexical order: Identifier, field, or constant. +} + +func (t *Tree) newCommand(pos Pos) *CommandNode { + return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} +} + +func (c *CommandNode) append(arg Node) { + c.Args = append(c.Args, arg) +} + +func (c *CommandNode) String() string { + s := "" + for i, arg := range c.Args { + if i > 0 { + s += " " + } + if arg, ok := arg.(*PipeNode); ok { + s += "(" + arg.String() + ")" + continue + } + s += arg.String() + } + return s +} + +func (c *CommandNode) tree() *Tree { + return c.tr +} + +func (c *CommandNode) Copy() Node { + if c == nil { + return c + } + n := c.tr.newCommand(c.Pos) + for _, c := range c.Args { + n.append(c.Copy()) + } + return n +} + +// IdentifierNode holds an identifier. +type IdentifierNode struct { + NodeType + Pos + tr *Tree + Ident string // The identifier's name. 
+} + +// NewIdentifier returns a new IdentifierNode with the given identifier name. +func NewIdentifier(ident string) *IdentifierNode { + return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} +} + +// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { + i.Pos = pos + return i +} + +// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { + i.tr = t + return i +} + +func (i *IdentifierNode) String() string { + return i.Ident +} + +func (i *IdentifierNode) tree() *Tree { + return i.tr +} + +func (i *IdentifierNode) Copy() Node { + return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) +} + +// VariableNode holds a list of variable names, possibly with chained field +// accesses. The dollar sign is part of the (first) name. +type VariableNode struct { + NodeType + Pos + tr *Tree + Ident []string // Variable name and fields in lexical order. +} + +func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { + return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} +} + +func (v *VariableNode) String() string { + s := "" + for i, id := range v.Ident { + if i > 0 { + s += "." + } + s += id + } + return s +} + +func (v *VariableNode) tree() *Tree { + return v.tr +} + +func (v *VariableNode) Copy() Node { + return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} +} + +// DotNode holds the special identifier '.'. +type DotNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newDot(pos Pos) *DotNode { + return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} +} + +func (d *DotNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeDot +} + +func (d *DotNode) String() string { + return "." +} + +func (d *DotNode) tree() *Tree { + return d.tr +} + +func (d *DotNode) Copy() Node { + return d.tr.newDot(d.Pos) +} + +// NilNode holds the special identifier 'nil' representing an untyped nil constant. +type NilNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newNil(pos Pos) *NilNode { + return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} +} + +func (n *NilNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeNil +} + +func (n *NilNode) String() string { + return "nil" +} + +func (n *NilNode) tree() *Tree { + return n.tr +} + +func (n *NilNode) Copy() Node { + return n.tr.newNil(n.Pos) +} + +// FieldNode holds a field (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The period is dropped from each ident. +type FieldNode struct { + NodeType + Pos + tr *Tree + Ident []string // The identifiers in lexical order. +} + +func (t *Tree) newField(pos Pos, ident string) *FieldNode { + return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period +} + +func (f *FieldNode) String() string { + s := "" + for _, id := range f.Ident { + s += "." 
+ id + } + return s +} + +func (f *FieldNode) tree() *Tree { + return f.tr +} + +func (f *FieldNode) Copy() Node { + return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} +} + +// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The periods are dropped from each ident. +type ChainNode struct { + NodeType + Pos + tr *Tree + Node Node + Field []string // The identifiers in lexical order. +} + +func (t *Tree) newChain(pos Pos, node Node) *ChainNode { + return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} +} + +// Add adds the named field (which should start with a period) to the end of the chain. +func (c *ChainNode) Add(field string) { + if len(field) == 0 || field[0] != '.' { + panic("no dot in field") + } + field = field[1:] // Remove leading dot. + if field == "" { + panic("empty field") + } + c.Field = append(c.Field, field) +} + +func (c *ChainNode) String() string { + s := c.Node.String() + if _, ok := c.Node.(*PipeNode); ok { + s = "(" + s + ")" + } + for _, field := range c.Field { + s += "." + field + } + return s +} + +func (c *ChainNode) tree() *Tree { + return c.tr +} + +func (c *ChainNode) Copy() Node { + return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} +} + +// BoolNode holds a boolean constant. +type BoolNode struct { + NodeType + Pos + tr *Tree + True bool // The value of the boolean constant. +} + +func (t *Tree) newBool(pos Pos, true bool) *BoolNode { + return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} +} + +func (b *BoolNode) String() string { + if b.True { + return "true" + } + return "false" +} + +func (b *BoolNode) tree() *Tree { + return b.tr +} + +func (b *BoolNode) Copy() Node { + return b.tr.newBool(b.Pos, b.True) +} + +// NumberNode holds a number: signed or unsigned integer, float, or complex. +// The value is parsed and stored under all the types that can represent the value. +// This simulates in a small amount of code the behavior of Go's ideal constants. +type NumberNode struct { + NodeType + Pos + tr *Tree + IsInt bool // Number has an integral value. + IsUint bool // Number has an unsigned integral value. + IsFloat bool // Number has a floating-point value. + IsComplex bool // Number is complex. + Int64 int64 // The signed integer value. + Uint64 uint64 // The unsigned integer value. + Float64 float64 // The floating-point value. + Complex128 complex128 // The complex value. + Text string // The original textual representation from the input. +} + +func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { + n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} + switch typ { + case itemCharConstant: + rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) + if err != nil { + return nil, err + } + if tail != "'" { + return nil, fmt.Errorf("malformed character constant: %s", text) + } + n.Int64 = int64(rune) + n.IsInt = true + n.Uint64 = uint64(rune) + n.IsUint = true + n.Float64 = float64(rune) // odd but those are the rules. + n.IsFloat = true + return n, nil + case itemComplex: + // fmt.Sscan can parse the pair, so let it do the work. + if _, err := fmt.Sscan(text, &n.Complex128); err != nil { + return nil, err + } + n.IsComplex = true + n.simplifyComplex() + return n, nil + } + // Imaginary constants can only be complex unless they are zero. 
+ if len(text) > 0 && text[len(text)-1] == 'i' { + f, err := strconv.ParseFloat(text[:len(text)-1], 64) + if err == nil { + n.IsComplex = true + n.Complex128 = complex(0, f) + n.simplifyComplex() + return n, nil + } + } + // Do integer test first so we get 0x123 etc. + u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. + if err == nil { + n.IsUint = true + n.Uint64 = u + } + i, err := strconv.ParseInt(text, 0, 64) + if err == nil { + n.IsInt = true + n.Int64 = i + if i == 0 { + n.IsUint = true // in case of -0. + n.Uint64 = u + } + } + // If an integer extraction succeeded, promote the float. + if n.IsInt { + n.IsFloat = true + n.Float64 = float64(n.Int64) + } else if n.IsUint { + n.IsFloat = true + n.Float64 = float64(n.Uint64) + } else { + f, err := strconv.ParseFloat(text, 64) + if err == nil { + n.IsFloat = true + n.Float64 = f + // If a floating-point extraction succeeded, extract the int if needed. + if !n.IsInt && float64(int64(f)) == f { + n.IsInt = true + n.Int64 = int64(f) + } + if !n.IsUint && float64(uint64(f)) == f { + n.IsUint = true + n.Uint64 = uint64(f) + } + } + } + if !n.IsInt && !n.IsUint && !n.IsFloat { + return nil, fmt.Errorf("illegal number syntax: %q", text) + } + return n, nil +} + +// simplifyComplex pulls out any other types that are represented by the complex number. +// These all require that the imaginary part be zero. +func (n *NumberNode) simplifyComplex() { + n.IsFloat = imag(n.Complex128) == 0 + if n.IsFloat { + n.Float64 = real(n.Complex128) + n.IsInt = float64(int64(n.Float64)) == n.Float64 + if n.IsInt { + n.Int64 = int64(n.Float64) + } + n.IsUint = float64(uint64(n.Float64)) == n.Float64 + if n.IsUint { + n.Uint64 = uint64(n.Float64) + } + } +} + +func (n *NumberNode) String() string { + return n.Text +} + +func (n *NumberNode) tree() *Tree { + return n.tr +} + +func (n *NumberNode) Copy() Node { + nn := new(NumberNode) + *nn = *n // Easy, fast, correct. + return nn +} + +// StringNode holds a string constant. The value has been "unquoted". +type StringNode struct { + NodeType + Pos + tr *Tree + Quoted string // The original text of the string, with quotes. + Text string // The string, after quote processing. +} + +func (t *Tree) newString(pos Pos, orig, text string) *StringNode { + return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} +} + +func (s *StringNode) String() string { + return s.Quoted +} + +func (s *StringNode) tree() *Tree { + return s.tr +} + +func (s *StringNode) Copy() Node { + return s.tr.newString(s.Pos, s.Quoted, s.Text) +} + +// endNode represents an {{end}} action. +// It does not appear in the final parse tree. +type endNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newEnd(pos Pos) *endNode { + return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} +} + +func (e *endNode) String() string { + return "{{end}}" +} + +func (e *endNode) tree() *Tree { + return e.tr +} + +func (e *endNode) Copy() Node { + return e.tr.newEnd(e.Pos) +} + +// elseNode represents an {{else}} action. Does not appear in the final tree. 
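Stepping back to `newNumber` above: a hedged, in-package fragment showing how a few literals are classified. The expected flags follow from the code shown; `New` is the package's `Tree` constructor and `fmt` is assumed imported:

```go
// In-package fragment; not part of the vendored source.
t := New("numbers")

n, _ := t.newNumber(0, "-0", itemNumber)
fmt.Println(n.IsInt, n.IsUint, n.IsFloat) // true true true (the -0 case handled above)

n, _ = t.newNumber(0, "1+2i", itemComplex)
fmt.Println(n.IsComplex, n.IsFloat) // true false (imaginary part is non-zero)

n, _ = t.newNumber(0, "'a'", itemCharConstant)
fmt.Println(n.Int64, n.IsInt) // 97 true
```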
+type elseNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) +} + +func (t *Tree) newElse(pos Pos, line int) *elseNode { + return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} +} + +func (e *elseNode) Type() NodeType { + return nodeElse +} + +func (e *elseNode) String() string { + return "{{else}}" +} + +func (e *elseNode) tree() *Tree { + return e.tr +} + +func (e *elseNode) Copy() Node { + return e.tr.newElse(e.Pos, e.Line) +} + +// BranchNode is the common representation of if, range, and with. +type BranchNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline to be evaluated. + List *ListNode // What to execute if the value is non-empty. + ElseList *ListNode // What to execute if the value is empty (nil if absent). +} + +func (b *BranchNode) String() string { + name := "" + switch b.NodeType { + case NodeIf: + name = "if" + case NodeRange: + name = "range" + case NodeWith: + name = "with" + default: + panic("unknown branch type") + } + if b.ElseList != nil { + return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) + } + return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) +} + +func (b *BranchNode) tree() *Tree { + return b.tr +} + +func (b *BranchNode) Copy() Node { + switch b.NodeType { + case NodeIf: + return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeRange: + return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeWith: + return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + default: + panic("unknown branch type") + } +} + +// IfNode represents an {{if}} action and its commands. +type IfNode struct { + BranchNode +} + +func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { + return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (i *IfNode) Copy() Node { + return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) +} + +// RangeNode represents a {{range}} action and its commands. +type RangeNode struct { + BranchNode +} + +func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { + return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (r *RangeNode) Copy() Node { + return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) +} + +// WithNode represents a {{with}} action and its commands. +type WithNode struct { + BranchNode +} + +func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { + return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (w *WithNode) Copy() Node { + return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) +} + +// TemplateNode represents a {{template}} action. +type TemplateNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Name string // The name of the template (unquoted). + Pipe *PipeNode // The command to evaluate as dot for the template. 
+} + +func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { + return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} +} + +func (t *TemplateNode) String() string { + if t.Pipe == nil { + return fmt.Sprintf("{{template %q}}", t.Name) + } + return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) +} + +func (t *TemplateNode) tree() *Tree { + return t.tr +} + +func (t *TemplateNode) Copy() Node { + return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) +} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go new file mode 100644 index 00000000..0d77ade8 --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/parse.go @@ -0,0 +1,700 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package parse builds parse trees for templates as defined by text/template +// and html/template. Clients should use those packages to construct templates +// rather than this one, which provides shared internal data structures not +// intended for general use. +package parse + +import ( + "bytes" + "fmt" + "runtime" + "strconv" + "strings" +) + +// Tree is the representation of a single parsed template. +type Tree struct { + Name string // name of the template represented by the tree. + ParseName string // name of the top-level template during parsing, for error messages. + Root *ListNode // top-level root of the tree. + text string // text parsed to create the template (or its parent) + // Parsing only; cleared after parse. + funcs []map[string]interface{} + lex *lexer + token [3]item // three-token lookahead for parser. + peekCount int + vars []string // variables defined at the moment. +} + +// Copy returns a copy of the Tree. Any parsing state is discarded. +func (t *Tree) Copy() *Tree { + if t == nil { + return nil + } + return &Tree{ + Name: t.Name, + ParseName: t.ParseName, + Root: t.Root.CopyList(), + text: t.text, + } +} + +// Parse returns a map from template name to parse.Tree, created by parsing the +// templates described in the argument string. The top-level template will be +// given the specified name. If an error is encountered, parsing stops and an +// empty map is returned with the error. +func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { + treeSet = make(map[string]*Tree) + t := New(name) + t.text = text + _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) + return +} + +// next returns the next token. +func (t *Tree) next() item { + if t.peekCount > 0 { + t.peekCount-- + } else { + t.token[0] = t.lex.nextItem() + } + return t.token[t.peekCount] +} + +// backup backs the input stream up one token. +func (t *Tree) backup() { + t.peekCount++ +} + +// backup2 backs the input stream up two tokens. +// The zeroth token is already there. +func (t *Tree) backup2(t1 item) { + t.token[1] = t1 + t.peekCount = 2 +} + +// backup3 backs the input stream up three tokens +// The zeroth token is already there. +func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. + t.token[1] = t1 + t.token[2] = t2 + t.peekCount = 3 +} + +// peek returns but does not consume the next token. 
+func (t *Tree) peek() item { + if t.peekCount > 0 { + return t.token[t.peekCount-1] + } + t.peekCount = 1 + t.token[0] = t.lex.nextItem() + return t.token[0] +} + +// nextNonSpace returns the next non-space token. +func (t *Tree) nextNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + return token +} + +// peekNonSpace returns but does not consume the next non-space token. +func (t *Tree) peekNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + t.backup() + return token +} + +// Parsing. + +// New allocates a new parse tree with the given name. +func New(name string, funcs ...map[string]interface{}) *Tree { + return &Tree{ + Name: name, + funcs: funcs, + } +} + +// ErrorContext returns a textual representation of the location of the node in the input text. +// The receiver is only used when the node does not have a pointer to the tree inside, +// which can occur in old code. +func (t *Tree) ErrorContext(n Node) (location, context string) { + pos := int(n.Position()) + tree := n.tree() + if tree == nil { + tree = t + } + text := tree.text[:pos] + byteNum := strings.LastIndex(text, "\n") + if byteNum == -1 { + byteNum = pos // On first line. + } else { + byteNum++ // After the newline. + byteNum = pos - byteNum + } + lineNum := 1 + strings.Count(text, "\n") + context = n.String() + if len(context) > 20 { + context = fmt.Sprintf("%.20s...", context) + } + return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context +} + +// errorf formats the error and terminates processing. +func (t *Tree) errorf(format string, args ...interface{}) { + t.Root = nil + format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +// error terminates processing. +func (t *Tree) error(err error) { + t.errorf("%s", err) +} + +// expect consumes the next token and guarantees it has the required type. +func (t *Tree) expect(expected itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected { + t.unexpected(token, context) + } + return token +} + +// expectOneOf consumes the next token and guarantees it has one of the required types. +func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected1 && token.typ != expected2 { + t.unexpected(token, context) + } + return token +} + +// unexpected complains about the token and terminates processing. +func (t *Tree) unexpected(token item, context string) { + t.errorf("unexpected %s in %s", token, context) +} + +// recover is the handler that turns panics into returns from the top level of Parse. +func (t *Tree) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + if t != nil { + t.stopParse() + } + *errp = e.(error) + } + return +} + +// startParse initializes the parser, using the lexer. +func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { + t.Root = nil + t.lex = lex + t.vars = []string{"$"} + t.funcs = funcs +} + +// stopParse terminates parsing. +func (t *Tree) stopParse() { + t.lex = nil + t.vars = nil + t.funcs = nil +} + +// Parse parses the template definition string to construct a representation of +// the template for execution. If either action delimiter string is empty, the +// default ("{{" or "}}") is used. Embedded template definitions are added to +// the treeSet map. 
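As a rough illustration of the entry point just described, using the package-level `parse.Parse` wrapper shown earlier. The template text and name are illustrative, and note the package docs steer clients toward `text/template`/`html/template` rather than calling this package directly:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// Parse the text with default {{ }} delimiters and no extra functions.
	trees, err := parse.Parse("page", "Hello, {{.Name}}!", "", "", nil)
	if err != nil {
		panic(err)
	}
	// Root.String() reconstructs the template source from the node tree.
	fmt.Println(trees["page"].Root) // Hello, {{.Name}}!
}
```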
+func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { + defer t.recover(&err) + t.ParseName = t.Name + t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) + t.text = text + t.parse(treeSet) + t.add(treeSet) + t.stopParse() + return t, nil +} + +// add adds tree to the treeSet. +func (t *Tree) add(treeSet map[string]*Tree) { + tree := treeSet[t.Name] + if tree == nil || IsEmptyTree(tree.Root) { + treeSet[t.Name] = t + return + } + if !IsEmptyTree(t.Root) { + t.errorf("template: multiple definition of template %q", t.Name) + } +} + +// IsEmptyTree reports whether this tree (node) is empty of everything but space. +func IsEmptyTree(n Node) bool { + switch n := n.(type) { + case nil: + return true + case *ActionNode: + case *IfNode: + case *ListNode: + for _, node := range n.Nodes { + if !IsEmptyTree(node) { + return false + } + } + return true + case *RangeNode: + case *TemplateNode: + case *TextNode: + return len(bytes.TrimSpace(n.Text)) == 0 + case *WithNode: + default: + panic("unknown node: " + n.String()) + } + return false +} + +// parse is the top-level parser for a template, essentially the same +// as itemList except it also parses {{define}} actions. +// It runs to EOF. +func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { + t.Root = t.newList(t.peek().pos) + for t.peek().typ != itemEOF { + if t.peek().typ == itemLeftDelim { + delim := t.next() + if t.nextNonSpace().typ == itemDefine { + newT := New("definition") // name will be updated once we know it. + newT.text = t.text + newT.ParseName = t.ParseName + newT.startParse(t.funcs, t.lex) + newT.parseDefinition(treeSet) + continue + } + t.backup2(delim) + } + n := t.textOrAction() + if n.Type() == nodeEnd { + t.errorf("unexpected %s", n) + } + t.Root.append(n) + } + return nil +} + +// parseDefinition parses a {{define}} ... {{end}} template definition and +// installs the definition in the treeSet map. The "define" keyword has already +// been scanned. +func (t *Tree) parseDefinition(treeSet map[string]*Tree) { + const context = "define clause" + name := t.expectOneOf(itemString, itemRawString, context) + var err error + t.Name, err = strconv.Unquote(name.val) + if err != nil { + t.error(err) + } + t.expect(itemRightDelim, context) + var end Node + t.Root, end = t.itemList() + if end.Type() != nodeEnd { + t.errorf("unexpected %s in %s", end, context) + } + t.add(treeSet) + t.stopParse() +} + +// itemList: +// textOrAction* +// Terminates at {{end}} or {{else}}, returned separately. +func (t *Tree) itemList() (list *ListNode, next Node) { + list = t.newList(t.peekNonSpace().pos) + for t.peekNonSpace().typ != itemEOF { + n := t.textOrAction() + switch n.Type() { + case nodeEnd, nodeElse: + return list, n + } + list.append(n) + } + t.errorf("unexpected EOF") + return +} + +// textOrAction: +// text | action +func (t *Tree) textOrAction() Node { + switch token := t.nextNonSpace(); token.typ { + case itemElideNewline: + return t.elideNewline() + case itemText: + return t.newText(token.pos, token.val) + case itemLeftDelim: + return t.action() + default: + t.unexpected(token, "input") + } + return nil +} + +// elideNewline: +// Remove newlines trailing rightDelim if \\ is present. 
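The elide-newline behavior documented above is this fork's extension to the stock lexer and parser. A minimal sketch, assuming the fork's public `template` package (which wraps this parser) is used as a drop-in for `text/template`:

```go
package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	// The backslash after }} becomes itemElideNewline, so the newline
	// that follows it is stripped from the next text node.
	const src = "{{\"hello\"}}\\\nworld\n"
	t := template.Must(template.New("demo").Parse(src))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Output: helloworld
}
```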
+func (t *Tree) elideNewline() Node {
+	token := t.peek()
+	if token.typ != itemText {
+		t.unexpected(token, "input")
+		return nil
+	}
+
+	t.next()
+	stripped := strings.TrimLeft(token.val, "\n\r")
+	diff := len(token.val) - len(stripped)
+	if diff > 0 {
+		// This is a bit nasty. We mutate the token in-place to remove
+		// preceding newlines.
+		token.pos += Pos(diff)
+		token.val = stripped
+	}
+	return t.newText(token.pos, token.val)
+}
+
+// Action:
+//	control
+//	command ("|" command)*
+// Left delim is past. Now get actions.
+// First word could be a keyword such as range.
+func (t *Tree) action() (n Node) {
+	switch token := t.nextNonSpace(); token.typ {
+	case itemElse:
+		return t.elseControl()
+	case itemEnd:
+		return t.endControl()
+	case itemIf:
+		return t.ifControl()
+	case itemRange:
+		return t.rangeControl()
+	case itemTemplate:
+		return t.templateControl()
+	case itemWith:
+		return t.withControl()
+	}
+	t.backup()
+	// Do not pop variables; they persist until "end".
+	return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
+}
+
+// Pipeline:
+//	declarations? command ('|' command)*
+func (t *Tree) pipeline(context string) (pipe *PipeNode) {
+	var decl []*VariableNode
+	pos := t.peekNonSpace().pos
+	// Are there declarations?
+	for {
+		if v := t.peekNonSpace(); v.typ == itemVariable {
+			t.next()
+			// Since space is a token, we need 3-token look-ahead here in the worst case:
+			// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
+			// argument variable rather than a declaration. So remember the token
+			// adjacent to the variable so we can push it back if necessary.
+			tokenAfterVariable := t.peek()
+			if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
+				t.nextNonSpace()
+				variable := t.newVariable(v.pos, v.val)
+				decl = append(decl, variable)
+				t.vars = append(t.vars, v.val)
+				if next.typ == itemChar && next.val == "," {
+					if context == "range" && len(decl) < 2 {
+						continue
+					}
+					t.errorf("too many declarations in %s", context)
+				}
+			} else if tokenAfterVariable.typ == itemSpace {
+				t.backup3(v, tokenAfterVariable)
+			} else {
+				t.backup2(v)
+			}
+		}
+		break
+	}
+	pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
+	for {
+		switch token := t.nextNonSpace(); token.typ {
+		case itemRightDelim, itemRightParen:
+			if len(pipe.Cmds) == 0 {
+				t.errorf("missing value for %s", context)
+			}
+			if token.typ == itemRightParen {
+				t.backup()
+			}
+			return
+		case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+			itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+			t.backup()
+			pipe.append(t.command())
+		default:
+			t.unexpected(token, context)
+		}
+	}
+}
+
+func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+	defer t.popVars(len(t.vars))
+	line = t.lex.lineNumber()
+	pipe = t.pipeline(context)
+	var next Node
+	list, next = t.itemList()
+	switch next.Type() {
+	case nodeEnd: //done
+	case nodeElse:
+		if allowElseIf {
+			// Special case for "else if". If the "else" is followed immediately by an "if",
+			// the elseControl will have left the "if" token pending. Treat
+			//	{{if a}}_{{else if b}}_{{end}}
+			// as
+			//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
+			// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
+			// is assumed. This technique works even for long if-else-if chains.
+			// TODO: Should we allow else-if in with and range?
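To make the `else if` rewrite described in the comment above concrete, a small sketch; it relies only on `parse.Parse` and the `String` methods shown earlier, and the desugared output shape follows from `BranchNode.String`:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	const elseIf = `{{if .A}}x{{else if .B}}y{{end}}`
	trees, err := parse.Parse("t", elseIf, "", "", nil)
	if err != nil {
		panic(err)
	}
	// String() prints the desugared, fully nested equivalent.
	fmt.Println(trees["t"].Root)
	// Output: {{if .A}}x{{else}}{{if .B}}y{{end}}{{end}}
}
```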
+ if t.peek().typ == itemIf { + t.next() // Consume the "if" token. + elseList = t.newList(next.Position()) + elseList.append(t.ifControl()) + // Do not consume the next item - only one {{end}} required. + break + } + } + elseList, next = t.itemList() + if next.Type() != nodeEnd { + t.errorf("expected end; found %s", next) + } + } + return pipe.Position(), line, pipe, list, elseList +} + +// If: +// {{if pipeline}} itemList {{end}} +// {{if pipeline}} itemList {{else}} itemList {{end}} +// If keyword is past. +func (t *Tree) ifControl() Node { + return t.newIf(t.parseControl(true, "if")) +} + +// Range: +// {{range pipeline}} itemList {{end}} +// {{range pipeline}} itemList {{else}} itemList {{end}} +// Range keyword is past. +func (t *Tree) rangeControl() Node { + return t.newRange(t.parseControl(false, "range")) +} + +// With: +// {{with pipeline}} itemList {{end}} +// {{with pipeline}} itemList {{else}} itemList {{end}} +// If keyword is past. +func (t *Tree) withControl() Node { + return t.newWith(t.parseControl(false, "with")) +} + +// End: +// {{end}} +// End keyword is past. +func (t *Tree) endControl() Node { + return t.newEnd(t.expect(itemRightDelim, "end").pos) +} + +// Else: +// {{else}} +// Else keyword is past. +func (t *Tree) elseControl() Node { + // Special case for "else if". + peek := t.peekNonSpace() + if peek.typ == itemIf { + // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". + return t.newElse(peek.pos, t.lex.lineNumber()) + } + return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) +} + +// Template: +// {{template stringValue pipeline}} +// Template keyword is past. The name must be something that can evaluate +// to a string. +func (t *Tree) templateControl() Node { + var name string + token := t.nextNonSpace() + switch token.typ { + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + name = s + default: + t.unexpected(token, "template invocation") + } + var pipe *PipeNode + if t.nextNonSpace().typ != itemRightDelim { + t.backup() + // Do not pop variables; they persist until "end". + pipe = t.pipeline("template") + } + return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) +} + +// command: +// operand (space operand)* +// space-separated arguments up to a pipeline character or right delimiter. +// we consume the pipe character but leave the right delim to terminate the action. +func (t *Tree) command() *CommandNode { + cmd := t.newCommand(t.peekNonSpace().pos) + for { + t.peekNonSpace() // skip leading spaces. + operand := t.operand() + if operand != nil { + cmd.append(operand) + } + switch token := t.next(); token.typ { + case itemSpace: + continue + case itemError: + t.errorf("%s", token.val) + case itemRightDelim, itemRightParen: + t.backup() + case itemPipe: + default: + t.errorf("unexpected %s in operand; missing space?", token) + } + break + } + if len(cmd.Args) == 0 { + t.errorf("empty command") + } + return cmd +} + +// operand: +// term .Field* +// An operand is a space-separated component of a command, +// a term possibly followed by field accesses. +// A nil return means the next item is not an operand. 
+func (t *Tree) operand() Node { + node := t.term() + if node == nil { + return nil + } + if t.peek().typ == itemField { + chain := t.newChain(t.peek().pos, node) + for t.peek().typ == itemField { + chain.Add(t.next().val) + } + // Compatibility with original API: If the term is of type NodeField + // or NodeVariable, just put more fields on the original. + // Otherwise, keep the Chain node. + // TODO: Switch to Chains always when we can. + switch node.Type() { + case NodeField: + node = t.newField(chain.Position(), chain.String()) + case NodeVariable: + node = t.newVariable(chain.Position(), chain.String()) + default: + node = chain + } + } + return node +} + +// term: +// literal (number, string, nil, boolean) +// function (identifier) +// . +// .Field +// $ +// '(' pipeline ')' +// A term is a simple "expression". +// A nil return means the next item is not a term. +func (t *Tree) term() Node { + switch token := t.nextNonSpace(); token.typ { + case itemError: + t.errorf("%s", token.val) + case itemIdentifier: + if !t.hasFunction(token.val) { + t.errorf("function %q not defined", token.val) + } + return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) + case itemDot: + return t.newDot(token.pos) + case itemNil: + return t.newNil(token.pos) + case itemVariable: + return t.useVar(token.pos, token.val) + case itemField: + return t.newField(token.pos, token.val) + case itemBool: + return t.newBool(token.pos, token.val == "true") + case itemCharConstant, itemComplex, itemNumber: + number, err := t.newNumber(token.pos, token.val, token.typ) + if err != nil { + t.error(err) + } + return number + case itemLeftParen: + pipe := t.pipeline("parenthesized pipeline") + if token := t.next(); token.typ != itemRightParen { + t.errorf("unclosed right paren: unexpected %s", token) + } + return pipe + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + return t.newString(token.pos, token.val, s) + } + t.backup() + return nil +} + +// hasFunction reports if a function name exists in the Tree's maps. +func (t *Tree) hasFunction(name string) bool { + for _, funcMap := range t.funcs { + if funcMap == nil { + continue + } + if funcMap[name] != nil { + return true + } + } + return false +} + +// popVars trims the variable list to the specified length +func (t *Tree) popVars(n int) { + t.vars = t.vars[:n] +} + +// useVar returns a node for a variable reference. It errors if the +// variable is not defined. +func (t *Tree) useVar(pos Pos, name string) Node { + v := t.newVariable(pos, name) + for _, varName := range t.vars { + if varName == v.Ident[0] { + return v + } + } + t.errorf("undefined variable %q", v.Ident[0]) + return nil +} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go new file mode 100644 index 00000000..447ed2ab --- /dev/null +++ b/vendor/github.com/alecthomas/template/template.go @@ -0,0 +1,218 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "fmt" + "reflect" + + "github.com/alecthomas/template/parse" +) + +// common holds the information shared by related templates. +type common struct { + tmpl map[string]*Template + // We use two maps, one for parsing and one for execution. + // This separation makes the API cleaner since it doesn't + // expose reflection to the client. 
+ parseFuncs FuncMap + execFuncs map[string]reflect.Value +} + +// Template is the representation of a parsed template. The *parse.Tree +// field is exported only for use by html/template and should be treated +// as unexported by all other clients. +type Template struct { + name string + *parse.Tree + *common + leftDelim string + rightDelim string +} + +// New allocates a new template with the given name. +func New(name string) *Template { + return &Template{ + name: name, + } +} + +// Name returns the name of the template. +func (t *Template) Name() string { + return t.name +} + +// New allocates a new template associated with the given one and with the same +// delimiters. The association, which is transitive, allows one template to +// invoke another with a {{template}} action. +func (t *Template) New(name string) *Template { + t.init() + return &Template{ + name: name, + common: t.common, + leftDelim: t.leftDelim, + rightDelim: t.rightDelim, + } +} + +func (t *Template) init() { + if t.common == nil { + t.common = new(common) + t.tmpl = make(map[string]*Template) + t.parseFuncs = make(FuncMap) + t.execFuncs = make(map[string]reflect.Value) + } +} + +// Clone returns a duplicate of the template, including all associated +// templates. The actual representation is not copied, but the name space of +// associated templates is, so further calls to Parse in the copy will add +// templates to the copy but not to the original. Clone can be used to prepare +// common templates and use them with variant definitions for other templates +// by adding the variants after the clone is made. +func (t *Template) Clone() (*Template, error) { + nt := t.copy(nil) + nt.init() + nt.tmpl[t.name] = nt + for k, v := range t.tmpl { + if k == t.name { // Already installed. + continue + } + // The associated templates share nt's common structure. + tmpl := v.copy(nt.common) + nt.tmpl[k] = tmpl + } + for k, v := range t.parseFuncs { + nt.parseFuncs[k] = v + } + for k, v := range t.execFuncs { + nt.execFuncs[k] = v + } + return nt, nil +} + +// copy returns a shallow copy of t, with common set to the argument. +func (t *Template) copy(c *common) *Template { + nt := New(t.name) + nt.Tree = t.Tree + nt.common = c + nt.leftDelim = t.leftDelim + nt.rightDelim = t.rightDelim + return nt +} + +// AddParseTree creates a new template with the name and parse tree +// and associates it with t. +func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { + if t.common != nil && t.tmpl[name] != nil { + return nil, fmt.Errorf("template: redefinition of template %q", name) + } + nt := t.New(name) + nt.Tree = tree + t.tmpl[name] = nt + return nt, nil +} + +// Templates returns a slice of the templates associated with t, including t +// itself. +func (t *Template) Templates() []*Template { + if t.common == nil { + return nil + } + // Return a slice so we don't expose the map. + m := make([]*Template, 0, len(t.tmpl)) + for _, v := range t.tmpl { + m = append(m, v) + } + return m +} + +// Delims sets the action delimiters to the specified strings, to be used in +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// definitions will inherit the settings. An empty delimiter stands for the +// corresponding default: {{ or }}. +// The return value is the template, so calls can be chained. +func (t *Template) Delims(left, right string) *Template { + t.leftDelim = left + t.rightDelim = right + return t +} + +// Funcs adds the elements of the argument map to the template's function map. 
+// It panics if a value in the map is not a function with appropriate return
+// type. However, it is legal to overwrite elements of the map. The return
+// value is the template, so calls can be chained.
+func (t *Template) Funcs(funcMap FuncMap) *Template {
+	t.init()
+	addValueFuncs(t.execFuncs, funcMap)
+	addFuncs(t.parseFuncs, funcMap)
+	return t
+}
+
+// Lookup returns the template with the given name that is associated with t,
+// or nil if there is no such template.
+func (t *Template) Lookup(name string) *Template {
+	if t.common == nil {
+		return nil
+	}
+	return t.tmpl[name]
+}
+
+// Parse parses a string into a template. Nested template definitions will be
+// associated with the top-level template t. Parse may be called multiple times
+// to parse definitions of templates to associate with t. It is an error if a
+// resulting template is non-empty (contains content other than template
+// definitions) and would replace a non-empty template with the same name.
+// (In multiple calls to Parse with the same receiver template, only one call
+// can contain text other than space, comments, and template definitions.)
+func (t *Template) Parse(text string) (*Template, error) {
+	t.init()
+	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
+	if err != nil {
+		return nil, err
+	}
+	// Add the newly parsed trees, including the one for t, into our common structure.
+	for name, tree := range trees {
+		// If the name we parsed is the name of this template, overwrite this template.
+		// The associate method checks it's not a redefinition.
+		tmpl := t
+		if name != t.name {
+			tmpl = t.New(name)
+		}
+		// Even if t == tmpl, we need to install it in the common.tmpl map.
+		if replace, err := t.associate(tmpl, tree); err != nil {
+			return nil, err
+		} else if replace {
+			tmpl.Tree = tree
+		}
+		tmpl.leftDelim = t.leftDelim
+		tmpl.rightDelim = t.rightDelim
+	}
+	return t, nil
+}
+
+// associate installs the new template into the group of templates associated
+// with t. It is an error to reuse a name except to overwrite an empty
+// template. The two are already known to share the common structure.
+// The boolean return value reports whether to store this tree as t.Tree.
+func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
+	if new.common != t.common {
+		panic("internal error: associate not common")
+	}
+	name := new.name
+	if old := t.tmpl[name]; old != nil {
+		oldIsEmpty := parse.IsEmptyTree(old.Root)
+		newIsEmpty := parse.IsEmptyTree(tree.Root)
+		if newIsEmpty {
+			// Whether old is empty or not, new is empty; no reason to replace old.
+ return false, nil + } + if !oldIsEmpty { + return false, fmt.Errorf("template: redefinition of template %q", name) + } + } + t.tmpl[name] = new + return true, nil +} diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING new file mode 100644 index 00000000..2993ec08 --- /dev/null +++ b/vendor/github.com/alecthomas/units/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md new file mode 100644 index 00000000..bee884e3 --- /dev/null +++ b/vendor/github.com/alecthomas/units/README.md @@ -0,0 +1,11 @@ +# Units - Helpful unit multipliers and functions for Go + +The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. + +It allows for code like this: + +```go +n, err := ParseBase2Bytes("1KB") +// n == 1024 +n = units.Mebibyte * 512 +``` diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go new file mode 100644 index 00000000..eaadeb80 --- /dev/null +++ b/vendor/github.com/alecthomas/units/bytes.go @@ -0,0 +1,83 @@ +package units + +// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, +// etc.). +type Base2Bytes int64 + +// Base-2 byte units. +const ( + Kibibyte Base2Bytes = 1024 + KiB = Kibibyte + Mebibyte = Kibibyte * 1024 + MiB = Mebibyte + Gibibyte = Mebibyte * 1024 + GiB = Gibibyte + Tebibyte = Gibibyte * 1024 + TiB = Tebibyte + Pebibyte = Tebibyte * 1024 + PiB = Pebibyte + Exbibyte = Pebibyte * 1024 + EiB = Exbibyte +) + +var ( + bytesUnitMap = MakeUnitMap("iB", "B", 1024) + oldBytesUnitMap = MakeUnitMap("B", "B", 1024) +) + +// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB +// and KiB are both 1024. +func ParseBase2Bytes(s string) (Base2Bytes, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, oldBytesUnitMap) + } + return Base2Bytes(n), err +} + +func (b Base2Bytes) String() string { + return ToString(int64(b), 1024, "iB", "B") +} + +var ( + metricBytesUnitMap = MakeUnitMap("B", "B", 1000) +) + +// MetricBytes are SI byte units (1000 bytes in a kilobyte). +type MetricBytes SI + +// SI base-10 byte units. 
+const ( + Kilobyte MetricBytes = 1000 + KB = Kilobyte + Megabyte = Kilobyte * 1000 + MB = Megabyte + Gigabyte = Megabyte * 1000 + GB = Gigabyte + Terabyte = Gigabyte * 1000 + TB = Terabyte + Petabyte = Terabyte * 1000 + PB = Petabyte + Exabyte = Petabyte * 1000 + EB = Exabyte +) + +// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. +func ParseMetricBytes(s string) (MetricBytes, error) { + n, err := ParseUnit(s, metricBytesUnitMap) + return MetricBytes(n), err +} + +func (m MetricBytes) String() string { + return ToString(int64(m), 1000, "B", "B") +} + +// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, +// respectively. That is, KiB represents 1024 and KB represents 1000. +func ParseStrictBytes(s string) (int64, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, metricBytesUnitMap) + } + return int64(n), err +} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go new file mode 100644 index 00000000..156ae386 --- /dev/null +++ b/vendor/github.com/alecthomas/units/doc.go @@ -0,0 +1,13 @@ +// Package units provides helpful unit multipliers and functions for Go. +// +// The goal of this package is to have functionality similar to the time [1] package. +// +// +// [1] http://golang.org/pkg/time/ +// +// It allows for code like this: +// +// n, err := ParseBase2Bytes("1KB") +// // n == 1024 +// n = units.Mebibyte * 512 +package units diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go new file mode 100644 index 00000000..8234a9d5 --- /dev/null +++ b/vendor/github.com/alecthomas/units/si.go @@ -0,0 +1,26 @@ +package units + +// SI units. +type SI int64 + +// SI unit multiples. +const ( + Kilo SI = 1000 + Mega = Kilo * 1000 + Giga = Mega * 1000 + Tera = Giga * 1000 + Peta = Tera * 1000 + Exa = Peta * 1000 +) + +func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { + return map[string]float64{ + shortSuffix: 1, + "K" + suffix: float64(scale), + "M" + suffix: float64(scale * scale), + "G" + suffix: float64(scale * scale * scale), + "T" + suffix: float64(scale * scale * scale * scale), + "P" + suffix: float64(scale * scale * scale * scale * scale), + "E" + suffix: float64(scale * scale * scale * scale * scale * scale), + } +} diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go new file mode 100644 index 00000000..6527e92d --- /dev/null +++ b/vendor/github.com/alecthomas/units/util.go @@ -0,0 +1,138 @@ +package units + +import ( + "errors" + "fmt" + "strings" +) + +var ( + siUnits = []string{"", "K", "M", "G", "T", "P", "E"} +) + +func ToString(n int64, scale int64, suffix, baseSuffix string) string { + mn := len(siUnits) + out := make([]string, mn) + for i, m := range siUnits { + if n%scale != 0 || i == 0 && n == 0 { + s := suffix + if i == 0 { + s = baseSuffix + } + out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) + } + n /= scale + if n == 0 { + break + } + } + return strings.Join(out, "") +} + +// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 +var errLeadingInt = errors.New("units: bad [0-9]*") // never printed + +// leadingInt consumes the leading [0-9]* from s. 
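Before the low-level scanner below, a hedged sketch of this package's exported surface. `ParseBase2Bytes`, `MakeUnitMap`, and `ParseUnit` all appear in the sources above; the `hertz` unit map is a hypothetical example, not something the package ships:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// Base-2 parsing accepts both "GiB" and the legacy "GB" as 1024-based.
	n, err := units.ParseBase2Bytes("1.5GiB")
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(n)) // 1610612736

	// MakeUnitMap + ParseUnit can build custom unit families; "Hz" here
	// is a hypothetical unit family defined for this example.
	hertz := units.MakeUnitMap("Hz", "Hz", 1000)
	freq, err := units.ParseUnit("2.4GHz", hertz)
	if err != nil {
		panic(err)
	}
	fmt.Println(freq) // 2400000000
}
```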
+func leadingInt(s string) (x int64, rem string, err error) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x >= (1<<63-10)/10 { + // overflow + return 0, "", errLeadingInt + } + x = x*10 + int64(c) - '0' + } + return x, s[i:], nil +} + +func ParseUnit(s string, unitMap map[string]float64) (int64, error) { + // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ + orig := s + f := float64(0) + neg := false + + // Consume [-+]? + if s != "" { + c := s[0] + if c == '-' || c == '+' { + neg = c == '-' + s = s[1:] + } + } + // Special case: if all that is left is "0", this is zero. + if s == "0" { + return 0, nil + } + if s == "" { + return 0, errors.New("units: invalid " + orig) + } + for s != "" { + g := float64(0) // this element of the sequence + + var x int64 + var err error + + // The next character must be [0-9.] + if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { + return 0, errors.New("units: invalid " + orig) + } + // Consume [0-9]* + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + g = float64(x) + pre := pl != len(s) // whether we consumed anything before a period + + // Consume (\.[0-9]*)? + post := false + if s != "" && s[0] == '.' { + s = s[1:] + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + scale := 1.0 + for n := pl - len(s); n > 0; n-- { + scale *= 10 + } + g += float64(x) / scale + post = pl != len(s) + } + if !pre && !post { + // no digits (e.g. ".s" or "-.s") + return 0, errors.New("units: invalid " + orig) + } + + // Consume unit. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c == '.' || ('0' <= c && c <= '9') { + break + } + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, errors.New("units: unknown unit " + u + " in " + orig) + } + + f += g * unit + } + + if neg { + f = -f + } + if f < float64(-1<<63) || f > float64(1<<63-1) { + return 0, errors.New("units: overflow parsing unit") + } + return int64(f), nil +} diff --git a/vendor/github.com/beevik/ntp/.travis.yml b/vendor/github.com/beevik/ntp/.travis.yml new file mode 100644 index 00000000..ad71df28 --- /dev/null +++ b/vendor/github.com/beevik/ntp/.travis.yml @@ -0,0 +1,15 @@ +language: go +sudo: false + +go: + - 1.7.x + - 1.8.x + - 1.9.x + - tip + +matrix: + allow_failures: + - go: tip + +script: + - go test -v ./... diff --git a/vendor/github.com/beevik/ntp/CONTRIBUTORS b/vendor/github.com/beevik/ntp/CONTRIBUTORS new file mode 100644 index 00000000..626c12eb --- /dev/null +++ b/vendor/github.com/beevik/ntp/CONTRIBUTORS @@ -0,0 +1,7 @@ +Brett Vickers (beevik) +Mikhail Salosin (AlphaB) +Anton Tolchanov (knyar) +Christopher Batey (chbatey) +Meng Zhuo (mengzhuo) +Leonid Evdokimov (darkk) +Ask Bjørn Hansen (abh) \ No newline at end of file diff --git a/vendor/github.com/beevik/ntp/LICENSE b/vendor/github.com/beevik/ntp/LICENSE new file mode 100644 index 00000000..45d3d495 --- /dev/null +++ b/vendor/github.com/beevik/ntp/LICENSE @@ -0,0 +1,24 @@ +Copyright 2015-2017 Brett Vickers. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/beevik/ntp/README.md b/vendor/github.com/beevik/ntp/README.md new file mode 100644 index 00000000..9719733b --- /dev/null +++ b/vendor/github.com/beevik/ntp/README.md @@ -0,0 +1,71 @@ +[![Build Status](https://travis-ci.org/beevik/ntp.svg?branch=master)](https://travis-ci.org/beevik/ntp) +[![GoDoc](https://godoc.org/github.com/beevik/ntp?status.svg)](https://godoc.org/github.com/beevik/ntp) + +ntp +=== + +The ntp package is an implementation of a Simple NTP (SNTP) client based on +[RFC5905](https://tools.ietf.org/html/rfc5905). It allows you to connect to +a remote NTP server and request information about the current time. + + +## Querying the current time + +If all you care about is the current time according to a remote NTP server, +simply use the `Time` function: +```go +time, err := ntp.Time("0.beevik-ntp.pool.ntp.org") +``` + + +## Querying time metadata + +To obtain the current time as well as some additional metadata about the time, +use the `Query` function: +```go +response, err := ntp.Query("0.beevik-ntp.pool.ntp.org") +time := time.Now().Add(response.ClockOffset) +``` + +Alternatively, use the `QueryWithOptions` function if you want to change the +default behavior used by the `Query` function: +```go +options := ntp.QueryOptions{ Timeout: 30*time.Second, TTL: 5 } +response, err := ntp.QueryWithOptions("0.beevik-ntp.pool.ntp.org", options) +time := time.Now().Add(response.ClockOffset) +``` + +The `Response` structure returned by `Query` includes the following +information: +* `Time`: The time the server transmitted its response, according to its own clock. +* `ClockOffset`: The estimated offset of the local system clock relative to the server's clock. For a more accurate time reading, you may add this offset to any subsequent system clock reading. +* `RTT`: An estimate of the round-trip-time delay between the client and the server. +* `Precision`: The precision of the server's clock reading. +* `Stratum`: The server's stratum, which indicates the number of hops from the server to the reference clock. A stratum 1 server is directly attached to the reference clock. If the stratum is zero, the server has responded with the "kiss of death". +* `ReferenceID`: A unique identifier for the consulted reference clock. +* `ReferenceTime`: The time at which the server last updated its local clock setting. +* `RootDelay`: The server's aggregate round-trip-time delay to the stratum 1 server. +* `RootDispersion`: The server's estimated maximum measurement error relative to the reference clock. 
+* `RootDistance`: An estimate of the root synchronization distance between the client and the stratum 1 server.
+* `Leap`: The leap second indicator, which indicates whether a second should be added to or removed from the current month's last minute.
+* `MinError`: A lower bound on the clock error between the client and the server.
+* `KissCode`: A 4-character string describing the reason for a "kiss of death" response (stratum=0).
+* `Poll`: The maximum polling interval between successive messages to the server.
+
+The `Response` structure's `Validate` method performs additional sanity checks
+to determine whether the response is suitable for time synchronization
+purposes.
+```go
+err := response.Validate()
+if err == nil {
+    // response data is suitable for synchronization purposes
+}
+```
+
+## Using the NTP pool
+
+The NTP pool is a shared resource used by people all over the world.
+To prevent it from becoming overloaded, please avoid querying the standard
+`pool.ntp.org` zone names in your applications. Instead, consider requesting
+your own [vendor zone](http://www.pool.ntp.org/en/vendors.html) or [joining
+the pool](http://www.pool.ntp.org/join.html).
diff --git a/vendor/github.com/beevik/ntp/RELEASE_NOTES.md b/vendor/github.com/beevik/ntp/RELEASE_NOTES.md
new file mode 100644
index 00000000..603765e9
--- /dev/null
+++ b/vendor/github.com/beevik/ntp/RELEASE_NOTES.md
@@ -0,0 +1,54 @@
+Release v0.2.0
+==============
+
+There are no breaking changes or further deprecations in this release.
+
+**Changes**
+
+* Added `KissCode` to the `Response` structure.
+
+
+Release v0.1.1
+==============
+
+**Breaking changes**
+
+* Removed the `MaxStratum` constant.
+
+**Deprecations**
+
+* Officially deprecated the `TimeV` function.
+
+**Internal changes**
+
+* Removed `minDispersion` from the `RootDistance` calculation, since the value
+  was arbitrary.
+* Moved some validation into the main code path so that invalid `TransmitTime` and
+  `mode` responses trigger an error even when `Response.Validate` is not
+  called.
+
+
+Release v0.1.0
+==============
+
+This is the initial release of the `ntp` package. Currently it supports the following features:
+* `Time()` to query the current time according to a remote NTP server.
+* `Query()` to query multiple pieces of time-related information from a remote NTP server.
+* `QueryWithOptions()`, which is like `Query()` but with the ability to override default query options.
+
+Time-related information returned by the `Query` functions includes:
+* `Time`: the time the server transmitted its response, according to the server's clock.
+* `ClockOffset`: the estimated offset of the client's clock relative to the server's clock. You may apply this offset to any local system clock reading once the query is complete.
+* `RTT`: an estimate of the round-trip-time delay between the client and the server.
+* `Precision`: the precision of the server's clock reading.
+* `Stratum`: the "stratum" level of the server, where 1 indicates a server directly connected to a reference clock, and values greater than 1 indicate the number of hops from the reference clock.
+* `ReferenceID`: A unique identifier for the NTP server that was contacted.
+* `ReferenceTime`: The time at which the server last updated its local clock setting.
+* `RootDelay`: The server's round-trip delay to the reference clock.
+* `RootDispersion`: The server's total dispersion to the reference clock.
+* `RootDistance`: An estimate of the root synchronization distance.
+* `Leap`: The leap second indicator.
+* `MinError`: A lower bound on the clock error between the client and the server.
+* `Poll`: the maximum polling interval between successive messages to the server.
+
+The `Response` structure returned by the `Query` functions also provides a `Validate()` method that returns an error if any of the fields returned by the server are invalid.
diff --git a/vendor/github.com/beevik/ntp/ntp.go b/vendor/github.com/beevik/ntp/ntp.go
new file mode 100644
index 00000000..ba47e436
--- /dev/null
+++ b/vendor/github.com/beevik/ntp/ntp.go
@@ -0,0 +1,565 @@
+// Copyright 2015-2017 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ntp provides an implementation of a Simple NTP (SNTP) client
+// capable of querying the current time from a remote NTP server. See
+// RFC5905 (https://tools.ietf.org/html/rfc5905) for more details.
+//
+// This approach grew out of a go-nuts post by Michael Hofmann:
+// https://groups.google.com/forum/?fromgroups#!topic/golang-nuts/FlcdMU5fkLQ
+package ntp
+
+import (
+	"crypto/rand"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"golang.org/x/net/ipv4"
+)
+
+// The LeapIndicator is used to warn if a leap second should be inserted
+// or deleted in the last minute of the current month.
+type LeapIndicator uint8
+
+const (
+	// LeapNoWarning indicates no impending leap second.
+	LeapNoWarning LeapIndicator = 0
+
+	// LeapAddSecond indicates the last minute of the day has 61 seconds.
+	LeapAddSecond = 1
+
+	// LeapDelSecond indicates the last minute of the day has 59 seconds.
+	LeapDelSecond = 2
+
+	// LeapNotInSync indicates an unsynchronized leap second.
+	LeapNotInSync = 3
+)
+
+// Internal constants
+const (
+	defaultNtpVersion = 4
+	nanoPerSec        = 1000000000
+	maxStratum        = 16
+	defaultTimeout    = 5 * time.Second
+	maxPollInterval   = (1 << 17) * time.Second
+	maxDispersion     = 16 * time.Second
+)
+
+// Internal variables
+var (
+	ntpEpoch = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
+)
+
+type mode uint8
+
+// NTP modes. This package uses only client mode.
+const (
+	reserved mode = 0 + iota
+	symmetricActive
+	symmetricPassive
+	client
+	server
+	broadcast
+	controlMessage
+	reservedPrivate
+)
+
+// An ntpTime is a 64-bit fixed-point (Q32.32) representation of the number of
+// seconds elapsed.
+type ntpTime uint64
+
+// Duration interprets the fixed-point ntpTime as a number of elapsed seconds
+// and returns the corresponding time.Duration value.
+func (t ntpTime) Duration() time.Duration {
+	sec := (t >> 32) * nanoPerSec
+	frac := (t & 0xffffffff) * nanoPerSec >> 32
+	return time.Duration(sec + frac)
+}
+
+// Time interprets the fixed-point ntpTime as an absolute time and returns
+// the corresponding time.Time value.
+func (t ntpTime) Time() time.Time {
+	return ntpEpoch.Add(t.Duration())
+}
+
+// toNtpTime converts the time.Time value t into its 64-bit fixed-point
+// ntpTime representation.
+func toNtpTime(t time.Time) ntpTime {
+	nsec := uint64(t.Sub(ntpEpoch))
+	sec := nsec / nanoPerSec
+	// Round up the fractional component so that repeated conversions
+	// between time.Time and ntpTime do not yield continually decreasing
+	// results.
+	frac := (((nsec - sec*nanoPerSec) << 32) + nanoPerSec - 1) / nanoPerSec
+	return ntpTime(sec<<32 | frac)
+}
+
+// An ntpTimeShort is a 32-bit fixed-point (Q16.16) representation of the
+// number of seconds elapsed.
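+// It is used for the RootDelay and RootDispersion fields of an NTP packet.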
+type ntpTimeShort uint32 + +// Duration interprets the fixed-point ntpTimeShort as a number of elapsed +// seconds and returns the corresponding time.Duration value. +func (t ntpTimeShort) Duration() time.Duration { + t64 := uint64(t) + sec := (t64 >> 16) * nanoPerSec + frac := (t64 & 0xffff) * nanoPerSec >> 16 + return time.Duration(sec + frac) +} + +// msg is an internal representation of an NTP packet. +type msg struct { + LiVnMode uint8 // Leap Indicator (2) + Version (3) + Mode (3) + Stratum uint8 + Poll int8 + Precision int8 + RootDelay ntpTimeShort + RootDispersion ntpTimeShort + ReferenceID uint32 + ReferenceTime ntpTime + OriginTime ntpTime + ReceiveTime ntpTime + TransmitTime ntpTime +} + +// setVersion sets the NTP protocol version on the message. +func (m *msg) setVersion(v int) { + m.LiVnMode = (m.LiVnMode & 0xc7) | uint8(v)<<3 +} + +// setMode sets the NTP protocol mode on the message. +func (m *msg) setMode(md mode) { + m.LiVnMode = (m.LiVnMode & 0xf8) | uint8(md) +} + +// setLeap modifies the leap indicator on the message. +func (m *msg) setLeap(li LeapIndicator) { + m.LiVnMode = (m.LiVnMode & 0x3f) | uint8(li)<<6 +} + +// getVersion returns the version value in the message. +func (m *msg) getVersion() int { + return int((m.LiVnMode >> 3) & 0x07) +} + +// getMode returns the mode value in the message. +func (m *msg) getMode() mode { + return mode(m.LiVnMode & 0x07) +} + +// getLeap returns the leap indicator on the message. +func (m *msg) getLeap() LeapIndicator { + return LeapIndicator((m.LiVnMode >> 6) & 0x03) +} + +// QueryOptions contains the list of configurable options that may be used +// with the QueryWithOptions function. +type QueryOptions struct { + Timeout time.Duration // defaults to 5 seconds + Version int // NTP protocol version, defaults to 4 + LocalAddress string // IP address to use for the client address + Port int // Server port, defaults to 123 + TTL int // IP TTL to use, defaults to system default +} + +// A Response contains time data, some of which is returned by the NTP server +// and some of which is calculated by the client. +type Response struct { + // Time is the transmit time reported by the server just before it + // responded to the client's NTP query. + Time time.Time + + // ClockOffset is the estimated offset of the client clock relative to + // the server. Add this to the client's system clock time to obtain a + // more accurate time. + ClockOffset time.Duration + + // RTT is the measured round-trip-time delay estimate between the client + // and the server. + RTT time.Duration + + // Precision is the reported precision of the server's clock. + Precision time.Duration + + // Stratum is the "stratum level" of the server. The smaller the number, + // the closer the server is to the reference clock. Stratum 1 servers are + // attached directly to the reference clock. A stratum value of 0 + // indicates the "kiss of death," which typically occurs when the client + // issues too many requests to the server in a short period of time. + Stratum uint8 + + // ReferenceID is a 32-bit identifier identifying the server or + // reference clock. + ReferenceID uint32 + + // ReferenceTime is the time when the server's system clock was last + // set or corrected. + ReferenceTime time.Time + + // RootDelay is the server's estimated aggregate round-trip-time delay to + // the stratum 1 server. + RootDelay time.Duration + + // RootDispersion is the server's estimated maximum measurement error + // relative to the stratum 1 server. 
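+	// It corresponds to the packet's root dispersion field.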
+ RootDispersion time.Duration + + // RootDistance is an estimate of the total synchronization distance + // between the client and the stratum 1 server. + RootDistance time.Duration + + // Leap indicates whether a leap second should be added or removed from + // the current month's last minute. + Leap LeapIndicator + + // MinError is a lower bound on the error between the client and server + // clocks. When the client and server are not synchronized to the same + // clock, the reported timestamps may appear to violate the principle of + // causality. In other words, the NTP server's response may indicate + // that a message was received before it was sent. In such cases, the + // minimum error may be useful. + MinError time.Duration + + // KissCode is a 4-character string describing the reason for a + // "kiss of death" response (stratum = 0). For a list of standard kiss + // codes, see https://tools.ietf.org/html/rfc5905#section-7.4. + KissCode string + + // Poll is the maximum interval between successive NTP polling messages. + // It is not relevant for simple NTP clients like this one. + Poll time.Duration +} + +// Validate checks if the response is valid for the purposes of time +// synchronization. +func (r *Response) Validate() error { + // Handle invalid stratum values. + if r.Stratum == 0 { + return fmt.Errorf("kiss of death received: %s", r.KissCode) + } + if r.Stratum >= maxStratum { + return errors.New("invalid stratum in response") + } + + // Handle invalid leap second indicator. + if r.Leap == LeapNotInSync { + return errors.New("invalid leap second") + } + + // Estimate the "freshness" of the time. If it exceeds the maximum + // polling interval (~36 hours), then it cannot be considered "fresh". + freshness := r.Time.Sub(r.ReferenceTime) + if freshness > maxPollInterval { + return errors.New("server clock not fresh") + } + + // Calculate the peer synchronization distance, lambda: + // lambda := RootDelay/2 + RootDispersion + // If this value exceeds MAXDISP (16s), then the time is not suitable + // for synchronization purposes. + // https://tools.ietf.org/html/rfc5905#appendix-A.5.1.1. + lambda := r.RootDelay/2 + r.RootDispersion + if lambda > maxDispersion { + return errors.New("invalid dispersion") + } + + // If the server's transmit time is before its reference time, the + // response is invalid. + if r.Time.Before(r.ReferenceTime) { + return errors.New("invalid time reported") + } + + // nil means the response is valid. + return nil +} + +// Query returns a response from the remote NTP server host. It contains +// the time at which the server transmitted the response as well as other +// useful information about the time and the remote server. +func Query(host string) (*Response, error) { + return QueryWithOptions(host, QueryOptions{}) +} + +// QueryWithOptions performs the same function as Query but allows for the +// customization of several query options. +func QueryWithOptions(host string, opt QueryOptions) (*Response, error) { + m, now, err := getTime(host, opt) + if err != nil { + return nil, err + } + return parseTime(m, now), nil +} + +// TimeV returns the current time using information from a remote NTP server. +// On error, it returns the local system time. The version may be 2, 3, or 4. +// +// Deprecated: TimeV is deprecated. Use QueryWithOptions instead. 
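+// Internally it queries the server and applies the measured clock offset to
+// the local system clock reading.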
+func TimeV(host string, version int) (time.Time, error) {
+	m, recvTime, err := getTime(host, QueryOptions{Version: version})
+	if err != nil {
+		return time.Now(), err
+	}
+
+	r := parseTime(m, recvTime)
+	err = r.Validate()
+	if err != nil {
+		return time.Now(), err
+	}
+
+	// Use the clock offset to calculate the time.
+	return time.Now().Add(r.ClockOffset), nil
+}
+
+// Time returns the current time using information from a remote NTP server.
+// It uses version 4 of the NTP protocol. On error, it returns the local
+// system time.
+func Time(host string) (time.Time, error) {
+	return TimeV(host, defaultNtpVersion)
+}
+
+// getTime performs the NTP server query and returns the response message
+// along with the local system time at which it was received.
+func getTime(host string, opt QueryOptions) (*msg, ntpTime, error) {
+	if opt.Version == 0 {
+		opt.Version = defaultNtpVersion
+	}
+	if opt.Version < 2 || opt.Version > 4 {
+		return nil, 0, errors.New("invalid protocol version requested")
+	}
+
+	// Resolve the remote NTP server address.
+	raddr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, "123"))
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Resolve the local address if specified as an option.
+	var laddr *net.UDPAddr
+	if opt.LocalAddress != "" {
+		laddr, err = net.ResolveUDPAddr("udp", net.JoinHostPort(opt.LocalAddress, "0"))
+		if err != nil {
+			return nil, 0, err
+		}
+	}
+
+	// Override the port if requested.
+	if opt.Port != 0 {
+		raddr.Port = opt.Port
+	}
+
+	// Prepare a "connection" to the remote server.
+	con, err := net.DialUDP("udp", laddr, raddr)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer con.Close()
+
+	// Set a TTL for the packet if requested.
+	if opt.TTL != 0 {
+		ipcon := ipv4.NewConn(con)
+		err = ipcon.SetTTL(opt.TTL)
+		if err != nil {
+			return nil, 0, err
+		}
+	}
+
+	// Set a timeout on the connection.
+	if opt.Timeout == 0 {
+		opt.Timeout = defaultTimeout
+	}
+	con.SetDeadline(time.Now().Add(opt.Timeout))
+
+	// Allocate a message to hold the response.
+	recvMsg := new(msg)
+
+	// Allocate a message to hold the query.
+	xmitMsg := new(msg)
+	xmitMsg.setMode(client)
+	xmitMsg.setVersion(opt.Version)
+	xmitMsg.setLeap(LeapNotInSync)
+
+	// To ensure privacy and prevent spoofing, try to use a random 64-bit
+	// value for the TransmitTime. If crypto/rand couldn't generate a
+	// random value, fall back to using the system clock. Keep track of
+	// when the message was actually transmitted.
+	bits := make([]byte, 8)
+	_, err = rand.Read(bits)
+	var xmitTime time.Time
+	if err == nil {
+		xmitMsg.TransmitTime = ntpTime(binary.BigEndian.Uint64(bits))
+		xmitTime = time.Now()
+	} else {
+		xmitTime = time.Now()
+		xmitMsg.TransmitTime = toNtpTime(xmitTime)
+	}
+
+	// Transmit the query.
+	err = binary.Write(con, binary.BigEndian, xmitMsg)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Receive the response.
+	err = binary.Read(con, binary.BigEndian, recvMsg)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Keep track of the time the response was received.
+	delta := time.Since(xmitTime)
+	if delta < 0 {
+		// The local system may have had its clock adjusted since it
+		// sent the query. In Go 1.9 and later, time.Since ensures
+		// that a monotonic clock is used, so delta can never be less
+		// than zero. In versions before 1.9, a monotonic clock is
+		// not used, so we have to check.
+		return nil, 0, errors.New("client clock ticked backwards")
+	}
+	recvTime := toNtpTime(xmitTime.Add(delta))
+
+	// Check for invalid fields.
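+	// A valid response must come from a server-mode peer, carry a nonzero
+	// transmit time, echo our transmit time as its origin time, and have a
+	// receive time no later than its transmit time.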
+ if recvMsg.getMode() != server { + return nil, 0, errors.New("invalid mode in response") + } + if recvMsg.TransmitTime == ntpTime(0) { + return nil, 0, errors.New("invalid transmit time in response") + } + if recvMsg.OriginTime != xmitMsg.TransmitTime { + return nil, 0, errors.New("server response mismatch") + } + if recvMsg.ReceiveTime > recvMsg.TransmitTime { + return nil, 0, errors.New("server clock ticked backwards") + } + + // Correct the received message's origin time using the actual + // transmit time. + recvMsg.OriginTime = toNtpTime(xmitTime) + + return recvMsg, recvTime, nil +} + +// parseTime parses the NTP packet along with the packet receive time to +// generate a Response record. +func parseTime(m *msg, recvTime ntpTime) *Response { + r := &Response{ + Time: m.TransmitTime.Time(), + ClockOffset: offset(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), + RTT: rtt(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), + Precision: toInterval(m.Precision), + Stratum: m.Stratum, + ReferenceID: m.ReferenceID, + ReferenceTime: m.ReferenceTime.Time(), + RootDelay: m.RootDelay.Duration(), + RootDispersion: m.RootDispersion.Duration(), + Leap: m.getLeap(), + MinError: minError(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), + Poll: toInterval(m.Poll), + } + + // Calculate values depending on other calculated values + r.RootDistance = rootDistance(r.RTT, r.RootDelay, r.RootDispersion) + + // If a kiss of death was received, interpret the reference ID as + // a kiss code. + if r.Stratum == 0 { + r.KissCode = kissCode(r.ReferenceID) + } + + return r +} + +// The following helper functions calculate additional metadata about the +// timestamps received from an NTP server. The timestamps returned by +// the server are given the following variable names: +// +// org = Origin Timestamp (client send time) +// rec = Receive Timestamp (server receive time) +// xmt = Transmit Timestamp (server reply time) +// dst = Destination Timestamp (client receive time) + +func rtt(org, rec, xmt, dst ntpTime) time.Duration { + // round trip delay time + // rtt = (dst-org) - (xmt-rec) + a := dst.Time().Sub(org.Time()) + b := xmt.Time().Sub(rec.Time()) + rtt := a - b + if rtt < 0 { + rtt = 0 + } + return rtt +} + +func offset(org, rec, xmt, dst ntpTime) time.Duration { + // local clock offset + // offset = ((rec-org) + (xmt-dst)) / 2 + a := rec.Time().Sub(org.Time()) + b := xmt.Time().Sub(dst.Time()) + return (a + b) / time.Duration(2) +} + +func minError(org, rec, xmt, dst ntpTime) time.Duration { + // Each NTP response contains two pairs of send/receive timestamps. + // When either pair indicates a "causality violation", we calculate the + // error as the difference in time between them. The minimum error is + // the greater of the two causality violations. + var error0, error1 ntpTime + if org >= rec { + error0 = org - rec + } + if xmt >= dst { + error1 = xmt - dst + } + if error0 > error1 { + return error0.Duration() + } + return error1.Duration() +} + +func rootDistance(rtt, rootDelay, rootDisp time.Duration) time.Duration { + // The root distance is: + // the maximum error due to all causes of the local clock + // relative to the primary server. It is defined as half the + // total delay plus total dispersion plus peer jitter. 
+ // (https://tools.ietf.org/html/rfc5905#appendix-A.5.5.2) + // + // In the reference implementation, it is calculated as follows: + // rootDist = max(MINDISP, rootDelay + rtt)/2 + rootDisp + // + peerDisp + PHI * (uptime - peerUptime) + // + peerJitter + // For an SNTP client which sends only a single packet, most of these + // terms are irrelevant and become 0. + totalDelay := rtt + rootDelay + return totalDelay/2 + rootDisp +} + +func toInterval(t int8) time.Duration { + switch { + case t > 0: + return time.Duration(uint64(time.Second) << uint(t)) + case t < 0: + return time.Duration(uint64(time.Second) >> uint(-t)) + default: + return time.Second + } +} + +func kissCode(id uint32) string { + isPrintable := func(ch byte) bool { return ch >= 32 && ch <= 126 } + + b := []byte{ + byte(id >> 24), + byte(id >> 16), + byte(id >> 8), + byte(id), + } + for _, ch := range b { + if !isPrintable(ch) { + return "" + } + } + return string(b) +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 00000000..339177be --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 00000000..1602287d --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 
+6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 
+6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 00000000..d7d14f8e --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
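+//
+// NewHighBiased mirrors NewLowBiased: the allowed error shrinks as the rank
+// approaches the top of the distribution.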
+func NewHighBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * (s.n - r)
+	}
+	return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
+			} else {
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed value of the qth quantile. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(math.Ceil(float64(l) * q))
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns the samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. 
Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 00000000..23a0ada2 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go new file mode 100644 index 00000000..1d54810a --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -0,0 +1,240 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// pathBusUnescape is the inverse of PathBusEscape. +func pathBusUnescape(path string) string { + if path == "_" { + return "" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if c == '_' && i+2 < len(path) { + res, err := hex.DecodeString(path[i+1 : i+3]) + if err == nil { + n = append(n, res...) + } + i += 2 + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subStateSubscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + propertiesSubscriber struct { + updateCh chan<- *PropertiesUpdate + errCh chan<- error + sync.Mutex + } +} + +// New establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func New() (*Conn, error) { + conn, err := NewSystemConnection() + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnection() + } + return conn, err +} + +// NewSystemConnection establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection +func NewSystemConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} + +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. 
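+// As with New, the returned Conn keeps separate underlying connections for
+// method calls and for signals.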
+func NewUserConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(func() (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. +func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." 
+ prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 00000000..4511d0a9 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,591 @@ +// Copyright 2015, 2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "errors" + "fmt" + "path" + "strconv" + + "github.com/godbus/dbus" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// StartUnit enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. 
+// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// StopUnit is similar to StartUnit but stops the specified unit rather +// than starting it. +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// RestartUnit restarts a service. If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// TryRestartUnit is like RestartUnit, except that a service that isn't running +// is not affected by the restart. +func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// ReloadOrRestart attempts a reload if the unit supports it and use a restart +// otherwise. +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" +// flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// StartTransientUnit() may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnit(), properties contains properties +// of the unit. 
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// ResetFailedUnit resets the "failed" state of a specific unit.
+func (c *Conn) ResetFailedUnit(name string) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+}
+
+// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`.
+func (c *Conn) SystemState() (*Property, error) {
+ var err error
+ var prop dbus.Variant
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
+ err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Property{Name: "SystemState", Value: prop}, nil
+}
+
+// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface.
+func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) {
+ var err error
+ var props map[string]dbus.Variant
+
+ if !path.IsValid() {
+ return nil, fmt.Errorf("invalid unit name: %v", path)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+ if err != nil {
+ return nil, err
+ }
+
+ out := make(map[string]interface{}, len(props))
+ for k, v := range props {
+ out[k] = v.Value()
+ }
+
+ return out, nil
+}
+
+// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+ path := unitPath(unit)
+ return c.getProperties(path, "org.freedesktop.systemd1.Unit")
+}
+
+// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties.
+func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) {
+ return c.getProperties(path, "org.freedesktop.systemd1.Unit")
+}
+
+func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+ var err error
+ var prop dbus.Variant
+
+ path := unitPath(unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Property{Name: propertyName, Value: prop}, nil
+}
+
+// GetUnitProperty takes an (unescaped) unit name and a property name, and
+// returns the value of that property on the org.freedesktop.systemd1.Unit interface.
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// GetServiceProperty returns the property for the given service name and property name.
+func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
+ return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
+}
+
+// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
+// Returns "dbus.Error: Unknown interface" if the unitType does not match the
+// actual type of the unit.
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+ path := unitPath(unit)
+ return c.getProperties(path, "org.freedesktop.systemd1."+unitType)
+}
+
+// SetUnitProperties() may be used to modify certain unit properties at runtime.
+// Not all properties may be changed at runtime, but many resource management
+// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
+// instantly, and stored on disk for future boots, unless runtime is true, in which
+// case the settings only apply until the next reboot. name is the name of the unit
+// to modify. properties are the settings to set, encoded as an array of property
+// name and value pairs.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
+}
+
+// GetUnitTypeProperty takes a unit name, a unit type, and a property name,
+// and returns the value of that property, scoped to the given unit type's interface.
+func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
+}
+
+type UnitStatus struct {
+ Name string // The primary unit name as string
+ Description string // The human-readable description string
+ LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
+ ActiveState string // The active state (i.e. whether the unit is currently started or not)
+ SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+ Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+ Path dbus.ObjectPath // The unit object path
+ JobId uint32 // If there is a job queued for the job unit, the numeric job id; 0 otherwise
+ JobType string // The job type as string
+ JobPath dbus.ObjectPath // The job object path
+}
+
+type storeFunc func(retvalues ...interface{}) error
+
+func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
+ result := make([][]interface{}, 0)
+ err := f(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ status := make([]UnitStatus, len(result))
+ statusInterface := make([]interface{}, len(status))
+ for i := range status {
+ statusInterface[i] = &status[i]
+ }
+
+ err = dbus.Store(resultInterface, statusInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
+
+// ListUnits returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
+}
+
+// ListUnitsFiltered returns an array with units filtered by state.
+// It takes a list of units' statuses to filter.
+func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
+}
+
+// ListUnitsByPatterns returns an array with units.
+// It takes a list of units' statuses and names to filter.
+// Note that units may be known by multiple names at the same time,
+// and hence there might be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
+}
+
+// ListUnitsByNames returns an array with units. It takes a list of units'
+// names and returns a UnitStatus array. Compared to the ListUnitsByPatterns
+// method, this method returns statuses even for inactive or non-existing
+// units. The input array should contain exact unit names, not patterns.
+func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
+}
+
+type UnitFile struct {
+ Path string
+ Type string
+}
+
+func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
+ result := make([][]interface{}, 0)
+ err := f(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ files := make([]UnitFile, len(result))
+ fileInterface := make([]interface{}, len(files))
+ for i := range files {
+ fileInterface[i] = &files[i]
+ }
+
+ err = dbus.Store(resultInterface, fileInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return files, nil
+}
+
+// ListUnitFiles returns an array of all available units on disk.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+}
+
+// ListUnitFilesByPatterns returns an array of all available units on disk that match the patterns.
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// LinkUnitFiles() links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans. The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]LinkUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+// EnableUnitFiles() may be used to enable one or more units in the system (by
+// creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install] section). The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ var carriesInstallInfo bool
+
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carriesInstallInfo, &result)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]EnableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return false, nil, err
+ }
+
+ return carriesInstallInfo, changes, nil
+}
+
+type EnableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// DisableUnitFiles() may be used to disable one or more units in the system (by
+// removing symlinks to them from /etc or /run).
+//
+// It takes a list of unit files to disable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and one boolean: whether the unit was enabled for runtime
+// only (true, /run), or persistently (false, /etc).
+//
+// This call returns an array with the changes made. The changes list
+// consists of structures with three strings: the type of the change (one of
+// symlink or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]DisableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type DisableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// MaskUnitFiles masks one or more units in the system.
+//
+// It takes three arguments:
+// * list of units to mask (either just file names or full
+// absolute paths if the unit files are residing outside
+// the usual unit search paths)
+// * runtime to specify whether the unit was enabled for runtime
+// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+// * force flag
+func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]MaskUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type MaskUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// UnmaskUnitFiles unmasks one or more units in the system.
+//
+// It takes two arguments:
+// * list of unit files to unmask (either just file names or full
+// absolute paths if the unit files are residing outside
+// the usual unit search paths)
+// * runtime to specify whether the unit was enabled for runtime
+// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]UnmaskUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type UnmaskUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Reload instructs systemd to scan for and reload unit files. This is
+// equivalent to a 'systemctl daemon-reload'.
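+//
+// Illustrative sketch (not upstream documentation): after enabling or linking
+// new unit files, a caller would typically reload before starting them (the
+// unit file path here is made up):
+//
+//  if _, _, err := conn.EnableUnitFiles([]string{"/tmp/example.service"}, true, false); err != nil {
+//      // handle error
+//  }
+//  if err := conn.Reload(); err != nil {
+//      // handle error
+//  }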
+func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} + +// unitName returns the unescaped base element of the supplied escaped path +func unitName(dpath dbus.ObjectPath) string { + return pathBusUnescape(path.Base(string(dpath))) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 00000000..6c818958 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + execStart{ + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. 
See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+ return Property{
+ Name: "RemainAfterExit",
+ Value: dbus.MakeVariant(b),
+ }
+}
+
+// PropType sets the Type service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
+func PropType(t string) Property {
+ return Property{
+ Name: "Type",
+ Value: dbus.MakeVariant(t),
+ }
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
+func PropDescription(desc string) Property {
+ return Property{
+ Name: "Description",
+ Value: dbus.MakeVariant(desc),
+ }
+}
+
+func propDependency(name string, units []string) Property {
+ return Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+ return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+ return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+ return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+ return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+ return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+ return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+ return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+ return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+ return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+ return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property.
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go new file mode 100644 index 00000000..f92e6fbe --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+type set struct {
+ data map[string]bool
+}
+
+func (s *set) Add(value string) {
+ s.data[value] = true
+}
+
+func (s *set) Remove(value string) {
+ delete(s.data, value)
+}
+
+func (s *set) Contains(value string) (exists bool) {
+ _, exists = s.data[value]
+ return
+}
+
+func (s *set) Length() int {
+ return len(s.data)
+}
+
+func (s *set) Values() (values []string) {
+ for val := range s.data {
+ values = append(values, val)
+ }
+ return
+}
+
+func newSet() *set {
+ return &set{make(map[string]bool)}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
new file mode 100644
index 00000000..5e8e9aed
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
@@ -0,0 +1,343 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "log"
+ "time"
+
+ "github.com/godbus/dbus"
+)
+
+const (
+ cleanIgnoreInterval = int64(10 * time.Second)
+ ignoreInterval = int64(30 * time.Millisecond)
+)
+
+// Subscribe sets up this connection to subscribe to all systemd dbus events.
+// This is required before calling SubscribeUnits. When the connection closes
+// systemd will automatically stop sending signals so there is no need to
+// explicitly call Unsubscribe().
+func (c *Conn) Subscribe() error {
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
+
+ err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Unsubscribe this connection from systemd dbus events.
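+//
+// Illustrative sketch of the subscribe/unsubscribe lifecycle (error handling
+// elided; assumes a connection from New()):
+//
+//  if err := conn.Subscribe(); err != nil {
+//      // handle error
+//  }
+//  statusCh, errCh := conn.SubscribeUnits(time.Second)
+//  // ... consume statusCh and errCh ...
+//  _ = conn.Unsubscribe()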
+func (c *Conn) Unsubscribe() error {
+ err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *Conn) dispatch() {
+ ch := make(chan *dbus.Signal, signalBuffer)
+
+ c.sigconn.Signal(ch)
+
+ go func() {
+ for {
+ signal, ok := <-ch
+ if !ok {
+ return
+ }
+
+ if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
+ c.jobComplete(signal)
+ }
+
+ if c.subStateSubscriber.updateCh == nil &&
+ c.propertiesSubscriber.updateCh == nil {
+ continue
+ }
+
+ var unitPath dbus.ObjectPath
+ switch signal.Name {
+ case "org.freedesktop.systemd1.Manager.JobRemoved":
+ unitName := signal.Body[2].(string)
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
+ case "org.freedesktop.systemd1.Manager.UnitNew":
+ unitPath = signal.Body[1].(dbus.ObjectPath)
+ case "org.freedesktop.DBus.Properties.PropertiesChanged":
+ if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
+ unitPath = signal.Path
+
+ if len(signal.Body) >= 2 {
+ if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok {
+ c.sendPropertiesUpdate(unitPath, changed)
+ }
+ }
+ }
+ }
+
+ if unitPath == dbus.ObjectPath("") {
+ continue
+ }
+
+ c.sendSubStateUpdate(unitPath)
+ }
+ }()
+}
+
+// SubscribeUnits returns two unbuffered channels which will receive all
+// changed units every interval. Deleted units are sent as nil.
+func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
+ return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
+}
+
+// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
+// size of the channels, the comparison function for detecting changes and a filter
+// function for cutting down on the noise that your channel receives.
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+ old := make(map[string]*UnitStatus)
+ statusChan := make(chan map[string]*UnitStatus, buffer)
+ errChan := make(chan error, buffer)
+
+ go func() {
+ for {
+ timerChan := time.After(interval)
+
+ units, err := c.ListUnits()
+ if err == nil {
+ cur := make(map[string]*UnitStatus)
+ for i := range units {
+ if filterUnit != nil && filterUnit(units[i].Name) {
+ continue
+ }
+ cur[units[i].Name] = &units[i]
+ }
+
+ // add all new or changed units
+ changed := make(map[string]*UnitStatus)
+ for n, u := range cur {
+ if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+ changed[n] = u
+ }
+ delete(old, n)
+ }
+
+ // add all deleted units
+ for oldN := range old {
+ changed[oldN] = nil
+ }
+
+ old = cur
+
+ if len(changed) != 0 {
+ statusChan <- changed
+ }
+ } else {
+ errChan <- err
+ }
+
+ <-timerChan
+ }
+ }()
+
+ return statusChan, errChan
+}
+
+type SubStateUpdate struct {
+ UnitName string
+ SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. 
If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+ if c == nil {
+ msg := "nil receiver"
+ select {
+ case errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+
+ c.subStateSubscriber.Lock()
+ defer c.subStateSubscriber.Unlock()
+ c.subStateSubscriber.updateCh = updateCh
+ c.subStateSubscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) {
+ c.subStateSubscriber.Lock()
+ defer c.subStateSubscriber.Unlock()
+
+ if c.subStateSubscriber.updateCh == nil {
+ return
+ }
+
+ isIgnored := c.shouldIgnore(unitPath)
+ defer c.cleanIgnore()
+ if isIgnored {
+ return
+ }
+
+ info, err := c.GetUnitPathProperties(unitPath)
+ if err != nil {
+ select {
+ case c.subStateSubscriber.errCh <- err:
+ default:
+ log.Printf("full error channel while reporting: %s\n", err)
+ }
+ return
+ }
+ defer c.updateIgnore(unitPath, info)
+
+ name, ok := info["Id"].(string)
+ if !ok {
+ msg := "failed to cast info.Id"
+ select {
+ case c.subStateSubscriber.errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+ substate, ok := info["SubState"].(string)
+ if !ok {
+ msg := "failed to cast info.SubState"
+ select {
+ case c.subStateSubscriber.errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+
+ update := &SubStateUpdate{name, substate}
+ select {
+ case c.subStateSubscriber.updateCh <- update:
+ default:
+ msg := "update channel is full"
+ select {
+ case c.subStateSubscriber.errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
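+//
+// For illustration (a sketch, not upstream documentation), the substate
+// machinery above might be consumed like this after calling Subscribe():
+//
+//  updates := make(chan *SubStateUpdate, 256)
+//  errs := make(chan error, 256)
+//  conn.SetSubStateSubscriber(updates, errs)
+//  go func() {
+//      for err := range errs {
+//          log.Println(err)
+//      }
+//  }()
+//  for u := range updates {
+//      log.Printf("%s is now %s", u.UnitName, u.SubState)
+//  }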
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+ t, ok := c.subStateSubscriber.ignore[path]
+ return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+ loadState, ok := info["LoadState"].(string)
+ if !ok {
+ return
+ }
+
+ // unit is unloaded - it will trigger bad systemd dbus behavior
+ if loadState == "not-found" {
+ c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+ }
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+ now := time.Now().UnixNano()
+ if c.subStateSubscriber.cleanIgnore < now {
+ c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval
+
+ for p, t := range c.subStateSubscriber.ignore {
+ if t < now {
+ delete(c.subStateSubscriber.ignore, p)
+ }
+ }
+ }
+}
+
+// PropertiesUpdate holds a map of a unit's changed properties
+type PropertiesUpdate struct {
+ UnitName string
+ Changed map[string]dbus.Variant
+}
+
+// SetPropertiesSubscriber writes to updateCh when any unit's properties
+// change. Every property change reported by systemd will be sent; that is, no
+// transitions will be "missed" (as they might be with SetSubStateSubscriber).
+// However, state changes will only be written to the channel with non-blocking
+// writes. If updateCh is full, it attempts to write an error to errCh; if
+// errCh is full, the error passes silently.
+func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) {
+ c.propertiesSubscriber.Lock()
+ defer c.propertiesSubscriber.Unlock()
+ c.propertiesSubscriber.updateCh = updateCh
+ c.propertiesSubscriber.errCh = errCh
+}
+
+// we don't need to worry about shouldIgnore() here because
+// sendPropertiesUpdate doesn't call GetProperties()
+func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) {
+ c.propertiesSubscriber.Lock()
+ defer c.propertiesSubscriber.Unlock()
+
+ if c.propertiesSubscriber.updateCh == nil {
+ return
+ }
+
+ update := &PropertiesUpdate{unitName(unitPath), changedProps}
+
+ select {
+ case c.propertiesSubscriber.updateCh <- update:
+ default:
+ msg := "update channel is full"
+ select {
+ case c.propertiesSubscriber.errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
new file mode 100644
index 00000000..5b408d58
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
@@ -0,0 +1,57 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "time"
+)
+
+// SubscriptionSet is a subscription set which is like conn.Subscribe but
+// can filter to only return events for a set of units.
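+//
+// Illustrative sketch (not upstream documentation): watching a single unit
+// through a subscription set (the unit name is made up):
+//
+//  set := conn.NewSubscriptionSet()
+//  set.Add("nginx.service")
+//  statusCh, errCh := set.Subscribe()
+//  // statusCh now only reports changes for nginx.service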
+type SubscriptionSet struct {
+ *set
+ conn *Conn
+}
+
+func (s *SubscriptionSet) filter(unit string) bool {
+ return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+ // TODO: Make fully evented by using systemd 209 with properties changed values
+ return s.conn.SubscribeUnitsCustom(time.Second, 0,
+ mismatchUnitStatus,
+ func(unit string) bool { return s.filter(unit) },
+ )
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
+ return &SubscriptionSet{newSet(), conn}
+}
+
+// mismatchUnitStatus returns true if the provided UnitStatus objects
+// are not equivalent. false is returned if the objects are equivalent.
+// Only the Name, Description and state-related fields are used in
+// the comparison.
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+ return u1.Name != u2.Name ||
+ u1.Description != u2.Description ||
+ u1.LoadState != u2.LoadState ||
+ u1.ActiveState != u2.ActiveState ||
+ u1.SubState != u2.SubState
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 00000000..bc52e96f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 00000000..79299478
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 00000000..205c28d6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 00000000..1be8ce94
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
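+	// Illustrative note (not part of the upstream source): a value such as
+	// time.Time satisfies fmt.Stringer, so it would be rendered by the
+	// Stringer case below via its String method instead of field recursion.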
+ switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
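+//
+// A hedged usage sketch (illustrative, not upstream code):
+//
+//	keys := v.MapKeys()                  // v is a reflect.Value of kind Map
+//	sort.Sort(newValuesSorter(keys, cs)) // yields a deterministic key order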
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
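+//
+// Illustrative example (an assumption, not upstream text): with SortKeys
+// enabled, the keys of map[string]int{"b": 2, "a": 1} print as "a" then "b",
+// since string kinds sort directly via valueSortLess.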
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 00000000..2e3d22f3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use sets this to a
+	// single space by default. If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures. The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods. As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
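+	//
+	// Illustrative example (not from the upstream docs): given
+	//
+	//	func (f *Flag) String() string { return "flag" }
+	//
+	// a plain (non-pointer) Flag value is not addressable, so its String
+	// method is only reachable through this unsafe-backed bypass.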
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of
+	// capacities for arrays, slices, maps and channels. This is useful when
+	// diffing data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked. The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output. Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings. This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
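+
+For example (an illustrative sketch, not upstream text):
+
+	cs := &ConfigState{Indent: "\t"}
+	fmt.Printf("%+v\n", cs.NewFormatter(myVar))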
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+   on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+   a pointer receiver are optionally invoked when passing non-pointer
+   variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+   includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+//	Indent: " "
+//	MaxDepth: 0
+//	DisableMethods: false
+//	DisablePointerMethods: false
+//	ContinueOnMethod: false
+//	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 00000000..aacaac6f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+   on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+   a pointer receiver are optionally invoked when passing non-pointer
+   variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+   includes offsets, byte values in hex, and ASCII output (only when using
+   Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+   and additional debug information such as types and all pointer addresses
+   used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+   package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+   similar to the default %v while providing the additional functionality
+   outlined above and passing unsupported format verbs such as %x and %q
+   along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+	String to use for each indentation level for Dump functions.
+	It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+	Maximum number of levels to descend into nested data structures.
+	There is no limit by default.
+
+ * DisableMethods
+	Disables invocation of error and Stringer interface methods.
+	Method invocation is enabled by default.
+
+ * DisablePointerMethods
+	Disables invocation of error and Stringer interface methods on types
+	which only accept pointer receivers from non-pointer variables.
+	Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+	DisablePointerAddresses specifies whether to disable the printing of
+	pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+	DisableCapacities specifies whether to disable the printing of
+	capacities for arrays, slices, maps and channels. This is useful when
+	diffing data structures in tests.
+
+ * ContinueOnMethod
+	Enables recursion into types after invoking error and Stringer interface
+	methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+	Specifies map keys should be sorted before being printed. Use
+	this to have a more deterministic, diffable output. Note that
+	only native types (bool, int, uint, floats, uintptr and string)
+	and types which implement error or Stringer interfaces are
+	supported with other types sorted according to the
+	reflect.Value.String() output which guarantees display
+	stability. Natural map order is used by default.
+
+ * SpewKeys
+	Specifies that, as a last resort attempt, map keys should be
+	spewed to strings and sorted by those strings. This is only
+	considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.
+The functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	%v: <**>5
+	%+v: <**>(0xf8400420d0->0xf8400420c8)5
+	%#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	%v: <*>{1 <*><shown>}
+	%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 00000000..f78d89fc
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8. It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char. It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
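+// For example (illustrative): with cs.Indent set to two spaces at depth 3,
+// six spaces are written before the value.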
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 00000000..b04edb7d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
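+//
+// For example (illustrative): with the '+' and '#' flags set on the fmt
+// state, this rebuilds the format string "%+#v".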
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
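+	// Illustrative note (mirroring the doc.go sample output): with %+v a
+	// double pointer prints its address chain, e.g.
+	// (0xf8400420d0->0xf8400420c8).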
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
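+		// (Illustrative note: the map case below renders as, e.g.,
+		// map[one:true], via the openMapBytes/closeMapBytes tokens.)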
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
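+
+A short illustrative sketch (an assumption, not upstream text):
+
+	fmt.Printf("%#+v\n", spew.NewFormatter(myVar))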
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 00000000..32c0e338
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.
+// It returns the number of bytes written and any write error encountered.
+// See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/ema/qdisc/.travis.yml b/vendor/github.com/ema/qdisc/.travis.yml new file mode 100644 index 00000000..7c9b4517 --- /dev/null +++ b/vendor/github.com/ema/qdisc/.travis.yml @@ -0,0 +1,16 @@ +language: go +go: + - 1.x +os: + - linux +sudo: required +before_install: + - go get github.com/golang/lint/golint + - go get honnef.co/go/tools/cmd/staticcheck + - go get -d ./... +script: + - go build -tags=gofuzz ./... + - go vet ./... + - staticcheck ./... + #- golint -set_exit_status ./... + - go test -v -race -tags=integration ./... 
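For context, the `spew` wrappers above all do the same thing: substitute every argument with the custom Formatter via `convertArgs` and then delegate to the corresponding standard `fmt` function. A minimal usage sketch follows; the `server` struct is hypothetical, used only for illustration:

    package main

    import "github.com/davecgh/go-spew/spew"

    // server is a hypothetical type, used only to illustrate the output verbs.
    type server struct {
        Addr string
        Tags []string
    }

    func main() {
        s := &server{Addr: "10.0.0.1:9100", Tags: []string{"metrics"}}

        // %v, %+v, %#v, and %#+v are intercepted by spew's Formatter;
        // any other verb (e.g. %q) falls through to the standard fmt package.
        spew.Printf("compact: %v\n", s)
        spew.Printf("typed:   %#v\n", s)

        // The Sprint family returns the formatted string instead of writing it.
        line := spew.Sprintf("%+v", s)
        _ = line
    }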
diff --git a/vendor/github.com/ema/qdisc/LICENSE.md b/vendor/github.com/ema/qdisc/LICENSE.md
new file mode 100644
index 00000000..0a38dae3
--- /dev/null
+++ b/vendor/github.com/ema/qdisc/LICENSE.md
@@ -0,0 +1,10 @@
+MIT License
+===========
+
+Copyright (C) 2017 Emanuele Rocca
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/ema/qdisc/Makefile b/vendor/github.com/ema/qdisc/Makefile
new file mode 100644
index 00000000..b313b082
--- /dev/null
+++ b/vendor/github.com/ema/qdisc/Makefile
@@ -0,0 +1,11 @@
+build:
+	go fmt
+	go build
+	go vet
+	staticcheck
+	#golint -set_exit_status
+	go test -v -race -tags=integration
+
+cover:
+	go test -coverprofile=coverage.out
+	go tool cover -html=coverage.out
diff --git a/vendor/github.com/ema/qdisc/README.md b/vendor/github.com/ema/qdisc/README.md
new file mode 100644
index 00000000..9fe5dde5
--- /dev/null
+++ b/vendor/github.com/ema/qdisc/README.md
@@ -0,0 +1,26 @@
+qdisc [![Build Status](https://travis-ci.org/ema/qdisc.svg?branch=master)](https://travis-ci.org/ema/qdisc)
+=====
+
+Package `qdisc` allows you to get queuing discipline information via netlink,
+similarly to what `tc -s qdisc show` does.
+ +Example usage +------------- + + package main + + import ( + "fmt" + + "github.com/ema/qdisc" + ) + + func main() { + info, err := qdisc.Get() + + if err == nil { + for _, msg := range info { + fmt.Printf("%+v\n", msg) + } + } + } diff --git a/vendor/github.com/ema/qdisc/get.go b/vendor/github.com/ema/qdisc/get.go new file mode 100644 index 00000000..b0e8430c --- /dev/null +++ b/vendor/github.com/ema/qdisc/get.go @@ -0,0 +1,293 @@ +package qdisc + +import ( + "fmt" + "math" + "net" + + "github.com/mdlayher/netlink" + "github.com/mdlayher/netlink/nlenc" +) + +const ( + TCA_UNSPEC = iota + TCA_KIND + TCA_OPTIONS + TCA_STATS + TCA_XSTATS + TCA_RATE + TCA_FCNT + TCA_STATS2 + TCA_STAB + __TCA_MAX +) + +const ( + TCA_STATS_UNSPEC = iota + TCA_STATS_BASIC + TCA_STATS_RATE_EST + TCA_STATS_QUEUE + TCA_STATS_APP + TCA_STATS_RATE_EST64 + __TCA_STATS_MAX +) + +// See struct tc_stats in /usr/include/linux/pkt_sched.h +type TC_Stats struct { + Bytes uint64 + Packets uint32 + Drops uint32 + Overlimits uint32 + Bps uint32 + Pps uint32 + Qlen uint32 + Backlog uint32 +} + +// See /usr/include/linux/gen_stats.h +type TC_Stats2 struct { + // struct gnet_stats_basic + Bytes uint64 + Packets uint32 + // struct gnet_stats_queue + Qlen uint32 + Backlog uint32 + Drops uint32 + Requeues uint32 + Overlimits uint32 +} + +// See struct tc_fq_qd_stats /usr/include/linux/pkt_sched.h +type TC_Fq_Qd_Stats struct { + GcFlows uint64 + HighprioPackets uint64 + TcpRetrans uint64 + Throttled uint64 + FlowsPlimit uint64 + PktsTooLong uint64 + AllocationErrors uint64 + TimeNextDelayedFlow int64 + Flows uint32 + InactiveFlows uint32 + ThrottledFlows uint32 + UnthrottleLatencyNs uint32 +} + +type QdiscInfo struct { + IfaceName string + Parent uint32 + Handle uint32 + Kind string + Bytes uint64 + Packets uint32 + Drops uint32 + Requeues uint32 + Overlimits uint32 + GcFlows uint64 + Throttled uint64 + FlowsPlimit uint64 +} + +func parseTCAStats(attr netlink.Attribute) TC_Stats { + var stats TC_Stats + stats.Bytes = nlenc.Uint64(attr.Data[0:8]) + stats.Packets = nlenc.Uint32(attr.Data[8:12]) + stats.Drops = nlenc.Uint32(attr.Data[12:16]) + stats.Overlimits = nlenc.Uint32(attr.Data[16:20]) + stats.Bps = nlenc.Uint32(attr.Data[20:24]) + stats.Pps = nlenc.Uint32(attr.Data[24:28]) + stats.Qlen = nlenc.Uint32(attr.Data[28:32]) + stats.Backlog = nlenc.Uint32(attr.Data[32:36]) + return stats +} + +func parseTCAStats2(attr netlink.Attribute) TC_Stats2 { + var stats TC_Stats2 + + nested, _ := netlink.UnmarshalAttributes(attr.Data) + + for _, a := range nested { + switch a.Type { + case TCA_STATS_BASIC: + stats.Bytes = nlenc.Uint64(a.Data[0:8]) + stats.Packets = nlenc.Uint32(a.Data[8:12]) + case TCA_STATS_QUEUE: + stats.Qlen = nlenc.Uint32(a.Data[0:4]) + stats.Backlog = nlenc.Uint32(a.Data[4:8]) + stats.Drops = nlenc.Uint32(a.Data[8:12]) + stats.Requeues = nlenc.Uint32(a.Data[12:16]) + stats.Overlimits = nlenc.Uint32(a.Data[16:20]) + default: + } + } + + return stats +} + +func parseTC_Fq_Qd_Stats(attr netlink.Attribute) (TC_Fq_Qd_Stats, error) { + var stats TC_Fq_Qd_Stats + + nested, err := netlink.UnmarshalAttributes(attr.Data) + if err != nil { + return stats, err + } + + pts := []*uint64{ + &stats.GcFlows, + &stats.HighprioPackets, + &stats.TcpRetrans, + &stats.Throttled, + &stats.FlowsPlimit, + &stats.PktsTooLong, + &stats.AllocationErrors, + } + for _, a := range nested { + switch a.Type { + case TCA_STATS_APP: + for i := 0; i < len(pts) && (i+1)*8 <= len(a.Data); i++ { + *pts[i] = nlenc.Uint64(a.Data[i*8 : (i+1)*8]) + } + default: + 
} + } + + return stats, nil +} + +func getQdiscMsgs(c *netlink.Conn) ([]netlink.Message, error) { + req := netlink.Message{ + Header: netlink.Header{ + Flags: netlink.HeaderFlagsRequest | netlink.HeaderFlagsDump, + Type: 38, // RTM_GETQDISC + }, + Data: []byte{0}, + } + + // Perform a request, receive replies, and validate the replies + msgs, err := c.Execute(req) + if err != nil { + return nil, fmt.Errorf("failed to execute request: %v", err) + } + + return msgs, nil +} + +// See https://tools.ietf.org/html/rfc3549#section-3.1.3 +func parseMessage(msg netlink.Message) (QdiscInfo, error) { + var m QdiscInfo + var s TC_Stats + var s2 TC_Stats2 + var s_fq TC_Fq_Qd_Stats + + /* + struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + unsigned short tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; + }; + */ + + if len(msg.Data) < 20 { + return m, fmt.Errorf("Short message, len=%d", len(msg.Data)) + } + + ifaceIdx := nlenc.Uint32(msg.Data[4:8]) + + m.Handle = nlenc.Uint32(msg.Data[8:12]) + m.Parent = nlenc.Uint32(msg.Data[12:16]) + + if m.Parent == math.MaxUint32 { + m.Parent = 0 + } + + // The first 20 bytes are taken by tcmsg + attrs, err := netlink.UnmarshalAttributes(msg.Data[20:]) + + if err != nil { + return m, fmt.Errorf("failed to unmarshal attributes: %v", err) + } + + for _, attr := range attrs { + switch attr.Type { + case TCA_KIND: + m.Kind = nlenc.String(attr.Data) + case TCA_STATS2: + s_fq, err = parseTC_Fq_Qd_Stats(attr) + if err != nil { + return m, err + } + if s_fq.GcFlows > 0 { + m.GcFlows = s_fq.GcFlows + } + if s_fq.Throttled > 0 { + m.Throttled = s_fq.Throttled + } + if s_fq.FlowsPlimit > 0 { + m.FlowsPlimit = s_fq.FlowsPlimit + } + + s2 = parseTCAStats2(attr) + m.Bytes = s2.Bytes + m.Packets = s2.Packets + m.Drops = s2.Drops + // requeues only available in TCA_STATS2, not in TCA_STATS + m.Requeues = s2.Requeues + m.Overlimits = s2.Overlimits + case TCA_STATS: + // Legacy + s = parseTCAStats(attr) + m.Bytes = s.Bytes + m.Packets = s.Packets + m.Drops = s.Drops + m.Overlimits = s.Overlimits + default: + // TODO: TCA_OPTIONS and TCA_XSTATS + } + } + + iface, err := net.InterfaceByIndex(int(ifaceIdx)) + + if err == nil { + m.IfaceName = iface.Name + } + + return m, err +} + +func getAndParse(c *netlink.Conn) ([]QdiscInfo, error) { + var res []QdiscInfo + + msgs, err := getQdiscMsgs(c) + + if err != nil { + return nil, err + } + + for _, msg := range msgs { + m, err := parseMessage(msg) + + if err != nil { + return nil, err + } + + res = append(res, m) + } + + return res, nil +} + +func Get() ([]QdiscInfo, error) { + const familyRoute = 0 + + c, err := netlink.Dial(familyRoute, nil) + if err != nil { + return nil, fmt.Errorf("failed to dial netlink: %v", err) + } + defer c.Close() + + return getAndParse(c) +} diff --git a/vendor/github.com/godbus/dbus/.travis.yml b/vendor/github.com/godbus/dbus/.travis.yml new file mode 100644 index 00000000..2e1bbb78 --- /dev/null +++ b/vendor/github.com/godbus/dbus/.travis.yml @@ -0,0 +1,40 @@ +dist: precise +language: go +go_import_path: github.com/godbus/dbus +sudo: true + +go: + - 1.6.3 + - 1.7.3 + - tip + +env: + global: + matrix: + - TARGET=amd64 + - TARGET=arm64 + - TARGET=arm + - TARGET=386 + - TARGET=ppc64le + +matrix: + fast_finish: true + allow_failures: + - go: tip + exclude: + - go: tip + env: TARGET=arm + - go: tip + env: TARGET=arm64 + - go: tip + env: TARGET=386 + - go: tip + env: TARGET=ppc64le + +addons: + apt: + packages: + - dbus + - dbus-x11 + +before_install: diff 
--git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md
new file mode 100644
index 00000000..c88f9b2b
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# How to Contribute
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [README](README.markdown) for build and test instructions
+- Play with the project, submit bugs, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where you want to base your work (usually master).
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format (see below).
+- Push your changes to a topic branch in your fork of the repository.
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>