From a18fe63c06dd04112116f9a20a4a26277112911d Mon Sep 17 00:00:00 2001 From: Martina Ferrari Date: Mon, 20 Jan 2020 13:25:39 +0100 Subject: [PATCH] Import prometheus-postgres-exporter_0.8.0+ds.orig.tar.gz [dgit import orig prometheus-postgres-exporter_0.8.0+ds.orig.tar.gz] --- .dockerignore | 2 + .gitignore | 21 + .travis.yml | 46 + Dockerfile | 15 + LICENSE | 13 + README.md | 270 +++ cmd/postgres_exporter/pg_setting.go | 141 ++ cmd/postgres_exporter/pg_setting_test.go | 256 +++ cmd/postgres_exporter/postgres_exporter.go | 1674 +++++++++++++++++ .../postgres_exporter_integration_test.go | 128 ++ .../postgres_exporter_test.go | 326 ++++ .../docker-postgres-replication/Dockerfile | 8 + .../docker-postgres-replication/Dockerfile.p2 | 8 + .../docker-postgres-replication/README.md | 11 + .../docker-compose.yml | 32 + .../docker-entrypoint.sh | 140 ++ .../setup-replication.sh | 22 + cmd/postgres_exporter/tests/test-smoke | 180 ++ .../tests/user_queries_ok.yaml | 23 + cmd/postgres_exporter/tests/username_file | 1 + cmd/postgres_exporter/tests/userpass_file | 1 + example.alerts.yml | 57 + gh-assets-clone.sh | 18 + gh-metrics-push.sh | 29 + mage.go | 11 + magefile.go | 786 ++++++++ postgres-metrics-get-changes.sh | 40 + postgres_exporter.rc | 89 + postgres_exporter_integration_test_script | 18 + queries.yaml | 205 ++ tools/.gitignore | 4 + tools/README.md | 9 + tools/vendor/vendor.json | 803 ++++++++ vendor/vendor.json | 357 ++++ 34 files changed, 5744 insertions(+) create mode 100644 .dockerignore create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 README.md create mode 100644 cmd/postgres_exporter/pg_setting.go create mode 100644 cmd/postgres_exporter/pg_setting_test.go create mode 100644 cmd/postgres_exporter/postgres_exporter.go create mode 100644 cmd/postgres_exporter/postgres_exporter_integration_test.go create mode 100644 cmd/postgres_exporter/postgres_exporter_test.go create mode 100755 cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile create mode 100644 cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 create mode 100644 cmd/postgres_exporter/tests/docker-postgres-replication/README.md create mode 100644 cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml create mode 100755 cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh create mode 100755 cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh create mode 100755 cmd/postgres_exporter/tests/test-smoke create mode 100644 cmd/postgres_exporter/tests/user_queries_ok.yaml create mode 100644 cmd/postgres_exporter/tests/username_file create mode 100644 cmd/postgres_exporter/tests/userpass_file create mode 100644 example.alerts.yml create mode 100755 gh-assets-clone.sh create mode 100755 gh-metrics-push.sh create mode 100644 mage.go create mode 100644 magefile.go create mode 100755 postgres-metrics-get-changes.sh create mode 100644 postgres_exporter.rc create mode 100755 postgres_exporter_integration_test_script create mode 100644 queries.yaml create mode 100644 tools/.gitignore create mode 100644 tools/README.md create mode 100644 tools/vendor/vendor.json create mode 100644 vendor/vendor.json diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..4ec85b5 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +* +!bin/ diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..66bcdb7 --- /dev/null +++ b/.gitignore @@ -0,0 
+1,21 @@ +/.build +/postgres_exporter +/postgres_exporter_integration_test +*.tar.gz +*.test +*-stamp +/.idea +/.vscode +*.iml +/cover.out +/cover.*.out +/.coverage +/bin +/release +/*.prom +/.metrics.*.*.prom +/.metrics.*.*.prom.unique +/.assets-branch +/.metrics.*.added +/.metrics.*.removed +/tools/src diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..a315bb3 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,46 @@ +sudo: required +services: +- docker +language: go +go: +- '1.11' +before_install: +- go get -v github.com/mattn/goveralls +- sudo wget -O /usr/local/bin/p2 https://github.com/wrouesnel/p2cli/releases/download/r4/p2 + && sudo chmod +x /usr/local/bin/p2 +- sudo wget -O /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/1.9.0-rc4/docker-compose-Linux-x86_64 + && sudo chmod +x /usr/local/bin/docker-compose +- sudo apt-get update && sudo apt-get install postgresql-client-common +script: +- "./gh-assets-clone.sh" +- go run mage.go -v all +- "$HOME/gopath/bin/goveralls -coverprofile=cover.out -service=travis-ci" +- go run mage.go docker +after_success: +- docker login -u $DOCKER_USER -p $DOCKER_PASS +- if [ ! -z "$TRAVIS_TAG" ]; then docker tag wrouesnel/postgres_exporter:latest wrouesnel/postgres_exporter:$TRAVIS_TAG + ; docker push wrouesnel/postgres_exporter:$TRAVIS_TAG ; fi +- if [ "$TRAVIS_BRANCH" == "master" ]; then docker push wrouesnel/postgres_exporter + ; fi +- "./postgres-metrics-get-changes.sh .assets-branch/metriclists" +- if [ "$TRAVIS_BRANCH" == "master" ]; then ./gh-metrics-push.sh ; fi +env: + global: + - DOCKER_USER=wrouesnel + - GIT_ASSETS_BRANCH=assets + - secure: sl1d85bipYhHlHTZ4fwkWrZ07px+lPMQrKPaiyQ9i5tylQAcMqwDroK0pb5HIyIl6PEx72D5atQWnEqluA/0rFt3SxqxtvT+wj6CPmmZfh2fUSol7I07QzAsi95d7q0fg2mStDdfs134Uu+JjxGKEGRu2SL3Zq+LKpaNPtIZVBqrCYYAySLiEJx+DEOfwt1ktn/qHapV5d5FYdfd7trfV411NITyA8AGk6Gy0HztRDGbfcoLOsM+CnVi1p59uUL9ck/hL2DbsB44qDKeWQaruMLwWNDETu+EVwHlDEHGBPb+wdDALnW+Ts3CAUpuGXftHV35XLLbH7NXOnS6QiH938ycfPf3INY51lV7cL6bNtFWDKMAIcPf4wQO2ts4qFhuiUeFdo7qrC6uEI5Fy/sELBgWl4O2opVY3Tf8s8OO/DSb4Cxy6solKgaETkl6EcShaEj7H/Cn7vT0+SLKCpSQlvVQXDLGg6eZTyBA+OWNElE0UvWV7znxWBlke+9NARIl4FcB/SY4A6v1ztpandHWMjNLLxZyVxFEswfU9hvf0qL9SW38OJ5cIK8pvmH2QWG7Xg/j0B3o7SHMdsM+pcSwrzsM6OENgvxPNBb/DinmMyQKxTCVcVmMo7uIS89RIylvN79E8U6NagdFkiLfa3xEHq8zCzEkHi3bsLRvytgT2X0= + - secure: 1JL8KcgkLueQ2DLL81UMYzIHX3qm1TjcO40QL2ZOfdirFWBshOiTwTXeWj5qZaGBzoVV5ezhyZaBY+t3/pObslm20ERce879hEw+TSnKN30wfBqNyv2r7rfsbKkXauultb8RNu9y/9XS0DCEyGdSTQh9UaCa4z6ulu39hffDddrGQjwW1P2gT3Npu1cDYd1iSO36rrA6yXjaoN8OW8U4znKVjOGnarxxFnXJkiYv2PfIrZA6BpL3d0syJtWDyr1G+B48oK9VK+fBV9K0G0E67fJvqB3ANXN3D41il3S+cs8Ulcd7hF+LWxpMsP2r1/XHYSDw3Iiz0QFKKzoyxNdipvdjAVDxrWylyLnmTBYzXk41kRv88mKVLBQM1dbzsLXYcsE2pgIZxxq9OHGZ5CUJ8t0oz5D9oXMUy4QOMQ36jZdvD048aB7DGp4EF2J7ILIhUZrHHErOlXotnsYvNMvamNwqB5Jg4NC+y5QHxERJ+HK5oPrLy+iCb2kmWatSB6vO5OeX/F7IRiqtZghJRddEeMdQ1a6H0GeV1BF7Hx8j3TPMJ66qSAb0RA1lQQCN4l+/YMEWmQD8amf1O5NY116waf+Co4qkvt3c4QctQOMwu3Ra7uLlp6GG61OmHhPTCGSv/LZp6CVtROLY5IltKv7qBzksjvXkO1SzhJOxi0JkZmg= +branches: + except: + - assets +deploy: + skip_cleanup: true + provider: releases + api_key: + secure: 
rwlge/Rs3wnWyfKRhD9fd5GviVe0foYUp20DY3AjKdDjhtwScA1EeR9QHOkB3raze52en0+KkpqlLCWbt3q4CRT7+ku1DNKhd6VWALdTZ1RPJYvNlU6CKJdRnWUJsECmSBsShXlbiYR8axqNVedzFPFGKzS9gYlFN6rr7pez/JZhxqucopZ6I+TkRHMELrFXyQK7/Y2bNRCLC4a+rGsjKeLLtYXbRXCmS0G4BSJEBRk7d69fIRzBApCMfrcLftgHzPuPth616yyUusQSCQYvaZ5tlwrPP8/E0wG3SVJVeDCMuDOSBZ9M6vNzR8W8VR/hxQamegn1OQgC5kNOaLZCTcJ5xguRouqb+FNFBqrd/Zi6vESo7RiVLULawzwxkh9sIPa3WZYDb3VK/Z/cpggUeR7wAu0S5ZYEvJHRefIZpqofZEHzDE3Blqp5yErz05e/zmjpd6HHK3f/UHmRRYfbulkvGT3aL/dlq5GcFvuxVC/vTL2VPvg9cGbqtf7PakC5IhoHpDs35tOyLxifOBLHvkwtGSxEfsCohIG8Hz2XFD83EsxgOiKSXVPLNd6yxjdqZj7OeAKFFU3bzGndnRbDIXaf987IN1imgUtP6wegfImoRStqxN4gEwwIMFsZCF86Ug4eLhlajLbWhudriDxDPBM/F9950aVxLwmWh9l5cRI= + file_glob: true + file: release/* + on: + tags: true + branch: master + repo: wrouesnel/postgres_exporter diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..86128d4 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,15 @@ +FROM debian:7.11-slim +RUN useradd -u 20001 postgres_exporter + +FROM scratch + +COPY --from=0 /etc/passwd /etc/passwd +USER postgres_exporter + +ARG binary + +COPY $binary /postgres_exporter + +EXPOSE 9187 + +ENTRYPOINT [ "/postgres_exporter" ] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..4812e5b --- /dev/null +++ b/LICENSE @@ -0,0 +1,13 @@ +Copyright 2018 William Rouesnel + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..a81bcd9 --- /dev/null +++ b/README.md @@ -0,0 +1,270 @@ +[![Build Status](https://travis-ci.org/wrouesnel/postgres_exporter.svg?branch=master)](https://travis-ci.org/wrouesnel/postgres_exporter) +[![Coverage Status](https://coveralls.io/repos/github/wrouesnel/postgres_exporter/badge.svg?branch=master)](https://coveralls.io/github/wrouesnel/postgres_exporter?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/wrouesnel/postgres_exporter)](https://goreportcard.com/report/github.com/wrouesnel/postgres_exporter) +[![Docker Pulls](https://img.shields.io/docker/pulls/wrouesnel/postgres_exporter.svg)](https://hub.docker.com/r/wrouesnel/postgres_exporter/tags) + +# PostgreSQL Server Exporter + +Prometheus exporter for PostgreSQL server metrics. 
+ +CI Tested PostgreSQL versions: `9.4`, `9.5`, `9.6`, `10`, `11` + +## Quick Start +This package is available for Docker: +``` +# Start an example database +docker run --net=host -it --rm -e POSTGRES_PASSWORD=password postgres +# Connect to it +docker run --net=host -e DATA_SOURCE_NAME="postgresql://postgres:password@localhost:5432/postgres?sslmode=disable" wrouesnel/postgres_exporter +``` + +## Building and running + +The build system is based on [Mage](https://magefile.org) + +The default make file behavior is to build the binary: +``` +$ go get github.com/wrouesnel/postgres_exporter +$ cd ${GOPATH-$HOME/go}/src/github.com/wrouesnel/postgres_exporter +$ go run mage.go binary +$ export DATA_SOURCE_NAME="postgresql://login:password@hostname:port/dbname" +$ ./postgres_exporter +``` + +To build the dockerfile, run `go run mage.go docker`. + +This will build the docker image as `wrouesnel/postgres_exporter:latest`. This +is a minimal docker image containing *just* postgres_exporter. By default no SSL +certificates are included, if you need to use SSL you should either bind-mount +`/etc/ssl/certs/ca-certificates.crt` or derive a new image containing them. + +### Vendoring +Package vendoring is handled with [`govendor`](https://github.com/kardianos/govendor) + +### Flags + +* `help` + Show context-sensitive help (also try --help-long and --help-man). + +* `web.listen-address` + Address to listen on for web interface and telemetry. Default is `:9187`. + +* `web.telemetry-path` + Path under which to expose metrics. Default is `/metrics`. + +* `disable-default-metrics` + Use only metrics supplied from `queries.yaml` via `--extend.query-path`. + +* `disable-settings-metrics` + Use the flag if you don't want to scrape `pg_settings`. + +* `auto-discover-databases` + Whether to discover the databases on a server dynamically. + +* `extend.query-path` + Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml) + for examples of the format. + +* `dumpmaps` + Do not run - print the internal representation of the metric maps. Useful when debugging a custom + queries file. + +* `constantLabels` + Labels to set in all metrics. A list of `label=value` pairs, separated by commas. + +* `version` + Show application version. + +* `exclude-databases` + A list of databases to remove when autoDiscoverDatabases is enabled. + +* `log.level` + Set logging level: one of `debug`, `info`, `warn`, `error`, `fatal` + +* `log.format` + Set the log output target and format. e.g. `logger:syslog?appname=bob&local=7` or `logger:stdout?json=true` + Defaults to `logger:stderr`. + +### Environment Variables + +The following environment variables configure the exporter: + +* `DATA_SOURCE_NAME` + the default legacy format. Accepts URI form and key=value form arguments. The + URI may contain the username and password to connect with. + +* `DATA_SOURCE_URI` + an alternative to `DATA_SOURCE_NAME` which exclusively accepts the raw URI + without a username and password component. + +* `DATA_SOURCE_URI_FILE` + The same as above but reads the URI from a file. + +* `DATA_SOURCE_USER` + When using `DATA_SOURCE_URI`, this environment variable is used to specify + the username. + +* `DATA_SOURCE_USER_FILE` + The same, but reads the username from a file. + +* `DATA_SOURCE_PASS` + When using `DATA_SOURCE_URI`, this environment variable is used to specify + the password to connect with. + +* `DATA_SOURCE_PASS_FILE` + The same as above but reads the password from a file. 
+
+* `PG_EXPORTER_WEB_LISTEN_ADDRESS`
+  Address to listen on for web interface and telemetry. Default is `:9187`.
+
+* `PG_EXPORTER_WEB_TELEMETRY_PATH`
+  Path under which to expose metrics. Default is `/metrics`.
+
+* `PG_EXPORTER_DISABLE_DEFAULT_METRICS`
+  Use only metrics supplied from `queries.yaml`. Value can be `true` or `false`. Default is `false`.
+
+* `PG_EXPORTER_DISABLE_SETTINGS_METRICS`
+  Use the flag if you don't want to scrape `pg_settings`. Value can be `true` or `false`. Default is `false`.
+
+* `PG_EXPORTER_AUTO_DISCOVER_DATABASES`
+  Whether to discover the databases on a server dynamically. Value can be `true` or `false`. Default is `false`.
+
+* `PG_EXPORTER_EXTEND_QUERY_PATH`
+  Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml)
+  for examples of the format.
+
+* `PG_EXPORTER_CONSTANT_LABELS`
+  Labels to set in all metrics. A list of `label=value` pairs, separated by commas.
+
+* `PG_EXPORTER_EXCLUDE_DATABASES`
+  A comma-separated list of databases to remove when `--auto-discover-databases` is enabled. Default is an empty string.
+
+Settings set by environment variables starting with `PG_` will be overwritten by the corresponding CLI flag if given.
+
+### Setting the Postgres server's data source name
+
+The PostgreSQL server's [data source name](http://en.wikipedia.org/wiki/Data_source_name)
+must be set via the `DATA_SOURCE_NAME` environment variable.
+
+For running it locally on a default Debian/Ubuntu install, this will work (transpose to init script as appropriate):
+
+    sudo -u postgres DATA_SOURCE_NAME="user=postgres host=/var/run/postgresql/ sslmode=disable" postgres_exporter
+
+You can also set a list of sources to scrape different instances from a single exporter setup. Just define a comma-separated string:
+
+    sudo -u postgres DATA_SOURCE_NAME="port=5432,port=6432" postgres_exporter
+
+See the [github.com/lib/pq](http://github.com/lib/pq) module for other ways to format the connection string.
+
+### Adding new metrics
+
+The exporter will attempt to dynamically export additional metrics if they are added in the
+future, but they will be marked as "untyped". Additional metric maps can be easily created
+from the Postgres documentation by copying the tables and using the following Python snippet:
+
+```python
+from io import StringIO
+
+x = """tab separated raw text of a documentation table"""
+for line in StringIO(x):
+    column, ctype, description = line.split('\t')
+    print(""""{0}" : {{ prometheus.CounterValue, prometheus.NewDesc("pg_stat_database_{0}", "{2}", nil, nil) }}, """.format(column.strip(), ctype, description.strip()))
+```
+Adjust the resulting Prometheus value type appropriately. This helps build
+rich, self-documenting metrics for the exporter.
+
+### Adding new metrics via a config file
+
+The `--extend.query-path` command-line argument specifies a YAML file containing additional queries to run.
+Some examples are provided in [queries.yaml](queries.yaml).
+
+### Disabling default metrics
+To work with non-officially-supported postgres versions (e.g. 8.2.15), or with variants of postgres (e.g. Greenplum),
+you can disable the default metrics with the `--disable-default-metrics` flag. This removes all built-in metrics and
+uses only metrics defined by queries in the `queries.yaml` file you supply (so you must supply one, otherwise the
+exporter will return nothing but internal statuses and not your database metrics).
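+
+For illustration, a minimal custom-queries file might look like the sketch below. The metric
+namespace, query, and column names here are made up; the structure follows the `UserQuery` and
+`Mapping` types in `cmd/postgres_exporter/postgres_exporter.go` and the examples in
+[queries.yaml](queries.yaml).
+
+```yaml
+pg_database:
+  query: "SELECT datname, pg_database_size(datname) AS size_bytes FROM pg_database"
+  master: true
+  cache_seconds: 30
+  metrics:
+    - datname:
+        usage: "LABEL"
+        description: "Name of the database"
+    - size_bytes:
+        usage: "GAUGE"
+        description: "Disk space used by the database, in bytes"
+```
+
+Columns with usage `LABEL` become Prometheus labels; the remaining columns become metric
+values, exported in this example as `pg_database_size_bytes`.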
+
+### Automatically discover databases
+To scrape metrics from all databases on a database server, the database DSNs can be dynamically discovered via the
+`--auto-discover-databases` flag. When true, `SELECT datname FROM pg_database WHERE datallowconn = true AND datistemplate = false and datname != current_database()` is run for all configured DSNs. From the
+result a new set of DSNs is created, and metrics are scraped for each of them.
+
+In addition, the `--exclude-databases` option makes it possible to filter the results of the auto-discovery and discard databases you do not need.
+
+### Running as non-superuser
+
+To be able to collect metrics from `pg_stat_activity` and `pg_stat_replication`
+as a non-superuser, you have to create functions and views as a superuser and
+assign permissions to them separately.
+
+In PostgreSQL, views run with the permissions of the user that created them, so
+they can act as security barriers. Functions need to be created to share this
+data with the non-superuser. Only creating the views would leave out the most
+important bits of data.
+
+```sql
+-- To use IF statements, hence to be able to check if the user exists before
+-- attempting creation, we need to switch to procedural SQL (PL/pgSQL)
+-- instead of standard SQL.
+-- More: https://www.postgresql.org/docs/9.3/plpgsql-overview.html
+-- To preserve compatibility with <9.0, DO blocks are not used; instead,
+-- a function is created and dropped.
+CREATE OR REPLACE FUNCTION __tmp_create_user() returns void as $$
+BEGIN
+  IF NOT EXISTS (
+          SELECT  -- SELECT list can stay empty for this
+          FROM pg_catalog.pg_user
+          WHERE usename = 'postgres_exporter') THEN
+    CREATE USER postgres_exporter;
+  END IF;
+END;
+$$ language plpgsql;
+
+SELECT __tmp_create_user();
+DROP FUNCTION __tmp_create_user();
+
+ALTER USER postgres_exporter WITH PASSWORD 'password';
+ALTER USER postgres_exporter SET SEARCH_PATH TO postgres_exporter,pg_catalog;
+
+-- If deploying as non-superuser (for example in AWS RDS), uncomment the GRANT
+-- line below and replace with your root user.
+-- GRANT postgres_exporter TO ;
+CREATE SCHEMA IF NOT EXISTS postgres_exporter;
+GRANT USAGE ON SCHEMA postgres_exporter TO postgres_exporter;
+GRANT CONNECT ON DATABASE postgres TO postgres_exporter;
+
+CREATE OR REPLACE FUNCTION get_pg_stat_activity() RETURNS SETOF pg_stat_activity AS
+$$ SELECT * FROM pg_catalog.pg_stat_activity; $$
+LANGUAGE sql
+VOLATILE
+SECURITY DEFINER;
+
+CREATE OR REPLACE VIEW postgres_exporter.pg_stat_activity
+AS
+  SELECT * from get_pg_stat_activity();
+
+GRANT SELECT ON postgres_exporter.pg_stat_activity TO postgres_exporter;
+
+CREATE OR REPLACE FUNCTION get_pg_stat_replication() RETURNS SETOF pg_stat_replication AS
+$$ SELECT * FROM pg_catalog.pg_stat_replication; $$
+LANGUAGE sql
+VOLATILE
+SECURITY DEFINER;
+
+CREATE OR REPLACE VIEW postgres_exporter.pg_stat_replication
+AS
+  SELECT * FROM get_pg_stat_replication();
+
+GRANT SELECT ON postgres_exporter.pg_stat_replication TO postgres_exporter;
+```
+
+> **NOTE**
+>
Remember to use `postgres` database name in the connection string: +> ``` +> DATA_SOURCE_NAME=postgresql://postgres_exporter:password@localhost:5432/postgres?sslmode=disable +> ``` + +# Hacking +* To build a copy for your current architecture run `go run mage.go binary`. + This will create a symlink to the just built binary in the root directory. +* To build release tar balls run `go run mage.go release`. +* Build system is a bit temperamental at the moment since the conversion to mage - I am working on getting it + to be a perfect out of the box experience, but am time-constrained on it at the moment. diff --git a/cmd/postgres_exporter/pg_setting.go b/cmd/postgres_exporter/pg_setting.go new file mode 100644 index 0000000..49b70ce --- /dev/null +++ b/cmd/postgres_exporter/pg_setting.go @@ -0,0 +1,141 @@ +package main + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/log" +) + +// Query the pg_settings view containing runtime variables +func querySettings(ch chan<- prometheus.Metric, server *Server) error { + log.Debugf("Querying pg_setting view on %q", server) + + // pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html + // + // NOTE: If you add more vartypes here, you must update the supported + // types in normaliseUnit() below + query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');" + + rows, err := server.db.Query(query) + if err != nil { + return fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) + } + defer rows.Close() // nolint: errcheck + + for rows.Next() { + s := &pgSetting{} + err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype) + if err != nil { + return fmt.Errorf("Error retrieving rows on %q: %s %v", server, namespace, err) + } + + ch <- s.metric(server.labels) + } + + return nil +} + +// pgSetting is represents a PostgreSQL runtime variable as returned by the +// pg_settings view. 
+type pgSetting struct { + name, setting, unit, shortDesc, vartype string +} + +func (s *pgSetting) metric(labels prometheus.Labels) prometheus.Metric { + var ( + err error + name = strings.Replace(s.name, ".", "_", -1) + unit = s.unit // nolint: ineffassign + shortDesc = s.shortDesc + subsystem = "settings" + val float64 + ) + + switch s.vartype { + case "bool": + if s.setting == "on" { + val = 1 + } + case "integer", "real": + if val, unit, err = s.normaliseUnit(); err != nil { + // Panic, since we should recognise all units + // and don't want to silently exlude metrics + panic(err) + } + + if len(unit) > 0 { + name = fmt.Sprintf("%s_%s", name, unit) + shortDesc = fmt.Sprintf("%s [Units converted to %s.]", shortDesc, unit) + } + default: + // Panic because we got a type we didn't ask for + panic(fmt.Sprintf("Unsupported vartype %q", s.vartype)) + } + + desc := newDesc(subsystem, name, shortDesc, labels) + return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val) +} + +// TODO: fix linter override +// nolint: nakedret +func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) { + val, err = strconv.ParseFloat(s.setting, 64) + if err != nil { + return val, unit, fmt.Errorf("Error converting setting %q value %q to float: %s", s.name, s.setting, err) + } + + // Units defined in: https://www.postgresql.org/docs/current/static/config-setting.html + switch s.unit { + case "": + return + case "ms", "s", "min", "h", "d": + unit = "seconds" + case "B", "kB", "MB", "GB", "TB", "8kB", "16kB", "32kB", "16MB", "32MB", "64MB": + unit = "bytes" + default: + err = fmt.Errorf("Unknown unit for runtime variable: %q", s.unit) + return + } + + // -1 is special, don't modify the value + if val == -1 { + return + } + + switch s.unit { + case "ms": + val /= 1000 + case "min": + val *= 60 + case "h": + val *= 60 * 60 + case "d": + val *= 60 * 60 * 24 + case "kB": + val *= math.Pow(2, 10) + case "MB": + val *= math.Pow(2, 20) + case "GB": + val *= math.Pow(2, 30) + case "TB": + val *= math.Pow(2, 40) + case "8kB": + val *= math.Pow(2, 13) + case "16kB": + val *= math.Pow(2, 14) + case "32kB": + val *= math.Pow(2, 15) + case "16MB": + val *= math.Pow(2, 24) + case "32MB": + val *= math.Pow(2, 25) + case "64MB": + val *= math.Pow(2, 26) + } + + return +} diff --git a/cmd/postgres_exporter/pg_setting_test.go b/cmd/postgres_exporter/pg_setting_test.go new file mode 100644 index 0000000..3d7820e --- /dev/null +++ b/cmd/postgres_exporter/pg_setting_test.go @@ -0,0 +1,256 @@ +// +build !integration + +package main + +import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + . 
"gopkg.in/check.v1" +) + +type PgSettingSuite struct{} + +var _ = Suite(&PgSettingSuite{}) + +var fixtures = []fixture{ + { + p: pgSetting{ + name: "seconds_fixture_metric", + setting: "5", + unit: "s", + shortDesc: "Foo foo foo", + vartype: "integer", + }, + n: normalised{ + val: 5, + unit: "seconds", + err: "", + }, + d: `Desc{fqName: "pg_settings_seconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`, + v: 5, + }, + { + p: pgSetting{ + name: "milliseconds_fixture_metric", + setting: "5000", + unit: "ms", + shortDesc: "Foo foo foo", + vartype: "integer", + }, + n: normalised{ + val: 5, + unit: "seconds", + err: "", + }, + d: `Desc{fqName: "pg_settings_milliseconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`, + v: 5, + }, + { + p: pgSetting{ + name: "eight_kb_fixture_metric", + setting: "17", + unit: "8kB", + shortDesc: "Foo foo foo", + vartype: "integer", + }, + n: normalised{ + val: 139264, + unit: "bytes", + err: "", + }, + d: `Desc{fqName: "pg_settings_eight_kb_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, + v: 139264, + }, + { + p: pgSetting{ + name: "16_kb_real_fixture_metric", + setting: "3.0", + unit: "16kB", + shortDesc: "Foo foo foo", + vartype: "real", + }, + n: normalised{ + val: 49152, + unit: "bytes", + err: "", + }, + d: `Desc{fqName: "pg_settings_16_kb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, + v: 49152, + }, + { + p: pgSetting{ + name: "16_mb_real_fixture_metric", + setting: "3.0", + unit: "16MB", + shortDesc: "Foo foo foo", + vartype: "real", + }, + n: normalised{ + val: 5.0331648e+07, + unit: "bytes", + err: "", + }, + d: `Desc{fqName: "pg_settings_16_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, + v: 5.0331648e+07, + }, + { + p: pgSetting{ + name: "32_mb_real_fixture_metric", + setting: "3.0", + unit: "32MB", + shortDesc: "Foo foo foo", + vartype: "real", + }, + n: normalised{ + val: 1.00663296e+08, + unit: "bytes", + err: "", + }, + d: `Desc{fqName: "pg_settings_32_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, + v: 1.00663296e+08, + }, + { + p: pgSetting{ + name: "64_mb_real_fixture_metric", + setting: "3.0", + unit: "64MB", + shortDesc: "Foo foo foo", + vartype: "real", + }, + n: normalised{ + val: 2.01326592e+08, + unit: "bytes", + err: "", + }, + d: `Desc{fqName: "pg_settings_64_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`, + v: 2.01326592e+08, + }, + { + p: pgSetting{ + name: "bool_on_fixture_metric", + setting: "on", + unit: "", + shortDesc: "Foo foo foo", + vartype: "bool", + }, + n: normalised{ + val: 1, + unit: "", + err: "", + }, + d: `Desc{fqName: "pg_settings_bool_on_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`, + v: 1, + }, + { + p: pgSetting{ + name: "bool_off_fixture_metric", + setting: "off", + unit: "", + shortDesc: "Foo foo foo", + vartype: "bool", + }, + n: normalised{ + val: 0, + unit: "", + err: "", + }, + d: `Desc{fqName: "pg_settings_bool_off_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`, + v: 0, + }, + { + p: pgSetting{ + name: "special_minus_one_value", + setting: "-1", + unit: "d", + shortDesc: 
"foo foo foo", + vartype: "integer", + }, + n: normalised{ + val: -1, + unit: "seconds", + err: "", + }, + d: `Desc{fqName: "pg_settings_special_minus_one_value_seconds", help: "foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`, + v: -1, + }, + { + p: pgSetting{ + name: "rds.rds_superuser_reserved_connections", + setting: "2", + unit: "", + shortDesc: "Sets the number of connection slots reserved for rds_superusers.", + vartype: "integer", + }, + n: normalised{ + val: 2, + unit: "", + err: "", + }, + d: `Desc{fqName: "pg_settings_rds_rds_superuser_reserved_connections", help: "Sets the number of connection slots reserved for rds_superusers.", constLabels: {}, variableLabels: []}`, + v: 2, + }, + { + p: pgSetting{ + name: "unknown_unit", + setting: "10", + unit: "nonexistent", + shortDesc: "foo foo foo", + vartype: "integer", + }, + n: normalised{ + val: 10, + unit: "", + err: `Unknown unit for runtime variable: "nonexistent"`, + }, + }, +} + +func (s *PgSettingSuite) TestNormaliseUnit(c *C) { + for _, f := range fixtures { + switch f.p.vartype { + case "integer", "real": + val, unit, err := f.p.normaliseUnit() + + c.Check(val, Equals, f.n.val) + c.Check(unit, Equals, f.n.unit) + + if err == nil { + c.Check("", Equals, f.n.err) + } else { + c.Check(err.Error(), Equals, f.n.err) + } + } + } +} + +func (s *PgSettingSuite) TestMetric(c *C) { + defer func() { + if r := recover(); r != nil { + if r.(error).Error() != `Unknown unit for runtime variable: "nonexistent"` { + panic(r) + } + } + }() + + for _, f := range fixtures { + d := &dto.Metric{} + m := f.p.metric(prometheus.Labels{}) + m.Write(d) // nolint: errcheck + + c.Check(m.Desc().String(), Equals, f.d) + c.Check(d.GetGauge().GetValue(), Equals, f.v) + } +} + +type normalised struct { + val float64 + unit string + err string +} + +type fixture struct { + p pgSetting + n normalised + d string + v float64 +} diff --git a/cmd/postgres_exporter/postgres_exporter.go b/cmd/postgres_exporter/postgres_exporter.go new file mode 100644 index 0000000..1a9eaaa --- /dev/null +++ b/cmd/postgres_exporter/postgres_exporter.go @@ -0,0 +1,1674 @@ +package main + +import ( + "crypto/sha256" + "database/sql" + "errors" + "fmt" + "io/ioutil" + "math" + "net/http" + "net/url" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/blang/semver" + "github.com/lib/pq" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/common/log" + "github.com/prometheus/common/version" + "gopkg.in/alecthomas/kingpin.v2" + "gopkg.in/yaml.v2" +) + +// Branch is set during build to the git branch. +var Branch string + +// BuildDate is set during build to the ISO-8601 date and time. +var BuildDate string + +// Revision is set during build to the git commit revision. +var Revision string + +// Version is set during build to the git describe version +// (semantic version)-(commitish) form. +var Version = "0.0.1-rev" + +// VersionShort is set during build to the semantic version. 
+var VersionShort = "0.0.1" + +var ( + listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").Envar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String() + metricPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").Envar("PG_EXPORTER_WEB_TELEMETRY_PATH").String() + disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool() + disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool() + autoDiscoverDatabases = kingpin.Flag("auto-discover-databases", "Whether to discover the databases on a server dynamically.").Default("false").Envar("PG_EXPORTER_AUTO_DISCOVER_DATABASES").Bool() + queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run.").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String() + onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool() + constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,).").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String() + excludeDatabases = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String() +) + +// Metric name parts. +const ( + // Namespace for all metrics. + namespace = "pg" + // Subsystems. + exporter = "exporter" + // Metric label used for static string data thats handy to send to Prometheus + // e.g. version + staticLabelName = "static" + // Metric label used for server identification. + serverLabelName = "server" +) + +// ColumnUsage should be one of several enum values which describe how a +// queried row is to be converted to a Prometheus metric. +type ColumnUsage int + +// nolint: golint +const ( + DISCARD ColumnUsage = iota // Ignore this column + LABEL ColumnUsage = iota // Use this column as a label + COUNTER ColumnUsage = iota // Use this column as a counter + GAUGE ColumnUsage = iota // Use this column as a gauge + MAPPEDMETRIC ColumnUsage = iota // Use this column with the supplied mapping of text values + DURATION ColumnUsage = iota // This column should be interpreted as a text duration (and converted to milliseconds) +) + +// UnmarshalYAML implements the yaml.Unmarshaller interface. +func (cu *ColumnUsage) UnmarshalYAML(unmarshal func(interface{}) error) error { + var value string + if err := unmarshal(&value); err != nil { + return err + } + + columnUsage, err := stringToColumnUsage(value) + if err != nil { + return err + } + + *cu = columnUsage + return nil +} + +// MappingOptions is a copy of ColumnMapping used only for parsing +type MappingOptions struct { + Usage string `yaml:"usage"` + Description string `yaml:"description"` + Mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC + SupportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD). 
+} + +// nolint: golint +type Mapping map[string]MappingOptions + +// nolint: golint +type UserQuery struct { + Query string `yaml:"query"` + Metrics []Mapping `yaml:"metrics"` + Master bool `yaml:"master"` // Querying only for master database + CacheSeconds uint64 `yaml:"cache_seconds"` // Number of seconds to cache the namespace result metrics for. +} + +// nolint: golint +type UserQueries map[string]UserQuery + +// Regex used to get the "short-version" from the postgres version field. +var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`) +var lowestSupportedVersion = semver.MustParse("9.1.0") + +// Parses the version of postgres into the short version string we can use to +// match behaviors. +func parseVersion(versionString string) (semver.Version, error) { + submatches := versionRegex.FindStringSubmatch(versionString) + if len(submatches) > 1 { + return semver.ParseTolerant(submatches[1]) + } + return semver.Version{}, + errors.New(fmt.Sprintln("Could not find a postgres version in string:", versionString)) +} + +// ColumnMapping is the user-friendly representation of a prometheus descriptor map +type ColumnMapping struct { + usage ColumnUsage `yaml:"usage"` + description string `yaml:"description"` + mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC + supportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD). +} + +// UnmarshalYAML implements yaml.Unmarshaller +func (cm *ColumnMapping) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain ColumnMapping + return unmarshal((*plain)(cm)) +} + +// intermediateMetricMap holds the partially loaded metric map parsing. +// This is mainly so we can parse cacheSeconds around. +type intermediateMetricMap struct { + columnMappings map[string]ColumnMapping + master bool + cacheSeconds uint64 +} + +// MetricMapNamespace groups metric maps under a shared set of labels. +type MetricMapNamespace struct { + labels []string // Label names for this namespace + columnMappings map[string]MetricMap // Column mappings in this namespace + master bool // Call query only for master database + cacheSeconds uint64 // Number of seconds this metric namespace can be cached. 0 disables. +} + +// MetricMap stores the prometheus metric description which a given column will +// be mapped to by the collector +type MetricMap struct { + discard bool // Should metric be discarded during mapping? 
+ vtype prometheus.ValueType // Prometheus valuetype + desc *prometheus.Desc // Prometheus descriptor + conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64 +} + +// ErrorConnectToServer is a connection to PgSQL server error +type ErrorConnectToServer struct { + Msg string +} + +// Error returns error +func (e *ErrorConnectToServer) Error() string { + return e.Msg +} + +// TODO: revisit this with the semver system +func dumpMaps() { + // TODO: make this function part of the exporter + for name, cmap := range builtinMetricMaps { + query, ok := queryOverrides[name] + if !ok { + fmt.Println(name) + } else { + for _, queryOverride := range query { + fmt.Println(name, queryOverride.versionRange, queryOverride.query) + } + } + + for column, details := range cmap.columnMappings { + fmt.Printf(" %-40s %v\n", column, details) + } + fmt.Println() + } +} + +var builtinMetricMaps = map[string]intermediateMetricMap{ + "pg_stat_bgwriter": { + map[string]ColumnMapping{ + "checkpoints_timed": {COUNTER, "Number of scheduled checkpoints that have been performed", nil, nil}, + "checkpoints_req": {COUNTER, "Number of requested checkpoints that have been performed", nil, nil}, + "checkpoint_write_time": {COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", nil, nil}, + "checkpoint_sync_time": {COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", nil, nil}, + "buffers_checkpoint": {COUNTER, "Number of buffers written during checkpoints", nil, nil}, + "buffers_clean": {COUNTER, "Number of buffers written by the background writer", nil, nil}, + "maxwritten_clean": {COUNTER, "Number of times the background writer stopped a cleaning scan because it had written too many buffers", nil, nil}, + "buffers_backend": {COUNTER, "Number of buffers written directly by a backend", nil, nil}, + "buffers_backend_fsync": {COUNTER, "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", nil, nil}, + "buffers_alloc": {COUNTER, "Number of buffers allocated", nil, nil}, + "stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil}, + }, + true, + 0, + }, + "pg_stat_database": { + map[string]ColumnMapping{ + "datid": {LABEL, "OID of a database", nil, nil}, + "datname": {LABEL, "Name of this database", nil, nil}, + "numbackends": {GAUGE, "Number of backends currently connected to this database. 
This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", nil, nil}, + "xact_commit": {COUNTER, "Number of transactions in this database that have been committed", nil, nil}, + "xact_rollback": {COUNTER, "Number of transactions in this database that have been rolled back", nil, nil}, + "blks_read": {COUNTER, "Number of disk blocks read in this database", nil, nil}, + "blks_hit": {COUNTER, "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", nil, nil}, + "tup_returned": {COUNTER, "Number of rows returned by queries in this database", nil, nil}, + "tup_fetched": {COUNTER, "Number of rows fetched by queries in this database", nil, nil}, + "tup_inserted": {COUNTER, "Number of rows inserted by queries in this database", nil, nil}, + "tup_updated": {COUNTER, "Number of rows updated by queries in this database", nil, nil}, + "tup_deleted": {COUNTER, "Number of rows deleted by queries in this database", nil, nil}, + "conflicts": {COUNTER, "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", nil, nil}, + "temp_files": {COUNTER, "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", nil, nil}, + "temp_bytes": {COUNTER, "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", nil, nil}, + "deadlocks": {COUNTER, "Number of deadlocks detected in this database", nil, nil}, + "blk_read_time": {COUNTER, "Time spent reading data file blocks by backends in this database, in milliseconds", nil, nil}, + "blk_write_time": {COUNTER, "Time spent writing data file blocks by backends in this database, in milliseconds", nil, nil}, + "stats_reset": {COUNTER, "Time at which these statistics were last reset", nil, nil}, + }, + true, + 0, + }, + "pg_stat_database_conflicts": { + map[string]ColumnMapping{ + "datid": {LABEL, "OID of a database", nil, nil}, + "datname": {LABEL, "Name of this database", nil, nil}, + "confl_tablespace": {COUNTER, "Number of queries in this database that have been canceled due to dropped tablespaces", nil, nil}, + "confl_lock": {COUNTER, "Number of queries in this database that have been canceled due to lock timeouts", nil, nil}, + "confl_snapshot": {COUNTER, "Number of queries in this database that have been canceled due to old snapshots", nil, nil}, + "confl_bufferpin": {COUNTER, "Number of queries in this database that have been canceled due to pinned buffers", nil, nil}, + "confl_deadlock": {COUNTER, "Number of queries in this database that have been canceled due to deadlocks", nil, nil}, + }, + true, + 0, + }, + "pg_locks": { + map[string]ColumnMapping{ + "datname": {LABEL, "Name of this database", nil, nil}, + "mode": {LABEL, "Type of Lock", nil, nil}, + "count": {GAUGE, "Number of locks", nil, nil}, + }, + true, + 0, + }, + "pg_stat_replication": { + map[string]ColumnMapping{ + "procpid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange("<9.2.0")}, + "pid": {DISCARD, "Process ID of a WAL 
sender process", nil, semver.MustParseRange(">=9.2.0")}, + "usesysid": {DISCARD, "OID of the user logged into this WAL sender process", nil, nil}, + "usename": {DISCARD, "Name of the user logged into this WAL sender process", nil, nil}, + "application_name": {LABEL, "Name of the application that is connected to this WAL sender", nil, nil}, + "client_addr": {LABEL, "IP address of the client connected to this WAL sender. If this field is null, it indicates that the client is connected via a Unix socket on the server machine.", nil, nil}, + "client_hostname": {DISCARD, "Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when log_hostname is enabled.", nil, nil}, + "client_port": {DISCARD, "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used", nil, nil}, + "backend_start": {DISCARD, "with time zone Time when this process was started, i.e., when the client connected to this WAL sender", nil, nil}, + "backend_xmin": {DISCARD, "The current backend's xmin horizon.", nil, nil}, + "state": {LABEL, "Current WAL sender state", nil, nil}, + "sent_location": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange("<10.0.0")}, + "write_location": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange("<10.0.0")}, + "flush_location": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange("<10.0.0")}, + "replay_location": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange("<10.0.0")}, + "sent_lsn": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange(">=10.0.0")}, + "write_lsn": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")}, + "flush_lsn": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")}, + "replay_lsn": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange(">=10.0.0")}, + "sync_priority": {DISCARD, "Priority of this standby server for being chosen as the synchronous standby", nil, nil}, + "sync_state": {DISCARD, "Synchronous state of this standby server", nil, nil}, + "slot_name": {LABEL, "A unique, cluster-wide identifier for the replication slot", nil, semver.MustParseRange(">=9.2.0")}, + "plugin": {DISCARD, "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots", nil, nil}, + "slot_type": {DISCARD, "The slot type - physical or logical", nil, nil}, + "datoid": {DISCARD, "The OID of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil}, + "database": {DISCARD, "The name of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil}, + "active": {DISCARD, "True if this slot is currently actively being used", nil, nil}, + "active_pid": {DISCARD, "Process ID of a WAL sender process", nil, nil}, + "xmin": {DISCARD, "The oldest transaction that this slot needs the database to retain. 
VACUUM cannot remove tuples deleted by any later transaction", nil, nil}, + "catalog_xmin": {DISCARD, "The oldest transaction affecting the system catalogs that this slot needs the database to retain. VACUUM cannot remove catalog tuples deleted by any later transaction", nil, nil}, + "restart_lsn": {DISCARD, "The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints", nil, nil}, + "pg_current_xlog_location": {DISCARD, "pg_current_xlog_location", nil, nil}, + "pg_current_wal_lsn": {DISCARD, "pg_current_xlog_location", nil, semver.MustParseRange(">=10.0.0")}, + "pg_current_wal_lsn_bytes": {GAUGE, "WAL position in bytes", nil, semver.MustParseRange(">=10.0.0")}, + "pg_xlog_location_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=9.2.0 <10.0.0")}, + "pg_wal_lsn_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=10.0.0")}, + "confirmed_flush_lsn": {DISCARD, "LSN position a consumer of a slot has confirmed flushing the data received", nil, nil}, + "write_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, + "flush_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). This can be used to gauge the delay that synchronous_commit level remote_flush incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, + "replay_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. 
This can be used to gauge the delay that synchronous_commit level remote_apply incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, + }, + true, + 0, + }, + "pg_stat_archiver": { + map[string]ColumnMapping{ + "archived_count": {COUNTER, "Number of WAL files that have been successfully archived", nil, nil}, + "last_archived_wal": {DISCARD, "Name of the last WAL file successfully archived", nil, nil}, + "last_archived_time": {DISCARD, "Time of the last successful archive operation", nil, nil}, + "failed_count": {COUNTER, "Number of failed attempts for archiving WAL files", nil, nil}, + "last_failed_wal": {DISCARD, "Name of the WAL file of the last failed archival operation", nil, nil}, + "last_failed_time": {DISCARD, "Time of the last failed archival operation", nil, nil}, + "stats_reset": {DISCARD, "Time at which these statistics were last reset", nil, nil}, + "last_archive_age": {GAUGE, "Time in seconds since last WAL segment was successfully archived", nil, nil}, + }, + true, + 0, + }, + "pg_stat_activity": { + map[string]ColumnMapping{ + "datname": {LABEL, "Name of this database", nil, nil}, + "state": {LABEL, "connection state", nil, semver.MustParseRange(">=9.2.0")}, + "count": {GAUGE, "number of connections in this state", nil, nil}, + "max_tx_duration": {GAUGE, "max duration in seconds any active transaction has been running", nil, nil}, + }, + true, + 0, + }, +} + +// OverrideQuery 's are run in-place of simple namespace look ups, and provide +// advanced functionality. But they have a tendency to postgres version specific. +// There aren't too many versions, so we simply store customized versions using +// the semver matching we do for columns. +type OverrideQuery struct { + versionRange semver.Range + query string +} + +// Overriding queries for namespaces above. 
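+// Each namespace entry lists one or more version-specific SQL variants;
+// makeQueryOverrideMap (below) picks the first variant whose semver range
+// matches the connected server's version, and disables the namespace when
+// none match.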
+// TODO: validate this is a closed set in tests, and there are no overlaps +var queryOverrides = map[string][]OverrideQuery{ + "pg_locks": { + { + semver.MustParseRange(">0.0.0"), + `SELECT pg_database.datname,tmp.mode,COALESCE(count,0) as count + FROM + ( + VALUES ('accesssharelock'), + ('rowsharelock'), + ('rowexclusivelock'), + ('shareupdateexclusivelock'), + ('sharelock'), + ('sharerowexclusivelock'), + ('exclusivelock'), + ('accessexclusivelock') + ) AS tmp(mode) CROSS JOIN pg_database + LEFT JOIN + (SELECT database, lower(mode) AS mode,count(*) AS count + FROM pg_locks WHERE database IS NOT NULL + GROUP BY database, lower(mode) + ) AS tmp2 + ON tmp.mode=tmp2.mode and pg_database.oid = tmp2.database ORDER BY 1`, + }, + }, + + "pg_stat_replication": { + { + semver.MustParseRange(">=10.0.0"), + ` + SELECT *, + (case pg_is_in_recovery() when 't' then null else pg_current_wal_lsn() end) AS pg_current_wal_lsn, + (case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), pg_lsn('0/0'))::float end) AS pg_current_wal_lsn_bytes, + (case pg_is_in_recovery() when 't' then null else pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn)::float end) AS pg_wal_lsn_diff + FROM pg_stat_replication + `, + }, + { + semver.MustParseRange(">=9.2.0 <10.0.0"), + ` + SELECT *, + (case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location, + (case pg_is_in_recovery() when 't' then null else pg_xlog_location_diff(pg_current_xlog_location(), replay_location)::float end) AS pg_xlog_location_diff + FROM pg_stat_replication + `, + }, + { + semver.MustParseRange("<9.2.0"), + ` + SELECT *, + (case pg_is_in_recovery() when 't' then null else pg_current_xlog_location() end) AS pg_current_xlog_location + FROM pg_stat_replication + `, + }, + }, + + "pg_stat_archiver": { + { + semver.MustParseRange(">=0.0.0"), + ` + SELECT *, + extract(epoch from now() - last_archived_time) AS last_archive_age + FROM pg_stat_archiver + `, + }, + }, + + "pg_stat_activity": { + // This query only works + { + semver.MustParseRange(">=9.2.0"), + ` + SELECT + pg_database.datname, + tmp.state, + COALESCE(count,0) as count, + COALESCE(max_tx_duration,0) as max_tx_duration + FROM + ( + VALUES ('active'), + ('idle'), + ('idle in transaction'), + ('idle in transaction (aborted)'), + ('fastpath function call'), + ('disabled') + ) AS tmp(state) CROSS JOIN pg_database + LEFT JOIN + ( + SELECT + datname, + state, + count(*) AS count, + MAX(EXTRACT(EPOCH FROM now() - xact_start))::float AS max_tx_duration + FROM pg_stat_activity GROUP BY datname,state) AS tmp2 + ON tmp.state = tmp2.state AND pg_database.datname = tmp2.datname + `, + }, + { + semver.MustParseRange("<9.2.0"), + ` + SELECT + datname, + 'unknown' AS state, + COALESCE(count(*),0) AS count, + COALESCE(MAX(EXTRACT(EPOCH FROM now() - xact_start))::float,0) AS max_tx_duration + FROM pg_stat_activity GROUP BY datname + `, + }, + }, +} + +// Convert the query override file to the version-specific query override file +// for the exporter. +func makeQueryOverrideMap(pgVersion semver.Version, queryOverrides map[string][]OverrideQuery) map[string]string { + resultMap := make(map[string]string) + for name, overrideDef := range queryOverrides { + // Find a matching semver. We make it an error to have overlapping + // ranges at test-time, so only 1 should ever match. 
+ matched := false + for _, queryDef := range overrideDef { + if queryDef.versionRange(pgVersion) { + resultMap[name] = queryDef.query + matched = true + break + } + } + if !matched { + log.Warnln("No query matched override for", name, "- disabling metric space.") + resultMap[name] = "" + } + } + + return resultMap +} + +func parseUserQueries(content []byte) (map[string]intermediateMetricMap, map[string]string, error) { + var userQueries UserQueries + + err := yaml.Unmarshal(content, &userQueries) + if err != nil { + return nil, nil, err + } + + // Stores the loaded map representation + metricMaps := make(map[string]intermediateMetricMap) + newQueryOverrides := make(map[string]string) + + for metric, specs := range userQueries { + log.Debugln("New user metric namespace from YAML:", metric, "Will cache results for:", specs.CacheSeconds) + newQueryOverrides[metric] = specs.Query + metricMap, ok := metricMaps[metric] + if !ok { + // Namespace for metric not found - add it. + newMetricMap := make(map[string]ColumnMapping) + metricMap = intermediateMetricMap{ + columnMappings: newMetricMap, + master: specs.Master, + cacheSeconds: specs.CacheSeconds, + } + metricMaps[metric] = metricMap + } + for _, metric := range specs.Metrics { + for name, mappingOption := range metric { + var columnMapping ColumnMapping + tmpUsage, _ := stringToColumnUsage(mappingOption.Usage) + columnMapping.usage = tmpUsage + columnMapping.description = mappingOption.Description + + // TODO: we should support cu + columnMapping.mapping = nil + // Should we support this for users? + columnMapping.supportedVersions = nil + + metricMap.columnMappings[name] = columnMapping + } + } + } + return metricMaps, newQueryOverrides, nil +} + +// Add queries to the builtinMetricMaps and queryOverrides maps. Added queries do not +// respect version requirements, because it is assumed that the user knows +// what they are doing with their version of postgres. +// +// This function modifies metricMap and queryOverrideMap to contain the new +// queries. +// TODO: test code for all cu. +// TODO: the YAML this supports is "non-standard" - we should move away from it. +func addQueries(content []byte, pgVersion semver.Version, server *Server) error { + metricMaps, newQueryOverrides, err := parseUserQueries(content) + if err != nil { + return nil + } + // Convert the loaded metric map into exporter representation + partialExporterMap := makeDescMap(pgVersion, server.labels, metricMaps) + + // Merge the two maps (which are now quite flatteend) + for k, v := range partialExporterMap { + _, found := server.metricMap[k] + if found { + log.Debugln("Overriding metric", k, "from user YAML file.") + } else { + log.Debugln("Adding new metric", k, "from user YAML file.") + } + server.metricMap[k] = v + } + + // Merge the query override map + for k, v := range newQueryOverrides { + _, found := server.queryOverrides[k] + if found { + log.Debugln("Overriding query override", k, "from user YAML file.") + } else { + log.Debugln("Adding new query override", k, "from user YAML file.") + } + server.queryOverrides[k] = v + } + return nil +} + +// Turn the MetricMap column mapping into a prometheus descriptor mapping. 
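+// Columns whose usage is LABEL become variable labels on every metric in the
+// namespace; columns whose pg_version range excludes the connected server are
+// forced to DISCARD so they are silently skipped during mapping.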
+func makeDescMap(pgVersion semver.Version, serverLabels prometheus.Labels, metricMaps map[string]intermediateMetricMap) map[string]MetricMapNamespace { + var metricMap = make(map[string]MetricMapNamespace) + + for namespace, intermediateMappings := range metricMaps { + thisMap := make(map[string]MetricMap) + + // Get the constant labels + var variableLabels []string + for columnName, columnMapping := range intermediateMappings.columnMappings { + if columnMapping.usage == LABEL { + variableLabels = append(variableLabels, columnName) + } + } + + for columnName, columnMapping := range intermediateMappings.columnMappings { + // Check column version compatibility for the current map + // Force to discard if not compatible. + if columnMapping.supportedVersions != nil { + if !columnMapping.supportedVersions(pgVersion) { + // It's very useful to be able to see what columns are being + // rejected. + log.Debugln(columnName, "is being forced to discard due to version incompatibility.") + thisMap[columnName] = MetricMap{ + discard: true, + conversion: func(_ interface{}) (float64, bool) { + return math.NaN(), true + }, + } + continue + } + } + + // Determine how to convert the column based on its usage. + // nolint: dupl + switch columnMapping.usage { + case DISCARD, LABEL: + thisMap[columnName] = MetricMap{ + discard: true, + conversion: func(_ interface{}) (float64, bool) { + return math.NaN(), true + }, + } + case COUNTER: + thisMap[columnName] = MetricMap{ + vtype: prometheus.CounterValue, + desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), + conversion: func(in interface{}) (float64, bool) { + return dbToFloat64(in) + }, + } + case GAUGE: + thisMap[columnName] = MetricMap{ + vtype: prometheus.GaugeValue, + desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), + conversion: func(in interface{}) (float64, bool) { + return dbToFloat64(in) + }, + } + case MAPPEDMETRIC: + thisMap[columnName] = MetricMap{ + vtype: prometheus.GaugeValue, + desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), + conversion: func(in interface{}) (float64, bool) { + text, ok := in.(string) + if !ok { + return math.NaN(), false + } + + val, ok := columnMapping.mapping[text] + if !ok { + return math.NaN(), false + } + return val, true + }, + } + case DURATION: + thisMap[columnName] = MetricMap{ + vtype: prometheus.GaugeValue, + desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, serverLabels), + conversion: func(in interface{}) (float64, bool) { + var durationString string + switch t := in.(type) { + case []byte: + durationString = string(t) + case string: + durationString = t + default: + log.Errorln("DURATION conversion metric was not a string") + return math.NaN(), false + } + + if durationString == "-1" { + return math.NaN(), false + } + + d, err := time.ParseDuration(durationString) + if err != nil { + log.Errorln("Failed converting result to metric:", columnName, in, err) + return math.NaN(), false + } + return float64(d / time.Millisecond), true + }, + } + } + } + + metricMap[namespace] = MetricMapNamespace{variableLabels, thisMap, intermediateMappings.master, intermediateMappings.cacheSeconds} + } + + return metricMap +} + +// convert a string to the corresponding ColumnUsage +func stringToColumnUsage(s string) 
(ColumnUsage, error) { + var u ColumnUsage + var err error + switch s { + case "DISCARD": + u = DISCARD + + case "LABEL": + u = LABEL + + case "COUNTER": + u = COUNTER + + case "GAUGE": + u = GAUGE + + case "MAPPEDMETRIC": + u = MAPPEDMETRIC + + case "DURATION": + u = DURATION + + default: + err = fmt.Errorf("wrong ColumnUsage given : %s", s) + } + + return u, err +} + +// Convert database.sql types to float64s for Prometheus consumption. Null types are mapped to NaN. string and []byte +// types are mapped as NaN and !ok +func dbToFloat64(t interface{}) (float64, bool) { + switch v := t.(type) { + case int64: + return float64(v), true + case float64: + return v, true + case time.Time: + return float64(v.Unix()), true + case []byte: + // Try and convert to string and then parse to a float64 + strV := string(v) + result, err := strconv.ParseFloat(strV, 64) + if err != nil { + log.Infoln("Could not parse []byte:", err) + return math.NaN(), false + } + return result, true + case string: + result, err := strconv.ParseFloat(v, 64) + if err != nil { + log.Infoln("Could not parse string:", err) + return math.NaN(), false + } + return result, true + case bool: + if v { + return 1.0, true + } + return 0.0, true + case nil: + return math.NaN(), true + default: + return math.NaN(), false + } +} + +// Convert database.sql to string for Prometheus labels. Null types are mapped to empty strings. +func dbToString(t interface{}) (string, bool) { + switch v := t.(type) { + case int64: + return fmt.Sprintf("%v", v), true + case float64: + return fmt.Sprintf("%v", v), true + case time.Time: + return fmt.Sprintf("%v", v.Unix()), true + case nil: + return "", true + case []byte: + // Try and convert to string + return string(v), true + case string: + return v, true + case bool: + if v { + return "true", true + } + return "false", true + default: + return "", false + } +} + +func parseFingerprint(url string) (string, error) { + dsn, err := pq.ParseURL(url) + if err != nil { + dsn = url + } + + pairs := strings.Split(dsn, " ") + kv := make(map[string]string, len(pairs)) + for _, pair := range pairs { + splitted := strings.SplitN(pair, "=", 2) + if len(splitted) != 2 { + return "", fmt.Errorf("malformed dsn %q", dsn) + } + kv[splitted[0]] = splitted[1] + } + + var fingerprint string + + if host, ok := kv["host"]; ok { + fingerprint += host + } else { + fingerprint += "localhost" + } + + if port, ok := kv["port"]; ok { + fingerprint += ":" + port + } else { + fingerprint += ":5432" + } + + return fingerprint, nil +} + +func loggableDSN(dsn string) string { + pDSN, err := url.Parse(dsn) + if err != nil { + return "could not parse DATA_SOURCE_NAME" + } + // Blank user info if not nil + if pDSN.User != nil { + pDSN.User = url.UserPassword(pDSN.User.Username(), "PASSWORD_REMOVED") + } + + return pDSN.String() +} + +type cachedMetrics struct { + metrics []prometheus.Metric + lastScrape time.Time +} + +// Server describes a connection to Postgres. +// Also it contains metrics map and query overrides. +type Server struct { + db *sql.DB + labels prometheus.Labels + master bool + + // Last version used to calculate metric map. If mismatch on scrape, + // then maps are recalculated. + lastMapVersion semver.Version + // Currently active metric map + metricMap map[string]MetricMapNamespace + // Currently active query overrides + queryOverrides map[string]string + mappingMtx sync.RWMutex + // Currently cached metrics + metricCache map[string]cachedMetrics + cacheMtx sync.Mutex +} + +// ServerOpt configures a server. 
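+// Options follow the functional-options pattern and are applied by NewServer
+// in the order given, after the default fingerprint label and connection
+// limits have been set.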
+type ServerOpt func(*Server) + +// ServerWithLabels configures a set of labels. +func ServerWithLabels(labels prometheus.Labels) ServerOpt { + return func(s *Server) { + for k, v := range labels { + s.labels[k] = v + } + } +} + +// NewServer establishes a new connection using DSN. +func NewServer(dsn string, opts ...ServerOpt) (*Server, error) { + fingerprint, err := parseFingerprint(dsn) + if err != nil { + return nil, err + } + + db, err := sql.Open("postgres", dsn) + if err != nil { + return nil, err + } + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + + log.Infof("Established new database connection to %q.", fingerprint) + + s := &Server{ + db: db, + master: false, + labels: prometheus.Labels{ + serverLabelName: fingerprint, + }, + metricCache: make(map[string]cachedMetrics), + } + + for _, opt := range opts { + opt(s) + } + + return s, nil +} + +// Close disconnects from Postgres. +func (s *Server) Close() error { + return s.db.Close() +} + +// Ping checks connection availability and possibly invalidates the connection if it fails. +func (s *Server) Ping() error { + if err := s.db.Ping(); err != nil { + if cerr := s.Close(); cerr != nil { + log.Errorf("Error while closing non-pinging DB connection to %q: %v", s, cerr) + } + return err + } + return nil +} + +// String returns server's fingerprint. +func (s *Server) String() string { + return s.labels[serverLabelName] +} + +// Scrape loads metrics. +func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error { + s.mappingMtx.RLock() + defer s.mappingMtx.RUnlock() + + var err error + + if !disableSettingsMetrics && s.master { + if err = querySettings(ch, s); err != nil { + err = fmt.Errorf("error retrieving settings: %s", err) + } + } + + errMap := queryNamespaceMappings(ch, s) + if len(errMap) > 0 { + err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap)) + } + + return err +} + +// Servers contains a collection of servers to Postgres. +type Servers struct { + m sync.Mutex + servers map[string]*Server + opts []ServerOpt +} + +// NewServers creates a collection of servers to Postgres. +func NewServers(opts ...ServerOpt) *Servers { + return &Servers{ + servers: make(map[string]*Server), + opts: opts, + } +} + +// GetServer returns established connection from a collection. +func (s *Servers) GetServer(dsn string) (*Server, error) { + s.m.Lock() + defer s.m.Unlock() + var err error + var ok bool + errCount := 0 // start at zero because we increment before doing work + retries := 3 + var server *Server + for { + if errCount++; errCount > retries { + return nil, err + } + server, ok = s.servers[dsn] + if !ok { + server, err = NewServer(dsn, s.opts...) + if err != nil { + time.Sleep(time.Duration(errCount) * time.Second) + continue + } + s.servers[dsn] = server + } + if err = server.Ping(); err != nil { + delete(s.servers, dsn) + time.Sleep(time.Duration(errCount) * time.Second) + continue + } + break + } + return server, nil +} + +// Close disconnects from all known servers. +func (s *Servers) Close() { + s.m.Lock() + defer s.m.Unlock() + for _, server := range s.servers { + if err := server.Close(); err != nil { + log.Errorf("failed to close connection to %q: %v", server, err) + } + } +} + +// Exporter collects Postgres metrics. It implements prometheus.Collector. +type Exporter struct { + // Holds a reference to the build in column mappings. Currently this is for testing purposes + // only, since it just points to the global. 
+ builtinMetricMaps map[string]intermediateMetricMap + + disableDefaultMetrics, disableSettingsMetrics, autoDiscoverDatabases bool + + excludeDatabases []string + dsn []string + userQueriesPath string + constantLabels prometheus.Labels + duration prometheus.Gauge + error prometheus.Gauge + psqlUp prometheus.Gauge + userQueriesError *prometheus.GaugeVec + totalScrapes prometheus.Counter + + // servers are used to allow re-using the DB connection between scrapes. + // servers contains metrics map and query overrides. + servers *Servers +} + +// ExporterOpt configures Exporter. +type ExporterOpt func(*Exporter) + +// DisableDefaultMetrics configures default metrics export. +func DisableDefaultMetrics(b bool) ExporterOpt { + return func(e *Exporter) { + e.disableDefaultMetrics = b + } +} + +// DisableSettingsMetrics configures pg_settings export. +func DisableSettingsMetrics(b bool) ExporterOpt { + return func(e *Exporter) { + e.disableSettingsMetrics = b + } +} + +// AutoDiscoverDatabases allows scraping all databases on a database server. +func AutoDiscoverDatabases(b bool) ExporterOpt { + return func(e *Exporter) { + e.autoDiscoverDatabases = b + } +} + +// ExcludeDatabases allows to filter out result from AutoDiscoverDatabases +func ExcludeDatabases(s string) ExporterOpt { + return func(e *Exporter) { + e.excludeDatabases = strings.Split(s, ",") + } +} + +// WithUserQueriesPath configures user's queries path. +func WithUserQueriesPath(p string) ExporterOpt { + return func(e *Exporter) { + e.userQueriesPath = p + } +} + +// WithConstantLabels configures constant labels. +func WithConstantLabels(s string) ExporterOpt { + return func(e *Exporter) { + e.constantLabels = parseConstLabels(s) + } +} + +func parseConstLabels(s string) prometheus.Labels { + labels := make(prometheus.Labels) + + s = strings.TrimSpace(s) + if len(s) == 0 { + return labels + } + + parts := strings.Split(s, ",") + for _, p := range parts { + keyValue := strings.Split(strings.TrimSpace(p), "=") + if len(keyValue) != 2 { + log.Errorf(`Wrong constant labels format %q, should be "key=value"`, p) + continue + } + key := strings.TrimSpace(keyValue[0]) + value := strings.TrimSpace(keyValue[1]) + if key == "" || value == "" { + continue + } + labels[key] = value + } + + return labels +} + +// NewExporter returns a new PostgreSQL exporter for the provided DSN. 
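+//
+// A minimal, illustrative wiring (the DSN and option values here are
+// placeholders, not defaults):
+//
+//	exporter := NewExporter(
+//		[]string{"postgresql://user:pass@localhost:5432/postgres?sslmode=disable"},
+//		DisableSettingsMetrics(true),
+//		WithConstantLabels("env=dev"),
+//	)
+//	prometheus.MustRegister(exporter)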
+func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter { + e := &Exporter{ + dsn: dsn, + builtinMetricMaps: builtinMetricMaps, + } + + for _, opt := range opts { + opt(e) + } + + e.setupInternalMetrics() + e.setupServers() + + return e +} + +func (e *Exporter) setupServers() { + e.servers = NewServers(ServerWithLabels(e.constantLabels)) +} + +func (e *Exporter) setupInternalMetrics() { + e.duration = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: exporter, + Name: "last_scrape_duration_seconds", + Help: "Duration of the last scrape of metrics from PostgresSQL.", + ConstLabels: e.constantLabels, + }) + e.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: exporter, + Name: "scrapes_total", + Help: "Total number of times PostgresSQL was scraped for metrics.", + ConstLabels: e.constantLabels, + }) + e.error = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: exporter, + Name: "last_scrape_error", + Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).", + ConstLabels: e.constantLabels, + }) + e.psqlUp = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "up", + Help: "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).", + ConstLabels: e.constantLabels, + }) + e.userQueriesError = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: exporter, + Name: "user_queries_load_error", + Help: "Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success).", + ConstLabels: e.constantLabels, + }, []string{"filename", "hashsum"}) +} + +// Describe implements prometheus.Collector. +func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { + // We cannot know in advance what metrics the exporter will generate + // from Postgres. So we use the poor man's describe method: Run a collect + // and send the descriptors of all the collected metrics. The problem + // here is that we need to connect to the Postgres DB. If it is currently + // unavailable, the descriptors will be incomplete. Since this is a + // stand-alone exporter and not used as a library within other code + // implementing additional metrics, the worst that can happen is that we + // don't detect inconsistent metrics created by this exporter + // itself. Also, a change in the monitored Postgres instance may change the + // exported metrics during the runtime of the exporter. + metricCh := make(chan prometheus.Metric) + doneCh := make(chan struct{}) + + go func() { + for m := range metricCh { + ch <- m.Desc() + } + close(doneCh) + }() + + e.Collect(metricCh) + close(metricCh) + <-doneCh +} + +// Collect implements prometheus.Collector. 
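+// Each call performs a fresh scrape of all configured servers and then emits
+// the exporter's own bookkeeping metrics: scrape duration, total scrape count,
+// the last-error flag, pg_up, and any user-query load errors.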
+func (e *Exporter) Collect(ch chan<- prometheus.Metric) { + e.scrape(ch) + + ch <- e.duration + ch <- e.totalScrapes + ch <- e.error + ch <- e.psqlUp + e.userQueriesError.Collect(ch) +} + +func newDesc(subsystem, name, help string, labels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc( + prometheus.BuildFQName(namespace, subsystem, name), + help, nil, labels, + ) +} + +func queryDatabases(server *Server) ([]string, error) { + rows, err := server.db.Query("SELECT datname FROM pg_database WHERE datallowconn = true AND datistemplate = false AND datname != current_database()") // nolint: safesql + if err != nil { + return nil, fmt.Errorf("Error retrieving databases: %v", err) + } + defer rows.Close() // nolint: errcheck + + var databaseName string + result := make([]string, 0) + for rows.Next() { + err = rows.Scan(&databaseName) + if err != nil { + return nil, errors.New(fmt.Sprintln("Error retrieving rows:", err)) + } + result = append(result, databaseName) + } + + return result, nil +} + +// Query within a namespace mapping and emit metrics. Returns fatal errors if +// the scrape fails, and a slice of errors if they were non-fatal. +func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNamespace) ([]prometheus.Metric, []error, error) { + // Check for a query override for this namespace + query, found := server.queryOverrides[namespace] + + // Was this query disabled (i.e. nothing sensible can be queried on cu + // version of PostgreSQL? + if query == "" && found { + // Return success (no pertinent data) + return []prometheus.Metric{}, []error{}, nil + } + + // Don't fail on a bad scrape of one metric + var rows *sql.Rows + var err error + + if !found { + // I've no idea how to avoid this properly at the moment, but this is + // an admin tool so you're not injecting SQL right? + rows, err = server.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas, safesql + } else { + rows, err = server.db.Query(query) // nolint: safesql + } + if err != nil { + return []prometheus.Metric{}, []error{}, fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) + } + defer rows.Close() // nolint: errcheck + + var columnNames []string + columnNames, err = rows.Columns() + if err != nil { + return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err)) + } + + // Make a lookup map for the column indices + var columnIdx = make(map[string]int, len(columnNames)) + for i, n := range columnNames { + columnIdx[n] = i + } + + var columnData = make([]interface{}, len(columnNames)) + var scanArgs = make([]interface{}, len(columnNames)) + for i := range columnData { + scanArgs[i] = &columnData[i] + } + + nonfatalErrors := []error{} + + metrics := make([]prometheus.Metric, 0) + + for rows.Next() { + err = rows.Scan(scanArgs...) + if err != nil { + return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err)) + } + + // Get the label values for this row. + labels := make([]string, len(mapping.labels)) + for idx, label := range mapping.labels { + labels[idx], _ = dbToString(columnData[columnIdx[label]]) + } + + // Loop over column names, and match to scan data. Unknown columns + // will be filled with an untyped metric number *if* they can be + // converted to float64s. NULLs are allowed and treated as NaN. 
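+		// Unknown columns are emitted as untyped metrics named
+		// <namespace>_<column>, carrying the same label set as the mapped
+		// metrics of this namespace.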
+ for idx, columnName := range columnNames { + var metric prometheus.Metric + if metricMapping, ok := mapping.columnMappings[columnName]; ok { + // Is this a metricy metric? + if metricMapping.discard { + continue + } + + value, ok := dbToFloat64(columnData[idx]) + if !ok { + nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx]))) + continue + } + // Generate the metric + metric = prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...) + } else { + // Unknown metric. Report as untyped if scan to float64 works, else note an error too. + metricLabel := fmt.Sprintf("%s_%s", namespace, columnName) + desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, server.labels) + + // Its not an error to fail here, since the values are + // unexpected anyway. + value, ok := dbToFloat64(columnData[idx]) + if !ok { + nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unparseable column type - discarding: ", namespace, columnName, err))) + continue + } + metric = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...) + } + metrics = append(metrics, metric) + } + } + return metrics, nonfatalErrors, nil +} + +// Iterate through all the namespace mappings in the exporter and run their +// queries. +func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error { + // Return a map of namespace -> errors + namespaceErrors := make(map[string]error) + + scrapeStart := time.Now() + + for namespace, mapping := range server.metricMap { + log.Debugln("Querying namespace: ", namespace) + + if mapping.master && !server.master { + log.Debugln("Query skipped...") + continue + } + + scrapeMetric := false + // Check if the metric is cached + server.cacheMtx.Lock() + cachedMetric, found := server.metricCache[namespace] + server.cacheMtx.Unlock() + // If found, check if needs refresh from cache + if found { + if scrapeStart.Sub(cachedMetric.lastScrape).Seconds() > float64(mapping.cacheSeconds) { + scrapeMetric = true + } + } else { + scrapeMetric = true + } + + var metrics []prometheus.Metric + var nonFatalErrors []error + var err error + if scrapeMetric { + metrics, nonFatalErrors, err = queryNamespaceMapping(server, namespace, mapping) + } else { + metrics = cachedMetric.metrics + } + + // Serious error - a namespace disappeared + if err != nil { + namespaceErrors[namespace] = err + log.Infoln(err) + } + // Non-serious errors - likely version or parsing problems. + if len(nonFatalErrors) > 0 { + for _, err := range nonFatalErrors { + log.Infoln(err.Error()) + } + } + + // Emit the metrics into the channel + for _, metric := range metrics { + ch <- metric + } + + if scrapeMetric { + // Only cache if metric is meaningfully cacheable + if mapping.cacheSeconds > 0 { + server.cacheMtx.Lock() + server.metricCache[namespace] = cachedMetrics{ + metrics: metrics, + lastScrape: scrapeStart, + } + server.cacheMtx.Unlock() + } + } + } + + return namespaceErrors +} + +// Check and update the exporters query maps if the version has changed. 
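+// The built-in metric map and query overrides are regenerated whenever the
+// reported semantic version differs from the one they were last built with,
+// and the user queries file (if configured) is re-read and re-applied at the
+// same time.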
+func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server) error { + log.Debugf("Querying Postgres Version on %q", server) + versionRow := server.db.QueryRow("SELECT version();") + var versionString string + err := versionRow.Scan(&versionString) + if err != nil { + return fmt.Errorf("Error scanning version string on %q: %v", server, err) + } + semanticVersion, err := parseVersion(versionString) + if err != nil { + return fmt.Errorf("Error parsing version string on %q: %v", server, err) + } + if !e.disableDefaultMetrics && semanticVersion.LT(lowestSupportedVersion) { + log.Warnf("PostgreSQL version is lower on %q then our lowest supported version! Got %s minimum supported is %s.", server, semanticVersion, lowestSupportedVersion) + } + + // Check if semantic version changed and recalculate maps if needed. + if semanticVersion.NE(server.lastMapVersion) || server.metricMap == nil { + log.Infof("Semantic Version Changed on %q: %s -> %s", server, server.lastMapVersion, semanticVersion) + server.mappingMtx.Lock() + + // Get Default Metrics only for master database + if !e.disableDefaultMetrics && server.master { + server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps) + server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides) + } else { + server.metricMap = make(map[string]MetricMapNamespace) + server.queryOverrides = make(map[string]string) + } + + server.lastMapVersion = semanticVersion + + if e.userQueriesPath != "" { + // Clear the metric while a reload is happening + e.userQueriesError.Reset() + + // Calculate the hashsum of the useQueries + userQueriesData, err := ioutil.ReadFile(e.userQueriesPath) + if err != nil { + log.Errorln("Failed to reload user queries:", e.userQueriesPath, err) + e.userQueriesError.WithLabelValues(e.userQueriesPath, "").Set(1) + } else { + hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData)) + + if err := addQueries(userQueriesData, semanticVersion, server); err != nil { + log.Errorln("Failed to reload user queries:", e.userQueriesPath, err) + e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(1) + } else { + // Mark user queries as successfully loaded + e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(0) + } + } + } + + server.mappingMtx.Unlock() + } + + // Output the version as a special metric only for master database + versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName), + "Version string as reported by postgres", []string{"version", "short_version"}, server.labels) + + if !e.disableDefaultMetrics && server.master { + ch <- prometheus.MustNewConstMetric(versionDesc, + prometheus.UntypedValue, 1, versionString, semanticVersion.String()) + } + return nil +} + +func (e *Exporter) scrape(ch chan<- prometheus.Metric) { + defer func(begun time.Time) { + e.duration.Set(time.Since(begun).Seconds()) + }(time.Now()) + + e.totalScrapes.Inc() + + dsns := e.dsn + if e.autoDiscoverDatabases { + dsns = e.discoverDatabaseDSNs() + } + + var errorsCount int + var connectionErrorsCount int + + for _, dsn := range dsns { + if err := e.scrapeDSN(ch, dsn); err != nil { + errorsCount++ + + log.Errorf(err.Error()) + + if _, ok := err.(*ErrorConnectToServer); ok { + connectionErrorsCount++ + } + } + } + + switch { + case connectionErrorsCount >= len(dsns): + e.psqlUp.Set(0) + default: + e.psqlUp.Set(1) // Didn't fail, can mark connection as up for this scrape. 
+ } + + switch errorsCount { + case 0: + e.error.Set(0) + default: + e.error.Set(1) + } +} + +func (e *Exporter) discoverDatabaseDSNs() []string { + dsns := make(map[string]struct{}) + for _, dsn := range e.dsn { + parsedDSN, err := url.Parse(dsn) + if err != nil { + log.Errorf("Unable to parse DSN (%s): %v", loggableDSN(dsn), err) + continue + } + + dsns[dsn] = struct{}{} + server, err := e.servers.GetServer(dsn) + if err != nil { + log.Errorf("Error opening connection to database (%s): %v", loggableDSN(dsn), err) + continue + } + + // If autoDiscoverDatabases is true, set first dsn as master database (Default: false) + server.master = true + + databaseNames, err := queryDatabases(server) + if err != nil { + log.Errorf("Error querying databases (%s): %v", loggableDSN(dsn), err) + continue + } + for _, databaseName := range databaseNames { + if contains(e.excludeDatabases, databaseName) { + continue + } + parsedDSN.Path = databaseName + dsns[parsedDSN.String()] = struct{}{} + } + } + + result := make([]string, len(dsns)) + index := 0 + for dsn := range dsns { + result[index] = dsn + index++ + } + + return result +} + +func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error { + server, err := e.servers.GetServer(dsn) + + if err != nil { + return &ErrorConnectToServer{fmt.Sprintf("Error opening connection to database (%s): %s", loggableDSN(dsn), err.Error())} + } + + // Check if autoDiscoverDatabases is false, set dsn as master database (Default: false) + if !e.autoDiscoverDatabases { + server.master = true + } + + // Check if map versions need to be updated + if err := e.checkMapVersions(ch, server); err != nil { + log.Warnln("Proceeding with outdated query maps, as the Postgres version could not be determined:", err) + } + + return server.Scrape(ch, e.disableSettingsMetrics) +} + +// try to get the DataSource +// DATA_SOURCE_NAME always wins so we do not break older versions +// reading secrets from files wins over secrets in environment variables +// DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS}_FILE > DATA_SOURCE_{USER|PASS} +func getDataSources() []string { + var dsn = os.Getenv("DATA_SOURCE_NAME") + if len(dsn) == 0 { + var user string + var pass string + var uri string + + if len(os.Getenv("DATA_SOURCE_USER_FILE")) != 0 { + fileContents, err := ioutil.ReadFile(os.Getenv("DATA_SOURCE_USER_FILE")) + if err != nil { + panic(err) + } + user = strings.TrimSpace(string(fileContents)) + } else { + user = os.Getenv("DATA_SOURCE_USER") + } + + if len(os.Getenv("DATA_SOURCE_PASS_FILE")) != 0 { + fileContents, err := ioutil.ReadFile(os.Getenv("DATA_SOURCE_PASS_FILE")) + if err != nil { + panic(err) + } + pass = strings.TrimSpace(string(fileContents)) + } else { + pass = os.Getenv("DATA_SOURCE_PASS") + } + + ui := url.UserPassword(user, pass).String() + + if len(os.Getenv("DATA_SOURCE_URI_FILE")) != 0 { + fileContents, err := ioutil.ReadFile(os.Getenv("DATA_SOURCE_URI_FILE")) + if err != nil { + panic(err) + } + uri = strings.TrimSpace(string(fileContents)) + } else { + uri = os.Getenv("DATA_SOURCE_URI") + } + + dsn = "postgresql://" + ui + "@" + uri + + return []string{dsn} + } + return strings.Split(dsn, ",") +} + +func contains(a []string, x string) bool { + for _, n := range a { + if x == n { + return true + } + } + return false +} + +func main() { + kingpin.Version(fmt.Sprintf("postgres_exporter %s (built with %s)\n", Version, runtime.Version())) + log.AddFlags(kingpin.CommandLine) + kingpin.Parse() + + // landingPage contains the HTML served at '/'. 
+	// TODO: Make this nicer and more informative.
+	var landingPage = []byte(`<html>
+	<head><title>Postgres exporter</title></head>
+	<body>
+	<h1>Postgres exporter</h1>
+	<p><a href='` + *metricPath + `'>Metrics</a></p>
+	</body>
+	</html>
+ + + `) + + if *onlyDumpMaps { + dumpMaps() + return + } + + dsn := getDataSources() + if len(dsn) == 0 { + log.Fatal("couldn't find environment variables describing the datasource to use") + } + + exporter := NewExporter(dsn, + DisableDefaultMetrics(*disableDefaultMetrics), + DisableSettingsMetrics(*disableSettingsMetrics), + AutoDiscoverDatabases(*autoDiscoverDatabases), + WithUserQueriesPath(*queriesPath), + WithConstantLabels(*constantLabelsList), + ExcludeDatabases(*excludeDatabases), + ) + defer func() { + exporter.servers.Close() + }() + + // Setup build info metric. + version.Branch = Branch + version.BuildDate = BuildDate + version.Revision = Revision + version.Version = VersionShort + prometheus.MustRegister(version.NewCollector("postgres_exporter")) + + prometheus.MustRegister(exporter) + + http.Handle(*metricPath, promhttp.Handler()) + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=UTF-8") // nolint: errcheck + w.Write(landingPage) // nolint: errcheck + }) + + log.Infof("Starting Server: %s", *listenAddress) + log.Fatal(http.ListenAndServe(*listenAddress, nil)) +} diff --git a/cmd/postgres_exporter/postgres_exporter_integration_test.go b/cmd/postgres_exporter/postgres_exporter_integration_test.go new file mode 100644 index 0000000..0363af9 --- /dev/null +++ b/cmd/postgres_exporter/postgres_exporter_integration_test.go @@ -0,0 +1,128 @@ +// These are specialized integration tests. We only build them when we're doing +// a lot of additional work to keep the external docker environment they require +// working. +// +build integration + +package main + +import ( + "fmt" + "os" + "strings" + "testing" + + _ "github.com/lib/pq" + "github.com/prometheus/client_golang/prometheus" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +type IntegrationSuite struct { + e *Exporter +} + +var _ = Suite(&IntegrationSuite{}) + +func (s *IntegrationSuite) SetUpSuite(c *C) { + dsn := os.Getenv("DATA_SOURCE_NAME") + c.Assert(dsn, Not(Equals), "") + + exporter := NewExporter(strings.Split(dsn, ",")) + c.Assert(exporter, NotNil) + // Assign the exporter to the suite + s.e = exporter + + prometheus.MustRegister(exporter) +} + +// TODO: it would be nice if cu didn't mostly just recreate the scrape function +func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) { + // Setup a dummy channel to consume metrics + ch := make(chan prometheus.Metric, 100) + go func() { + for range ch { + } + }() + + for _, dsn := range s.e.dsn { + // Open a database connection + server, err := NewServer(dsn) + c.Assert(server, NotNil) + c.Assert(err, IsNil) + + // Do a version update + err = s.e.checkMapVersions(ch, server) + c.Assert(err, IsNil) + + err = querySettings(ch, server) + if !c.Check(err, Equals, nil) { + fmt.Println("## ERRORS FOUND") + fmt.Println(err) + } + + // This should never happen in our test cases. + errMap := queryNamespaceMappings(ch, server) + if !c.Check(len(errMap), Equals, 0) { + fmt.Println("## NAMESPACE ERRORS FOUND") + for namespace, err := range errMap { + fmt.Println(namespace, ":", err) + } + } + server.Close() + } +} + +// TestInvalidDsnDoesntCrash tests that specifying an invalid DSN doesn't crash +// the exporter. Related to https://github.com/wrouesnel/postgres_exporter/issues/93 +// although not a replication of the scenario. 
+func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) { + // Setup a dummy channel to consume metrics + ch := make(chan prometheus.Metric, 100) + go func() { + for range ch { + } + }() + + // Send a bad DSN + exporter := NewExporter([]string{"invalid dsn"}) + c.Assert(exporter, NotNil) + exporter.scrape(ch) + + // Send a DSN to a non-listening port. + exporter = NewExporter([]string{"postgresql://nothing:nothing@127.0.0.1:1/nothing"}) + c.Assert(exporter, NotNil) + exporter.scrape(ch) +} + +// TestUnknownMetricParsingDoesntCrash deliberately deletes all the column maps out +// of an exporter to test that the default metric handling code can cope with unknown columns. +func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) { + // Setup a dummy channel to consume metrics + ch := make(chan prometheus.Metric, 100) + go func() { + for range ch { + } + }() + + dsn := os.Getenv("DATA_SOURCE_NAME") + c.Assert(dsn, Not(Equals), "") + + exporter := NewExporter(strings.Split(dsn, ",")) + c.Assert(exporter, NotNil) + + // Convert the default maps into a list of empty maps. + emptyMaps := make(map[string]intermediateMetricMap, 0) + for k := range exporter.builtinMetricMaps { + emptyMaps[k] = intermediateMetricMap{ + map[string]ColumnMapping{}, + true, + 0, + } + } + exporter.builtinMetricMaps = emptyMaps + + // scrape the exporter and make sure it works + exporter.scrape(ch) +} diff --git a/cmd/postgres_exporter/postgres_exporter_test.go b/cmd/postgres_exporter/postgres_exporter_test.go new file mode 100644 index 0000000..39932ce --- /dev/null +++ b/cmd/postgres_exporter/postgres_exporter_test.go @@ -0,0 +1,326 @@ +// +build !integration + +package main + +import ( + "io/ioutil" + "os" + "reflect" + "testing" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { TestingT(t) } + +type FunctionalSuite struct { +} + +var _ = Suite(&FunctionalSuite{}) + +func (s *FunctionalSuite) SetUpSuite(c *C) { + +} + +func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) { + testMetricMap := map[string]intermediateMetricMap{ + "test_namespace": { + map[string]ColumnMapping{ + "metric_which_stays": {COUNTER, "This metric should not be eliminated", nil, nil}, + "metric_which_discards": {COUNTER, "This metric should be forced to DISCARD", nil, nil}, + }, + true, + 0, + }, + } + + { + // No metrics should be eliminated + resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap) + c.Check( + resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, + Equals, + false, + ) + c.Check( + resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, + Equals, + false, + ) + } + + // nolint: dupl + { + // Update the map so the discard metric should be eliminated + discardableMetric := testMetricMap["test_namespace"].columnMappings["metric_which_discards"] + discardableMetric.supportedVersions = semver.MustParseRange(">0.0.1") + testMetricMap["test_namespace"].columnMappings["metric_which_discards"] = discardableMetric + + // Discard metric should be discarded + resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap) + c.Check( + resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, + Equals, + false, + ) + c.Check( + resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, + Equals, + true, + ) + } + + // nolint: dupl + { + // Update the map so the discard metric should be kept but has a version + discardableMetric := testMetricMap["test_namespace"].columnMappings["metric_which_discards"] + discardableMetric.supportedVersions = semver.MustParseRange(">0.0.1") + testMetricMap["test_namespace"].columnMappings["metric_which_discards"] = discardableMetric + + // Discard metric should be discarded + resultMap := makeDescMap(semver.MustParse("0.0.2"), prometheus.Labels{}, testMetricMap) + c.Check( + resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, + Equals, + false, + ) + c.Check( + resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, + Equals, + false, + ) + } +} + +// test read username and password from file +func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) { + err := os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file") + c.Assert(err, IsNil) + defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE") + + err = os.Setenv("DATA_SOURCE_PASS_FILE", "./tests/userpass_file") + c.Assert(err, IsNil) + defer UnsetEnvironment(c, "DATA_SOURCE_PASS_FILE") + + err = os.Setenv("DATA_SOURCE_URI", "localhost:5432/?sslmode=disable") + c.Assert(err, IsNil) + defer UnsetEnvironment(c, "DATA_SOURCE_URI") + + var expected = "postgresql://custom_username$&+,%2F%3A;=%3F%40:custom_password$&+,%2F%3A;=%3F%40@localhost:5432/?sslmode=disable" + + dsn := getDataSources() + if len(dsn) == 0 { + c.Errorf("Expected one data source, zero found") + } + if dsn[0] != expected { + c.Errorf("Expected Username to be read from file. 
Found=%v, expected=%v", dsn[0], expected) + } +} + +// test read DATA_SOURCE_NAME from environment +func (s *FunctionalSuite) TestEnvironmentSettingWithDns(c *C) { + envDsn := "postgresql://user:password@localhost:5432/?sslmode=enabled" + err := os.Setenv("DATA_SOURCE_NAME", envDsn) + c.Assert(err, IsNil) + defer UnsetEnvironment(c, "DATA_SOURCE_NAME") + + dsn := getDataSources() + if len(dsn) == 0 { + c.Errorf("Expected one data source, zero found") + } + if dsn[0] != envDsn { + c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], envDsn) + } +} + +// test DATA_SOURCE_NAME is used even if username and password environment variables are set +func (s *FunctionalSuite) TestEnvironmentSettingWithDnsAndSecrets(c *C) { + envDsn := "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled" + err := os.Setenv("DATA_SOURCE_NAME", envDsn) + c.Assert(err, IsNil) + defer UnsetEnvironment(c, "DATA_SOURCE_NAME") + + err = os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file") + c.Assert(err, IsNil) + defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE") + + err = os.Setenv("DATA_SOURCE_PASS", "envUserPass") + c.Assert(err, IsNil) + defer UnsetEnvironment(c, "DATA_SOURCE_PASS") + + dsn := getDataSources() + if len(dsn) == 0 { + c.Errorf("Expected one data source, zero found") + } + if dsn[0] != envDsn { + c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], envDsn) + } +} + +func (s *FunctionalSuite) TestPostgresVersionParsing(c *C) { + type TestCase struct { + input string + expected string + } + + cases := []TestCase{ + { + input: "PostgreSQL 10.1 on x86_64-pc-linux-gnu, compiled by gcc (Debian 6.3.0-18) 6.3.0 20170516, 64-bit", + expected: "10.1.0", + }, + { + input: "PostgreSQL 9.5.4, compiled by Visual C++ build 1800, 64-bit", + expected: "9.5.4", + }, + { + input: "EnterpriseDB 9.6.5.10 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16), 64-bit", + expected: "9.6.5", + }, + } + + for _, cs := range cases { + ver, err := parseVersion(cs.input) + c.Assert(err, IsNil) + c.Assert(ver.String(), Equals, cs.expected) + } +} + +func (s *FunctionalSuite) TestParseFingerprint(c *C) { + cases := []struct { + url string + fingerprint string + err string + }{ + { + url: "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled", + fingerprint: "localhost:55432", + }, + { + url: "postgresql://userDsn:passwordDsn%3D@localhost:55432/?sslmode=disabled", + fingerprint: "localhost:55432", + }, + { + url: "port=1234", + fingerprint: "localhost:1234", + }, + { + url: "host=example", + fingerprint: "example:5432", + }, + { + url: "xyz", + err: "malformed dsn \"xyz\"", + }, + } + + for _, cs := range cases { + f, err := parseFingerprint(cs.url) + if cs.err == "" { + c.Assert(err, IsNil) + } else { + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, cs.err) + } + c.Assert(f, Equals, cs.fingerprint) + } +} + +func (s *FunctionalSuite) TestParseConstLabels(c *C) { + cases := []struct { + s string + labels prometheus.Labels + }{ + { + s: "a=b", + labels: prometheus.Labels{ + "a": "b", + }, + }, + { + s: "", + labels: prometheus.Labels{}, + }, + { + s: "a=b, c=d", + labels: prometheus.Labels{ + "a": "b", + "c": "d", + }, + }, + { + s: "a=b, xyz", + labels: prometheus.Labels{ + "a": "b", + }, + }, + { + s: "a=", + labels: prometheus.Labels{}, + }, + } + + for _, cs := range cases { + labels := parseConstLabels(cs.s) + if !reflect.DeepEqual(labels, cs.labels) { + c.Fatalf("labels not equal (%v -> %v)", labels, 
cs.labels) + } + } +} + +func UnsetEnvironment(c *C, d string) { + err := os.Unsetenv(d) + c.Assert(err, IsNil) +} + +// test boolean metric type gets converted to float +func (s *FunctionalSuite) TestBooleanConversionToValueAndString(c *C) { + + type TestCase struct { + input interface{} + expectedString string + expectedValue float64 + expectedOK bool + } + + cases := []TestCase{ + { + input: true, + expectedString: "true", + expectedValue: 1.0, + expectedOK: true, + }, + { + input: false, + expectedString: "false", + expectedValue: 0.0, + expectedOK: true, + }, + } + + for _, cs := range cases { + value, ok := dbToFloat64(cs.input) + c.Assert(value, Equals, cs.expectedValue) + c.Assert(ok, Equals, cs.expectedOK) + + str, ok := dbToString(cs.input) + c.Assert(str, Equals, cs.expectedString) + c.Assert(ok, Equals, cs.expectedOK) + } +} + +func (s *FunctionalSuite) TestParseUserQueries(c *C) { + userQueriesData, err := ioutil.ReadFile("./tests/user_queries_ok.yaml") + if err == nil { + metricMaps, newQueryOverrides, err := parseUserQueries(userQueriesData) + c.Assert(err, Equals, nil) + c.Assert(metricMaps, NotNil) + c.Assert(newQueryOverrides, NotNil) + + if len(metricMaps) != 2 { + c.Errorf("Expected 2 metrics from user file, got %d", len(metricMaps)) + } + } +} diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile new file mode 100755 index 0000000..f12569f --- /dev/null +++ b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile @@ -0,0 +1,8 @@ +FROM postgres:11 +MAINTAINER Daniel Dent (https://www.danieldent.com) +ENV PG_MAX_WAL_SENDERS 8 +ENV PG_WAL_KEEP_SEGMENTS 8 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y inetutils-ping +COPY setup-replication.sh /docker-entrypoint-initdb.d/ +COPY docker-entrypoint.sh /docker-entrypoint.sh +RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 new file mode 100644 index 0000000..1689000 --- /dev/null +++ b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 @@ -0,0 +1,8 @@ +FROM postgres:{{VERSION}} +MAINTAINER Daniel Dent (https://www.danieldent.com) +ENV PG_MAX_WAL_SENDERS 8 +ENV PG_WAL_KEEP_SEGMENTS 8 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y inetutils-ping +COPY setup-replication.sh /docker-entrypoint-initdb.d/ +COPY docker-entrypoint.sh /docker-entrypoint.sh +RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/README.md b/cmd/postgres_exporter/tests/docker-postgres-replication/README.md new file mode 100644 index 0000000..86106b6 --- /dev/null +++ b/cmd/postgres_exporter/tests/docker-postgres-replication/README.md @@ -0,0 +1,11 @@ +# Replicated postgres cluster in docker. + +Upstream is forked from https://github.com/DanielDent/docker-postgres-replication + +My version lives at https://github.com/wrouesnel/docker-postgres-replication + +This very simple docker-compose file lets us stand up a replicated postgres +cluster so we can test streaming. + +# TODO: +Pull in p2 and template the Dockerfile so we can test multiple versions. 
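+
+For a quick local run (assumed usage): `docker-compose up --build -d` from this
+directory should bring up the pg-master/pg-slave pair defined in
+docker-compose.yml. The test-smoke script drives the same compose file after
+templating Dockerfile.p2.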
diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml b/cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml new file mode 100644 index 0000000..81a66da --- /dev/null +++ b/cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml @@ -0,0 +1,32 @@ + +version: '2' + +services: + pg-master: + build: '.' + image: 'danieldent/postgres-replication' + restart: 'always' + environment: + POSTGRES_USER: 'postgres' + POSTGRES_PASSWORD: 'postgres' + PGDATA: '/var/lib/postgresql/data/pgdata' + volumes: + - '/var/lib/postgresql/data' + expose: + - '5432' + + pg-slave: + build: '.' + image: 'danieldent/postgres-replication' + restart: 'always' + environment: + POSTGRES_USER: 'postgres' + POSTGRES_PASSWORD: 'postgres' + PGDATA: '/var/lib/postgresql/data/pgdata' + REPLICATE_FROM: 'pg-master' + volumes: + - '/var/lib/postgresql/data' + expose: + - '5432' + links: + - 'pg-master' diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh b/cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh new file mode 100755 index 0000000..24d15c8 --- /dev/null +++ b/cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh @@ -0,0 +1,140 @@ +#!/bin/bash + +# Backwards compatibility for old variable names (deprecated) +if [ "x$PGUSER" != "x" ]; then + POSTGRES_USER=$PGUSER +fi +if [ "x$PGPASSWORD" != "x" ]; then + POSTGRES_PASSWORD=$PGPASSWORD +fi + +# Forwards-compatibility for old variable names (pg_basebackup uses them) +if [ "x$PGPASSWORD" = "x" ]; then + export PGPASSWORD=$POSTGRES_PASSWORD +fi + +# Based on official postgres package's entrypoint script (https://hub.docker.com/_/postgres/) +# Modified to be able to set up a slave. The docker-entrypoint-initdb.d hook provided is inadequate. + +set -e + +if [ "${1:0:1}" = '-' ]; then + set -- postgres "$@" +fi + +if [ "$1" = 'postgres' ]; then + mkdir -p "$PGDATA" + chmod 700 "$PGDATA" + chown -R postgres "$PGDATA" + + mkdir -p /run/postgresql + chmod g+s /run/postgresql + chown -R postgres /run/postgresql + + # look specifically for PG_VERSION, as it is expected in the DB dir + if [ ! -s "$PGDATA/PG_VERSION" ]; then + if [ "x$REPLICATE_FROM" == "x" ]; then + eval "gosu postgres initdb $POSTGRES_INITDB_ARGS" + else + until /bin/ping -c 1 -W 1 ${REPLICATE_FROM} + do + echo "Waiting for master to ping..." + sleep 1s + done + until gosu postgres pg_basebackup -h ${REPLICATE_FROM} -D ${PGDATA} -U ${POSTGRES_USER} -vP -w + do + echo "Waiting for master to connect..." + sleep 1s + done + fi + + # check password first so we can output the warning before postgres + # messes it up + if [ ! -z "$POSTGRES_PASSWORD" ]; then + pass="PASSWORD '$POSTGRES_PASSWORD'" + authMethod=md5 + else + # The - option suppresses leading tabs but *not* spaces. :) + cat >&2 <<-'EOWARN' + **************************************************** + WARNING: No password has been set for the database. + This will allow anyone with access to the + Postgres port to access your database. In + Docker's default configuration, this is + effectively any other container on the same + system. + + Use "-e POSTGRES_PASSWORD=password" to set + it in "docker run". 
+ **************************************************** + EOWARN + + pass= + authMethod=trust + fi + + if [ "x$REPLICATE_FROM" == "x" ]; then + + { echo; echo "host replication all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null + { echo; echo "host all all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null + + # internal start of server in order to allow set-up using psql-client + # does not listen on external TCP/IP and waits until start finishes + gosu postgres pg_ctl -D "$PGDATA" \ + -o "-c listen_addresses='localhost'" \ + -w start + + : ${POSTGRES_USER:=postgres} + : ${POSTGRES_DB:=$POSTGRES_USER} + export POSTGRES_USER POSTGRES_DB + + psql=( "psql" "-v" "ON_ERROR_STOP=1" ) + + if [ "$POSTGRES_DB" != 'postgres' ]; then + "${psql[@]}" --username postgres <<-EOSQL + CREATE DATABASE "$POSTGRES_DB" ; + EOSQL + echo + fi + + if [ "$POSTGRES_USER" = 'postgres' ]; then + op='ALTER' + else + op='CREATE' + fi + "${psql[@]}" --username postgres <<-EOSQL + $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ; + EOSQL + echo + + fi + + psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" ) + + echo + for f in /docker-entrypoint-initdb.d/*; do + case "$f" in + *.sh) echo "$0: running $f"; . "$f" ;; + *.sql) echo "$0: running $f"; "${psql[@]}" < "$f"; echo ;; + *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;; + *) echo "$0: ignoring $f" ;; + esac + echo + done + + if [ "x$REPLICATE_FROM" == "x" ]; then + gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop + fi + + echo + echo 'PostgreSQL init process complete; ready for start up.' + echo + fi + + # We need this health check so we know when it's started up. + touch /tmp/.postgres_init_complete + + exec gosu postgres "$@" +fi + +exec "$@" diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh b/cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh new file mode 100755 index 0000000..460c548 --- /dev/null +++ b/cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ "x$REPLICATE_FROM" == "x" ]; then + +cat >> ${PGDATA}/postgresql.conf < ${PGDATA}/recovery.conf <&2 +echo "Test Binary: $test_binary" 1>&2 + +[ -z "$postgres_exporter" ] && echo "Missing exporter binary" && exit 1 +[ -z "$test_binary" ] && echo "Missing test binary" && exit 1 + +cd "$DIR" || exit 1 + +VERSIONS=( \ + 9.4 \ + 9.5 \ + 9.6 \ + 10 \ + 11 \ +) + +wait_for_postgres(){ + local container=$1 + local ip=$2 + local port=$3 + if [ -z "$ip" ]; then + echo "No IP specified." 1>&2 + exit 1 + fi + + if [ -z "$port" ]; then + echo "No port specified." 1>&2 + exit 1 + fi + + local wait_start + wait_start=$(date +%s) || exit 1 + echo "Waiting for postgres to start listening..." + while ! docker exec "$container" pg_isready --host="$ip" --port="$port" &> /dev/null; do + if [ $(( $(date +%s) - wait_start )) -gt "$TIMEOUT" ]; then + echo "Timed out waiting for postgres to start!" 1>&2 + exit 1 + fi + sleep 1 + done + echo "Postgres is online at $ip:$port" +} + +wait_for_exporter() { + local wait_start + wait_start=$(date +%s) || exit 1 + echo "Waiting for exporter to start..." + while ! nc -z localhost "$exporter_port" ; do + if [ $(( $(date +%s) - wait_start )) -gt "$TIMEOUT" ]; then + echo "Timed out waiting for exporter!" 
1>&2 + exit 1 + fi + sleep 1 + done + echo "Exporter is online at localhost:$exporter_port" +} + +smoketest_postgres() { + local version=$1 + local CONTAINER_NAME=postgres_exporter-test-smoke + local TIMEOUT=30 + local IMAGE_NAME=postgres + + local CUR_IMAGE=$IMAGE_NAME:$version + + echo "#######################" + echo "Standalone Postgres $version" + echo "#######################" + local docker_cmd="docker run -d -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD $CUR_IMAGE" + echo "Docker Cmd: $docker_cmd" + + CONTAINER_NAME=$($docker_cmd) + standalone_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $CONTAINER_NAME) + # shellcheck disable=SC2064 + trap "docker logs $CONTAINER_NAME ; docker kill $CONTAINER_NAME ; docker rm -v $CONTAINER_NAME; exit 1" EXIT INT TERM + wait_for_postgres "$CONTAINER_NAME" "$standalone_ip" 5432 + + # Run the test binary. + DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$standalone_ip:5432/?sslmode=disable" $test_binary || exit $? + + # Extract a raw metric list. + DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$standalone_ip:5432/?sslmode=disable" $postgres_exporter \ + --log.level=debug --web.listen-address=:$exporter_port & + exporter_pid=$! + # shellcheck disable=SC2064 + trap "docker logs $CONTAINER_NAME ; docker kill $CONTAINER_NAME ; docker rm -v $CONTAINER_NAME; kill $exporter_pid; exit 1" EXIT INT TERM + wait_for_exporter + + # Dump the metrics to a file. + if ! wget -q -O - http://localhost:$exporter_port/metrics 1> "$METRICS_DIR/.metrics.single.$version.prom" ; then + echo "Failed on postgres $version (standalone $DOCKER_IMAGE)" 1>&2 + kill $exporter_pid + exit 1 + fi + + # HACK test: check pg_up is a 1 - TODO: expand integration tests to include metric consumption + if ! grep 'pg_up.* 1' $METRICS_DIR/.metrics.single.$version.prom ; then + echo "pg_up metric was not 1 despite exporter and database being up" + kill $exporter_pid + exit 1 + fi + + kill $exporter_pid + docker kill "$CONTAINER_NAME" + docker rm -v "$CONTAINER_NAME" + trap - EXIT INT TERM + + echo "#######################" + echo "Replicated Postgres $version" + echo "#######################" + old_pwd=$(pwd) + cd docker-postgres-replication || exit 1 + + if ! VERSION="$version" p2 -t Dockerfile.p2 -o Dockerfile ; then + echo "Templating failed" 1>&2 + exit 1 + fi + trap "docker-compose logs; docker-compose down ; docker-compose rm -v; exit 1" EXIT INT TERM + local compose_cmd="POSTGRES_PASSWORD=$POSTGRES_PASSWORD docker-compose up -d --force-recreate --build" + echo "Compose Cmd: $compose_cmd" + eval "$compose_cmd" + + master_container=$(docker-compose ps -q pg-master) + slave_container=$(docker-compose ps -q pg-slave) + master_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$master_container") + slave_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$slave_container") + echo "Got master IP: $master_ip" + wait_for_postgres "$master_container" "$master_ip" 5432 + wait_for_postgres "$slave_container" "$slave_ip" 5432 + + DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$master_ip:5432/?sslmode=disable" $test_binary || exit $? + + DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$master_ip:5432/?sslmode=disable" $postgres_exporter \ + --log.level=debug --web.listen-address=:$exporter_port & + exporter_pid=$! 
+ # shellcheck disable=SC2064 + trap "docker-compose logs; docker-compose down ; docker-compose rm -v ; kill $exporter_pid; exit 1" EXIT INT TERM + wait_for_exporter + + if ! wget -q -O - http://localhost:$exporter_port/metrics 1> "$METRICS_DIR/.metrics.replicated.$version.prom" ; then + echo "Failed on postgres $version (replicated $DOCKER_IMAGE)" 1>&2 + exit 1 + fi + + kill $exporter_pid + docker-compose down + docker-compose rm -v + trap - EXIT INT TERM + + cd "$old_pwd" || exit 1 +} + +# Start pulling the docker images in advance +for version in "${VERSIONS[@]}"; do + docker pull "postgres:$version" > /dev/null & +done + +for version in "${VERSIONS[@]}"; do + echo "Testing postgres version $version" + smoketest_postgres "$version" +done diff --git a/cmd/postgres_exporter/tests/user_queries_ok.yaml b/cmd/postgres_exporter/tests/user_queries_ok.yaml new file mode 100644 index 0000000..e5ecec9 --- /dev/null +++ b/cmd/postgres_exporter/tests/user_queries_ok.yaml @@ -0,0 +1,23 @@ +pg_locks_mode: + query: "WITH q_locks AS (select * from pg_locks where pid != pg_backend_pid() and database = (select oid from pg_database where datname = current_database())) SELECT (select current_database()) as datname, + lockmodes AS tag_lockmode, coalesce((select count(*) FROM q_locks WHERE mode = lockmodes), 0) AS count FROM + unnest('{AccessShareLock, ExclusiveLock, RowShareLock, RowExclusiveLock, ShareLock, ShareRowExclusiveLock, AccessExclusiveLock, ShareUpdateExclusiveLock}'::text[]) lockmodes;" + metrics: + - datname: + usage: "LABEL" + description: "Database name" + - tag_lockmode: + usage: "LABEL" + description: "Lock type" + - count: + usage: "GAUGE" + description: "Number of lock" +pg_wal: + query: "select current_database() as datname, case when pg_is_in_recovery() = false then pg_xlog_location_diff(pg_current_xlog_location(), '0/0')::int8 else pg_xlog_location_diff(pg_last_xlog_replay_location(), '0/0')::int8 end as xlog_location_b;" + metrics: + - datname: + usage: "LABEL" + description: "Database name" + - xlog_location_b: + usage: "COUNTER" + description: "current transaction log write location" diff --git a/cmd/postgres_exporter/tests/username_file b/cmd/postgres_exporter/tests/username_file new file mode 100644 index 0000000..0650cfd --- /dev/null +++ b/cmd/postgres_exporter/tests/username_file @@ -0,0 +1 @@ +custom_username$&+,/:;=?@ diff --git a/cmd/postgres_exporter/tests/userpass_file b/cmd/postgres_exporter/tests/userpass_file new file mode 100644 index 0000000..a9caa8d --- /dev/null +++ b/cmd/postgres_exporter/tests/userpass_file @@ -0,0 +1 @@ +custom_password$&+,/:;=?@ diff --git a/example.alerts.yml b/example.alerts.yml new file mode 100644 index 0000000..1b08a43 --- /dev/null +++ b/example.alerts.yml @@ -0,0 +1,57 @@ +--- +groups: + - name: PostgreSQL + rules: + - alert: PostgreSQLMaxConnectionsReached + expr: sum(pg_stat_activity_count) by (instance) > sum(pg_settings_max_connections) by (instance) + for: 1m + labels: + severity: email + annotations: + summary: "{{ $labels.instance }} has maxed out Postgres connections." + description: "{{ $labels.instance }} is exceeding the currently configured maximum Postgres connection limit (current value: {{ $value }}s). Services may be degraded - please take immediate action (you probably need to increase max_connections in the Docker image and re-deploy." 
+ + - alert: PostgreSQLHighConnections + expr: sum(pg_stat_activity_count) by (instance) > sum(pg_settings_max_connections * 0.8) by (instance) + for: 10m + labels: + severity: email + annotations: + summary: "{{ $labels.instance }} is over 80% of max Postgres connections." + description: "{{ $labels.instance }} is exceeding 80% of the currently configured maximum Postgres connection limit (current value: {{ $value }}s). Please check utilization graphs and confirm if this is normal service growth, abuse or an otherwise temporary condition or if new resources need to be provisioned (or the limits increased, which is mostly likely)." + + - alert: PostgreSQLDown + expr: pg_up != 1 + for: 1m + labels: + severity: email + annotations: + summary: "PostgreSQL is not processing queries: {{ $labels.instance }}" + description: "{{ $labels.instance }} is rejecting query requests from the exporter, and thus probably not allowing DNS requests to work either. User services should not be effected provided at least 1 node is still alive." + + - alert: PostgreSQLSlowQueries + expr: avg(rate(pg_stat_activity_max_tx_duration{datname!~"template.*"}[2m])) by (datname) > 2 * 60 + for: 2m + labels: + severity: email + annotations: + summary: "PostgreSQL high number of slow on {{ $labels.cluster }} for database {{ $labels.datname }} " + description: "PostgreSQL high number of slow queries {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }} " + + - alert: PostgreSQLQPS + expr: avg(irate(pg_stat_database_xact_commit{datname!~"template.*"}[5m]) + irate(pg_stat_database_xact_rollback{datname!~"template.*"}[5m])) by (datname) > 10000 + for: 5m + labels: + severity: email + annotations: + summary: "PostgreSQL high number of queries per second {{ $labels.cluster }} for database {{ $labels.datname }}" + description: "PostgreSQL high number of queries per second on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}" + + - alert: PostgreSQLCacheHitRatio + expr: avg(rate(pg_stat_database_blks_hit{datname!~"template.*"}[5m]) / (rate(pg_stat_database_blks_hit{datname!~"template.*"}[5m]) + rate(pg_stat_database_blks_read{datname!~"template.*"}[5m]))) by (datname) < 0.98 + for: 5m + labels: + severity: email + annotations: + summary: "PostgreSQL low cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }}" + description: "PostgreSQL low on cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}" diff --git a/gh-assets-clone.sh b/gh-assets-clone.sh new file mode 100755 index 0000000..506485e --- /dev/null +++ b/gh-assets-clone.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Script to setup the assets clone of the repository using GIT_ASSETS_BRANCH and +# GIT_API_KEY. + +[ ! -z "$GIT_ASSETS_BRANCH" ] || exit 1 + +setup_git() { + git config --global user.email "travis@travis-ci.org" || exit 1 + git config --global user.name "Travis CI" || exit 1 +} + +# Constants +ASSETS_DIR=".assets-branch" + +# Clone the assets branch with the correct credentials +git clone --single-branch -b "$GIT_ASSETS_BRANCH" \ + "https://${GIT_API_KEY}@github.com/${TRAVIS_REPO_SLUG}.git" "$ASSETS_DIR" || exit 1 + diff --git a/gh-metrics-push.sh b/gh-metrics-push.sh new file mode 100755 index 0000000..37f335d --- /dev/null +++ b/gh-metrics-push.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Script to copy and push new metric versions to the assets branch. + +[ ! -z "$GIT_ASSETS_BRANCH" ] || exit 1 +[ ! 
-z "$GIT_API_KEY" ] || exit 1 + +version=$(git describe HEAD) || exit 1 + +# Constants +ASSETS_DIR=".assets-branch" +METRICS_DIR="$ASSETS_DIR/metriclists" + +# Ensure metrics dir exists +mkdir -p "$METRICS_DIR/" + +# Remove old files so we spot deletions +rm -f "$METRICS_DIR/.*.unique" + +# Copy new files +cp -f -t "$METRICS_DIR/" ./.metrics.*.prom.unique || exit 1 + +# Enter the assets dir and push. +cd "$ASSETS_DIR" || exit 1 + +git add "metriclists" || exit 1 +git commit -m "Added unique metrics for build from $version" || exit 1 +git push origin "$GIT_ASSETS_BRANCH" || exit 1 + +exit 0 \ No newline at end of file diff --git a/mage.go b/mage.go new file mode 100644 index 0000000..c1392b2 --- /dev/null +++ b/mage.go @@ -0,0 +1,11 @@ +// +build ignore + +package main + +import ( + "os" + + "github.com/magefile/mage/mage" +) + +func main() { os.Exit(mage.Main()) } diff --git a/magefile.go b/magefile.go new file mode 100644 index 0000000..b3e0992 --- /dev/null +++ b/magefile.go @@ -0,0 +1,786 @@ +// +build mage +// Self-contained go-project magefile. + +// nolint: deadcode +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + "time" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" + "github.com/magefile/mage/target" + + "errors" + "math/bits" + "strconv" + + "github.com/mholt/archiver" +) + +var curDir = func() string { + name, _ := os.Getwd() + return name +}() + +const constCoverageDir = ".coverage" +const constToolDir = "tools" +const constBinDir = "bin" +const constReleaseDir = "release" +const constCmdDir = "cmd" +const constCoverFile = "cover.out" +const constAssets = "assets" +const constAssetsGenerated = "assets/generated" + +var coverageDir = mustStr(filepath.Abs(path.Join(curDir, constCoverageDir))) +var toolDir = mustStr(filepath.Abs(path.Join(curDir, constToolDir))) +var binDir = mustStr(filepath.Abs(path.Join(curDir, constBinDir))) +var releaseDir = mustStr(filepath.Abs(path.Join(curDir, constReleaseDir))) +var cmdDir = mustStr(filepath.Abs(path.Join(curDir, constCmdDir))) +var assetsGenerated = mustStr(filepath.Abs(path.Join(curDir, constAssetsGenerated))) + +// Calculate file paths +var toolsGoPath = toolDir +var toolsSrcDir = mustStr(filepath.Abs(path.Join(toolDir, "src"))) +var toolsBinDir = mustStr(filepath.Abs(path.Join(toolDir, "bin"))) +var toolsVendorDir = mustStr(filepath.Abs(path.Join(toolDir, "vendor"))) + +var outputDirs = []string{binDir, releaseDir, toolsGoPath, toolsBinDir, + toolsVendorDir, assetsGenerated, coverageDir} + +var toolsEnv = map[string]string{"GOPATH": toolsGoPath} + +var containerName = func() string { + if name := os.Getenv("CONTAINER_NAME"); name != "" { + return name + } + return "wrouesnel/postgres_exporter:latest" +}() + +type Platform struct { + OS string + Arch string + BinSuffix string +} + +func (p *Platform) String() string { + return fmt.Sprintf("%s-%s", p.OS, p.Arch) +} + +func (p *Platform) PlatformDir() string { + platformDir := path.Join(binDir, fmt.Sprintf("%s_%s_%s", productName, versionShort, p.String())) + return platformDir +} + +func (p *Platform) PlatformBin(cmd string) string { + platformBin := fmt.Sprintf("%s%s", cmd, p.BinSuffix) + return path.Join(p.PlatformDir(), platformBin) +} + +func (p *Platform) ArchiveDir() string { + return fmt.Sprintf("%s_%s_%s", productName, versionShort, p.String()) +} + +func (p *Platform) ReleaseBase() string { + return path.Join(releaseDir, fmt.Sprintf("%s_%s_%s", productName, versionShort, p.String())) +} + 
+// Supported platforms +var platforms []Platform = []Platform{ + {"linux", "amd64", ""}, + {"linux", "386", ""}, + {"darwin", "amd64", ""}, + {"darwin", "386", ""}, + {"windows", "amd64", ".exe"}, + {"windows", "386", ".exe"}, + {"freebsd", "amd64", ""}, +} + +// productName can be overridden by environ product name +var productName = func() string { + if name := os.Getenv("PRODUCT_NAME"); name != "" { + return name + } + name, _ := os.Getwd() + return path.Base(name) +}() + +// Source files +var goSrc []string +var goDirs []string +var goPkgs []string +var goCmds []string + +var branch = func() string { + if v := os.Getenv("BRANCH"); v != "" { + return v + } + out, _ := sh.Output("git", "rev-parse", "--abbrev-ref", "HEAD") + + return out +}() + +var buildDate = func() string { + if v := os.Getenv("BUILDDATE"); v != "" { + return v + } + return time.Now().Format("2006-01-02T15:04:05-0700") +}() + +var revision = func() string { + if v := os.Getenv("REVISION"); v != "" { + return v + } + out, _ := sh.Output("git", "rev-parse", "HEAD") + + return out +}() + +var version = func() string { + if v := os.Getenv("VERSION"); v != "" { + return v + } + out, _ := sh.Output("git", "describe", "--dirty") + + if out == "" { + return "v0.0.0" + } + + return out +}() + +var versionShort = func() string { + if v := os.Getenv("VERSION_SHORT"); v != "" { + return v + } + out, _ := sh.Output("git", "describe", "--abbrev=0") + + if out == "" { + return "v0.0.0" + } + + return out +}() + +var concurrency = func() int { + if v := os.Getenv("CONCURRENCY"); v != "" { + pv, err := strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + panic(err) + } + return int(pv) + } + return runtime.NumCPU() +}() + +var linterDeadline = func() time.Duration { + if v := os.Getenv("LINTER_DEADLINE"); v != "" { + d, _ := time.ParseDuration(v) + if d != 0 { + return d + } + } + return time.Second * 60 +}() + +func Log(args ...interface{}) { + if mg.Verbose() { + fmt.Println(args...) 
+ } +} + +func init() { + // Set environment + os.Setenv("PATH", fmt.Sprintf("%s:%s", toolsBinDir, os.Getenv("PATH"))) + Log("Build PATH: ", os.Getenv("PATH")) + Log("Concurrency:", concurrency) + goSrc = func() []string { + results := new([]string) + filepath.Walk(".", func(relpath string, info os.FileInfo, err error) error { + // Ensure absolute path so globs work + path, err := filepath.Abs(relpath) + if err != nil { + panic(err) + } + + // Look for files + if info.IsDir() { + return nil + } + + // Exclusions + for _, exclusion := range []string{toolDir, binDir, releaseDir, coverageDir} { + if strings.HasPrefix(path, exclusion) { + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + } + + if strings.Contains(path, "/vendor/") { + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + + if strings.Contains(path, ".git") { + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + + if !strings.HasSuffix(path, ".go") { + return nil + } + + *results = append(*results, path) + return nil + }) + return *results + }() + goDirs = func() []string { + resultMap := make(map[string]struct{}) + for _, path := range goSrc { + absDir, err := filepath.Abs(filepath.Dir(path)) + if err != nil { + panic(err) + } + resultMap[absDir] = struct{}{} + } + results := []string{} + for k := range resultMap { + results = append(results, k) + } + return results + }() + goPkgs = func() []string { + results := []string{} + out, err := sh.Output("go", "list", "./...") + if err != nil { + panic(err) + } + for _, line := range strings.Split(out, "\n") { + if !strings.Contains(line, "/vendor/") { + results = append(results, line) + } + } + return results + }() + goCmds = func() []string { + results := []string{} + + finfos, err := ioutil.ReadDir(cmdDir) + if err != nil { + panic(err) + } + for _, finfo := range finfos { + results = append(results, finfo.Name()) + } + return results + }() + + // Ensure output dirs exist + for _, dir := range outputDirs { + os.MkdirAll(dir, os.FileMode(0777)) + } +} + +func mustStr(r string, err error) string { + if err != nil { + panic(err) + } + return r +} + +func getCoreTools() []string { + staticTools := []string{ + "github.com/kardianos/govendor", + "github.com/wadey/gocovmerge", + "github.com/mattn/goveralls", + "github.com/tmthrgd/go-bindata/go-bindata", + "github.com/GoASTScanner/gas/cmd/gas", // workaround for Ast scanner + "github.com/alecthomas/gometalinter", + } + return staticTools +} + +func getMetalinters() []string { + // Gometalinter should now be on the command line + dynamicTools := []string{} + + goMetalinterHelp, _ := sh.Output("gometalinter", "--help") + linterRx := regexp.MustCompile(`\s+\w+:\s*\((.+)\)`) + for _, l := range strings.Split(goMetalinterHelp, "\n") { + linter := linterRx.FindStringSubmatch(l) + if len(linter) > 1 { + dynamicTools = append(dynamicTools, linter[1]) + } + } + return dynamicTools +} + +func ensureVendorSrcLink() error { + Log("Symlink vendor to tools dir") + if err := sh.Rm(toolsSrcDir); err != nil { + return err + } + if err := os.Symlink(toolsVendorDir, toolsSrcDir); err != nil { + return err + } + return nil +} + +// concurrencyLimitedBuild executes a certain number of commands limited by concurrency +func concurrencyLimitedBuild(buildCmds ...interface{}) error { + resultsCh := make(chan error, len(buildCmds)) + concurrencyControl := make(chan struct{}, concurrency) + for _, buildCmd := range buildCmds { + go func(buildCmd interface{}) { + concurrencyControl <- struct{}{} + resultsCh <- 
buildCmd.(func() error)() + <-concurrencyControl + + }(buildCmd) + } + // Doesn't work at the moment + // mg.Deps(buildCmds...) + results := []error{} + var resultErr error = nil + for len(results) < len(buildCmds) { + err := <-resultsCh + results = append(results, err) + if err != nil { + fmt.Println(err) + resultErr = errors.New("parallel build failed") + } + fmt.Printf("Finished %v of %v\n", len(results), len(buildCmds)) + } + + return resultErr +} + +// Tools builds build tools of the project and is depended on by all other build targets. +func Tools() (err error) { + // Catch panics and convert to errors + defer func() { + if perr := recover(); perr != nil { + err = perr.(error) + } + }() + + if err := ensureVendorSrcLink(); err != nil { + return err + } + + toolBuild := func(toolType string, tools ...string) error { + toolTargets := []interface{}{} + for _, toolImport := range tools { + toolParts := strings.Split(toolImport, "/") + toolBin := path.Join(toolsBinDir, toolParts[len(toolParts)-1]) + Log("Check for changes:", toolBin, toolsVendorDir) + changed, terr := target.Dir(toolBin, toolsVendorDir) + if terr != nil { + if !os.IsNotExist(terr) { + panic(terr) + } + changed = true + } + if changed { + localToolImport := toolImport + f := func() error { return sh.RunWith(toolsEnv, "go", "install", "-v", localToolImport) } + toolTargets = append(toolTargets, f) + } + } + + Log("Build", toolType, "tools") + if berr := concurrencyLimitedBuild(toolTargets...); berr != nil { + return berr + } + return nil + } + + if berr := toolBuild("static", getCoreTools()...); berr != nil { + return berr + } + + if berr := toolBuild("static", getMetalinters()...); berr != nil { + return berr + } + + return nil +} + +// UpdateTools automatically updates tool dependencies to the latest version. +func UpdateTools() error { + if err := ensureVendorSrcLink(); err != nil { + return err + } + + // Ensure govendor is up to date without doing anything + govendorPkg := "github.com/kardianos/govendor" + govendorParts := strings.Split(govendorPkg, "/") + govendorBin := path.Join(toolsBinDir, govendorParts[len(govendorParts)-1]) + + sh.RunWith(toolsEnv, "go", "get", "-v", "-u", govendorPkg) + + if changed, cerr := target.Dir(govendorBin, toolsSrcDir); changed || os.IsNotExist(cerr) { + if err := sh.RunWith(toolsEnv, "go", "install", "-v", govendorPkg); err != nil { + return err + } + } else if cerr != nil { + panic(cerr) + } + + // Set current directory so govendor has the right path + previousPwd, wderr := os.Getwd() + if wderr != nil { + return wderr + } + if err := os.Chdir(toolDir); err != nil { + return err + } + + // govendor fetch core tools + for _, toolImport := range append(getCoreTools(), getMetalinters()...) { + sh.RunV("govendor", "fetch", "-v", toolImport) + } + + // change back to original working directory + if err := os.Chdir(previousPwd); err != nil { + return err + } + return nil +} + +// Assets builds binary assets to be bundled into the binary. +func Assets() error { + mg.Deps(Tools) + + if err := os.MkdirAll("assets/generated", os.FileMode(0777)); err != nil { + return err + } + + return sh.RunV("go-bindata", "-pkg=assets", "-o", "assets/bindata.go", "-ignore=bindata.go", + "-ignore=.*.map$", "-prefix=assets/generated", "assets/generated/...") +} + +// Lint runs gometalinter for code quality. CI will run this before accepting PRs. 
+func Lint() error {
+ mg.Deps(Tools)
+ args := []string{"-j", fmt.Sprintf("%v", concurrency), fmt.Sprintf("--deadline=%s",
+ linterDeadline.String()), "--enable-all", "--line-length=120",
+ "--disable=gocyclo", "--disable=testify", "--disable=test", "--disable=lll", "--exclude=assets/bindata.go"}
+ return sh.RunV("gometalinter", append(args, goDirs...)...)
+}
+
+// Style checks formatting of the source files. CI will run this before accepting PRs.
+func Style() error {
+ mg.Deps(Tools)
+ args := []string{"--disable-all", "--enable=gofmt", "--enable=goimports"}
+ return sh.RunV("gometalinter", append(args, goSrc...)...)
+}
+
+// Fmt automatically formats all source code files
+func Fmt() error {
+ mg.Deps(Tools)
+ fmtErr := sh.RunV("gofmt", append([]string{"-s", "-w"}, goSrc...)...)
+ if fmtErr != nil {
+ return fmtErr
+ }
+ impErr := sh.RunV("goimports", append([]string{"-w"}, goSrc...)...)
+ if impErr != nil {
+ return impErr
+ }
+ return nil
+}
+
+func listCoverageFiles() ([]string, error) {
+ result := []string{}
+ finfos, derr := ioutil.ReadDir(coverageDir)
+ if derr != nil {
+ return result, derr
+ }
+ for _, finfo := range finfos {
+ result = append(result, path.Join(coverageDir, finfo.Name()))
+ }
+ return result, nil
+}
+
+// Test runs the test suite
+func Test() error {
+ mg.Deps(Tools)
+
+ // Ensure coverage directory exists
+ if err := os.MkdirAll(coverageDir, os.FileMode(0777)); err != nil {
+ return err
+ }
+
+ // Clean up coverage directory
+ coverFiles, derr := listCoverageFiles()
+ if derr != nil {
+ return derr
+ }
+ for _, coverFile := range coverFiles {
+ if err := sh.Rm(coverFile); err != nil {
+ return err
+ }
+ }
+
+ // Run tests
+ coverProfiles := []string{}
+ for _, pkg := range goPkgs {
+ coverProfile := path.Join(coverageDir, fmt.Sprintf("%s%s", strings.Replace(pkg, "/", "-", -1), ".out"))
+ testErr := sh.Run("go", "test", "-v", "-covermode", "count", fmt.Sprintf("-coverprofile=%s", coverProfile),
+ pkg)
+ if testErr != nil {
+ return testErr
+ }
+ coverProfiles = append(coverProfiles, coverProfile)
+ }
+
+ return nil
+}
+
+// IntegrationTestBinary builds the integration test binary
+func IntegrationTestBinary() error {
+ changed, err := target.Path("postgres_exporter_integration_test", goSrc...)
+ if (changed && (err == nil)) || os.IsNotExist(err) {
+ return sh.RunWith(map[string]string{"CGO_ENABLED": "0"}, "go", "test", "./cmd/postgres_exporter",
+ "-c", "-tags", "integration",
+ "-a", "-ldflags", "-extldflags '-static'",
+ "-X", fmt.Sprintf("main.Branch=%s", branch),
+ "-X", fmt.Sprintf("main.BuildDate=%s", buildDate),
+ "-X", fmt.Sprintf("main.Revision=%s", revision),
+ "-X", fmt.Sprintf("main.VersionShort=%s", versionShort),
+ "-o", "postgres_exporter_integration_test", "-cover", "-covermode", "count")
+ }
+ return err
+}
+
+// TestIntegration runs integration tests
+func TestIntegration() error {
+ mg.Deps(Binary, IntegrationTestBinary)
+
+ exporterPath := mustStr(filepath.Abs("postgres_exporter"))
+ testBinaryPath := mustStr(filepath.Abs("postgres_exporter_integration_test"))
+ testScriptPath := mustStr(filepath.Abs("postgres_exporter_integration_test_script"))
+
+ integrationCoverageProfile := path.Join(coverageDir, "cover.integration.out")
+
+ return sh.RunV("cmd/postgres_exporter/tests/test-smoke", exporterPath,
+ fmt.Sprintf("%s %s %s", testScriptPath, testBinaryPath, integrationCoverageProfile))
+}
+
+// Coverage sums up the coverage profiles in .coverage. It does not clean up after itself or before.
+func Coverage() error { + // Clean up coverage directory + coverFiles, derr := listCoverageFiles() + if derr != nil { + return derr + } + + mergedCoverage, err := sh.Output("gocovmerge", coverFiles...) + if err != nil { + return err + } + return ioutil.WriteFile(constCoverFile, []byte(mergedCoverage), os.FileMode(0777)) +} + +// All runs a full suite suitable for CI +func All() error { + mg.SerialDeps(Style, Lint, Test, TestIntegration, Coverage, Release) + return nil +} + +// Release builds release archives under the release/ directory +func Release() error { + mg.Deps(ReleaseBin) + + for _, platform := range platforms { + owd, wderr := os.Getwd() + if wderr != nil { + return wderr + } + os.Chdir(binDir) + + if platform.OS == "windows" { + // build a zip binary as well + err := archiver.Zip.Make(fmt.Sprintf("%s.zip", platform.ReleaseBase()), []string{platform.ArchiveDir()}) + if err != nil { + return err + } + } + // build tar gz + err := archiver.TarGz.Make(fmt.Sprintf("%s.tar.gz", platform.ReleaseBase()), []string{platform.ArchiveDir()}) + if err != nil { + return err + } + os.Chdir(owd) + } + + return nil +} + +func makeBuilder(cmd string, platform Platform) func() error { + f := func() error { + // Depend on assets + mg.Deps(Assets) + + cmdSrc := fmt.Sprintf("./%s/%s", mustStr(filepath.Rel(curDir, cmdDir)), cmd) + + Log("Make platform binary directory:", platform.PlatformDir()) + if err := os.MkdirAll(platform.PlatformDir(), os.FileMode(0777)); err != nil { + return err + } + + Log("Checking for changes:", platform.PlatformBin(cmd)) + if changed, err := target.Path(platform.PlatformBin(cmd), goSrc...); !changed { + if err != nil { + if !os.IsNotExist(err) { + return err + } + } else { + return nil + } + } + + fmt.Println("Building", platform.PlatformBin(cmd)) + return sh.RunWith(map[string]string{"CGO_ENABLED": "0", "GOOS": platform.OS, "GOARCH": platform.Arch}, + "go", "build", "-a", "-ldflags", fmt.Sprintf("-extldflags '-static' -X main.Version=%s", version), + "-o", platform.PlatformBin(cmd), cmdSrc) + } + return f +} + +func getCurrentPlatform() *Platform { + var curPlatform *Platform + for _, p := range platforms { + if p.OS == runtime.GOOS && p.Arch == runtime.GOARCH { + storedP := p + curPlatform = &storedP + } + } + Log("Determined current platform:", curPlatform) + return curPlatform +} + +// Binary build a binary for the current platform +func Binary() error { + curPlatform := getCurrentPlatform() + if curPlatform == nil { + return errors.New("current platform is not supported") + } + + for _, cmd := range goCmds { + err := makeBuilder(cmd, *curPlatform)() + if err != nil { + return err + } + // Make a root symlink to the build + cmdPath := path.Join(curDir, cmd) + os.Remove(cmdPath) + if err := os.Symlink(curPlatform.PlatformBin(cmd), cmdPath); err != nil { + return err + } + } + + return nil +} + +// ReleaseBin builds cross-platform release binaries under the bin/ directory +func ReleaseBin() error { + buildCmds := []interface{}{} + + for _, cmd := range goCmds { + for _, platform := range platforms { + buildCmds = append(buildCmds, makeBuilder(cmd, platform)) + } + } + + resultsCh := make(chan error, len(buildCmds)) + concurrencyControl := make(chan struct{}, concurrency) + for _, buildCmd := range buildCmds { + go func(buildCmd interface{}) { + concurrencyControl <- struct{}{} + resultsCh <- buildCmd.(func() error)() + <-concurrencyControl + + }(buildCmd) + } + // Doesn't work at the moment + // mg.Deps(buildCmds...) 
+ results := []error{} + var resultErr error = nil + for len(results) < len(buildCmds) { + err := <-resultsCh + results = append(results, err) + if err != nil { + fmt.Println(err) + resultErr = errors.New("parallel build failed") + } + fmt.Printf("Finished %v of %v\n", len(results), len(buildCmds)) + } + + return resultErr +} + +// Docker builds the docker image +func Docker() error { + mg.Deps(Binary) + p := getCurrentPlatform() + if p == nil { + return errors.New("current platform is not supported") + } + + return sh.RunV("docker", "build", + fmt.Sprintf("--build-arg=binary=%s", + mustStr(filepath.Rel(curDir, p.PlatformBin("postgres_exporter")))), + "-t", containerName, ".") +} + +// Clean deletes build output and cleans up the working directory +func Clean() error { + for _, name := range goCmds { + if err := sh.Rm(path.Join(binDir, name)); err != nil { + return err + } + } + + for _, name := range outputDirs { + if err := sh.Rm(name); err != nil { + return err + } + } + return nil +} + +// Debug prints the value of internal state variables +func Debug() error { + fmt.Println("Source Files:", goSrc) + fmt.Println("Packages:", goPkgs) + fmt.Println("Directories:", goDirs) + fmt.Println("Command Paths:", goCmds) + fmt.Println("Output Dirs:", outputDirs) + fmt.Println("Tool Src Dir:", toolsSrcDir) + fmt.Println("Tool Vendor Dir:", toolsVendorDir) + fmt.Println("Tool GOPATH:", toolsGoPath) + fmt.Println("PATH:", os.Getenv("PATH")) + return nil +} + +// Autogen configure local git repository with commit hooks +func Autogen() error { + fmt.Println("Installing git hooks in local repository...") + return os.Link(path.Join(curDir, toolDir, "pre-commit"), ".git/hooks/pre-commit") +} diff --git a/postgres-metrics-get-changes.sh b/postgres-metrics-get-changes.sh new file mode 100755 index 0000000..37dbfb3 --- /dev/null +++ b/postgres-metrics-get-changes.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Script to parse a text exposition format file into a unique list of metrics +# output by the exporter and then build lists of added/removed metrics. + +old_src="$1" +if [ ! -d "$old_src" ] ; then + mkdir -p "$old_src" +fi + +function generate_add_removed() { + type="$1" + pg_version="$2" + old_version="$3" + new_version="$4" + + if [ ! -e "$old_version" ] ; then + touch "$old_version" + fi + + comm -23 "$old_version" "$new_version" > ".metrics.${type}.${pg_version}.removed" + comm -13 "$old_version" "$new_version" > ".metrics.${type}.${pg_version}.added" +} + +for raw_prom in $(echo .*.prom) ; do + # Get the type and version + type=$(echo "$raw_prom" | cut -d'.' -f3) + pg_version=$(echo "$raw_prom" | cut -d'.' -f4- | sed 's/\.prom$//g') + + unique_file="${raw_prom}.unique" + old_unique_file="$old_src/$unique_file" + + # Strip, sort and deduplicate the label names + grep -v '#' "$raw_prom" | \ + rev | cut -d' ' -f2- | \ + rev | cut -d'{' -f1 | \ + sort | \ + uniq > "$unique_file" + + generate_add_removed "$type" "$pg_version" "$old_unique_file" "$unique_file" +done diff --git a/postgres_exporter.rc b/postgres_exporter.rc new file mode 100644 index 0000000..2dd4a60 --- /dev/null +++ b/postgres_exporter.rc @@ -0,0 +1,89 @@ +#!/bin/sh + +# PROVIDE: postgres_exporter +# REQUIRE: LOGIN +# KEYWORD: shutdown +# +# rc-script for postgres_exporter +# +# +# Add the following lines to /etc/rc.conf.local or /etc/rc.conf +# to enable this service: +# +# postgres_exporter_enable (bool): Set to NO by default. +# Set it to YES to enable postgres_exporter. 
+# postgres_exporter_user (string): Set user that postgres_exporter will run under
+# Default is "nobody".
+# postgres_exporter_group (string): Set group that postgres_exporter will run under
+# Default is "nobody".
+# postgres_exporter_args (string): Set extra arguments to pass to postgres_exporter
+# Default is "".
+# postgres_exporter_listen_address (string): Set ip:port to listen on for web interface and telemetry.
+# Defaults to ":9187"
+# postgres_exporter_pg_user (string): Set the Postgres database user
+# Defaults to "postgres_exporter"
+# postgres_exporter_pg_pass (string): Set the Postgres database password
+# Default is empty
+# postgres_exporter_pg_host (string): Set the Postgres database server
+# Defaults to "localhost"
+# postgres_exporter_pg_port (string): Set the Postgres database port
+# Defaults to "5432"
+
+# Add extra arguments via "postgres_exporter_args", which can be chosen from:
+# (see $ postgres_exporter --help)
+#
+# -dumpmaps
+# Do not run, simply dump the maps.
+# -extend.query-path string
+# Path to custom queries to run.
+# -log.level value
+# Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal].
+# -version
+# print version and exit
+# -web.telemetry-path string
+# Path under which to expose metrics. (default "/metrics")
+# -log.format value
+# If set use a syslog logger or JSON logging. Example: logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to stderr.
+# -extend.query-path string
+# Path to custom queries to run.
+
+
+. /etc/rc.subr
+
+name=postgres_exporter
+rcvar=postgres_exporter_enable
+
+load_rc_config $name
+
+: ${postgres_exporter_enable:="NO"}
+: ${postgres_exporter_user:="nobody"}
+: ${postgres_exporter_group:="nobody"}
+: ${postgres_exporter_args:=""}
+: ${postgres_exporter_listen_address:=":9187"}
+: ${postgres_exporter_pg_user:="postgres_exporter"}
+: ${postgres_exporter_pg_pass:=""}
+: ${postgres_exporter_pg_host:="localhost"}
+: ${postgres_exporter_pg_port:="5432"}
+
+postgres_exporter_data_source_name="postgresql://${postgres_exporter_pg_user}:${postgres_exporter_pg_pass}@${postgres_exporter_pg_host}:${postgres_exporter_pg_port}/postgres?sslmode=disable"
+
+
+pidfile=/var/run/postgres_exporter.pid
+command="/usr/sbin/daemon"
+procname="/usr/local/bin/postgres_exporter"
+command_args="-p ${pidfile} /usr/bin/env DATA_SOURCE_NAME="${postgres_exporter_data_source_name}" ${procname} \
+ -web.listen-address=${postgres_exporter_listen_address} \
+ ${postgres_exporter_args}"
+
+start_precmd=postgres_exporter_startprecmd
+
+postgres_exporter_startprecmd()
+{
+ if [ ! -e ${pidfile} ]; then
+ install -o ${postgres_exporter_user} -g ${postgres_exporter_group} /dev/null ${pidfile};
+ fi
+}
+
+load_rc_config $name
+run_rc_command "$1"
+
diff --git a/postgres_exporter_integration_test_script b/postgres_exporter_integration_test_script
new file mode 100755
index 0000000..ebaf83d
--- /dev/null
+++ b/postgres_exporter_integration_test_script
@@ -0,0 +1,18 @@
+#!/bin/bash
+# This script wraps the integration test binary so it produces concatenated
+# test output.
+ +test_binary=$1 +shift +output_cov=$1 +shift + +echo "Test Binary: $test_binary" 1>&2 +echo "Coverage File: $output_cov" 1>&2 + +echo "mode: count" > $output_cov + +test_cov=$(mktemp) +$test_binary -test.coverprofile=$test_cov $@ || exit 1 +tail -n +2 $test_cov >> $output_cov +rm -f $test_cov diff --git a/queries.yaml b/queries.yaml new file mode 100644 index 0000000..db2c068 --- /dev/null +++ b/queries.yaml @@ -0,0 +1,205 @@ +pg_replication: + query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag" + master: true + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind master in seconds" + +pg_postmaster: + query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()" + master: true + metrics: + - start_time_seconds: + usage: "GAUGE" + description: "Time at which postmaster started" + +pg_stat_user_tables: + query: "SELECT current_database() datname, schemaname, relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup, n_mod_since_analyze, COALESCE(last_vacuum, '1970-01-01Z'), COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, COALESCE(last_analyze, '1970-01-01Z') as last_analyze, COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, vacuum_count, autovacuum_count, analyze_count, autoanalyze_count FROM pg_stat_user_tables" + metrics: + - datname: + usage: "LABEL" + description: "Name of current database" + - schemaname: + usage: "LABEL" + description: "Name of the schema that this table is in" + - relname: + usage: "LABEL" + description: "Name of this table" + - seq_scan: + usage: "COUNTER" + description: "Number of sequential scans initiated on this table" + - seq_tup_read: + usage: "COUNTER" + description: "Number of live rows fetched by sequential scans" + - idx_scan: + usage: "COUNTER" + description: "Number of index scans initiated on this table" + - idx_tup_fetch: + usage: "COUNTER" + description: "Number of live rows fetched by index scans" + - n_tup_ins: + usage: "COUNTER" + description: "Number of rows inserted" + - n_tup_upd: + usage: "COUNTER" + description: "Number of rows updated" + - n_tup_del: + usage: "COUNTER" + description: "Number of rows deleted" + - n_tup_hot_upd: + usage: "COUNTER" + description: "Number of rows HOT updated (i.e., with no separate index update required)" + - n_live_tup: + usage: "GAUGE" + description: "Estimated number of live rows" + - n_dead_tup: + usage: "GAUGE" + description: "Estimated number of dead rows" + - n_mod_since_analyze: + usage: "GAUGE" + description: "Estimated number of rows changed since last analyze" + - last_vacuum: + usage: "GAUGE" + description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)" + - last_autovacuum: + usage: "GAUGE" + description: "Last time at which this table was vacuumed by the autovacuum daemon" + - last_analyze: + usage: "GAUGE" + description: "Last time at which this table was manually analyzed" + - last_autoanalyze: + usage: "GAUGE" + description: "Last time at which this table was analyzed by the autovacuum daemon" + - vacuum_count: + usage: "COUNTER" + description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)" + - autovacuum_count: + usage: "COUNTER" + description: "Number of times this table has been vacuumed by the autovacuum daemon" + - analyze_count: + usage: "COUNTER" + description: "Number of times this 
table has been manually analyzed" + - autoanalyze_count: + usage: "COUNTER" + description: "Number of times this table has been analyzed by the autovacuum daemon" + +pg_statio_user_tables: + query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables" + metrics: + - datname: + usage: "LABEL" + description: "Name of current database" + - schemaname: + usage: "LABEL" + description: "Name of the schema that this table is in" + - relname: + usage: "LABEL" + description: "Name of this table" + - heap_blks_read: + usage: "COUNTER" + description: "Number of disk blocks read from this table" + - heap_blks_hit: + usage: "COUNTER" + description: "Number of buffer hits in this table" + - idx_blks_read: + usage: "COUNTER" + description: "Number of disk blocks read from all indexes on this table" + - idx_blks_hit: + usage: "COUNTER" + description: "Number of buffer hits in all indexes on this table" + - toast_blks_read: + usage: "COUNTER" + description: "Number of disk blocks read from this table's TOAST table (if any)" + - toast_blks_hit: + usage: "COUNTER" + description: "Number of buffer hits in this table's TOAST table (if any)" + - tidx_blks_read: + usage: "COUNTER" + description: "Number of disk blocks read from this table's TOAST table indexes (if any)" + - tidx_blks_hit: + usage: "COUNTER" + description: "Number of buffer hits in this table's TOAST table indexes (if any)" + +pg_database: + query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size FROM pg_database" + master: true + cache_seconds: 30 + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + +pg_stat_statements: + query: "SELECT t2.rolname, t3.datname, queryid, calls, total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 join pg_roles t2 on (t1.userid=t2.oid) join pg_database t3 on (t1.dbid=t3.oid)" + master: true + metrics: + - rolname: + usage: "LABEL" + description: "Name of user" + - datname: + usage: "LABEL" + description: "Name of database" + - queryid: + usage: "LABEL" + description: "Query ID" + - calls: + usage: "COUNTER" + description: "Number of times executed" + - total_time_seconds: + usage: "COUNTER" + description: "Total time spent in the statement, in milliseconds" + - min_time_seconds: + usage: "GAUGE" + description: "Minimum time spent in the statement, in milliseconds" + - max_time_seconds: + usage: "GAUGE" + description: "Maximum time spent in the statement, in milliseconds" + - mean_time_seconds: + usage: "GAUGE" + description: "Mean time spent in the statement, in milliseconds" + - stddev_time_seconds: + usage: "GAUGE" + description: "Population standard deviation of time spent in the statement, in milliseconds" + - rows: + usage: "COUNTER" + description: "Total number of rows retrieved or affected by the statement" + - shared_blks_hit: + usage: "COUNTER" + description: "Total number 
of shared block cache hits by the statement" + - shared_blks_read: + usage: "COUNTER" + description: "Total number of shared blocks read by the statement" + - shared_blks_dirtied: + usage: "COUNTER" + description: "Total number of shared blocks dirtied by the statement" + - shared_blks_written: + usage: "COUNTER" + description: "Total number of shared blocks written by the statement" + - local_blks_hit: + usage: "COUNTER" + description: "Total number of local block cache hits by the statement" + - local_blks_read: + usage: "COUNTER" + description: "Total number of local blocks read by the statement" + - local_blks_dirtied: + usage: "COUNTER" + description: "Total number of local blocks dirtied by the statement" + - local_blks_written: + usage: "COUNTER" + description: "Total number of local blocks written by the statement" + - temp_blks_read: + usage: "COUNTER" + description: "Total number of temp blocks read by the statement" + - temp_blks_written: + usage: "COUNTER" + description: "Total number of temp blocks written by the statement" + - blk_read_time_seconds: + usage: "COUNTER" + description: "Total time the statement spent reading blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)" + - blk_write_time_seconds: + usage: "COUNTER" + description: "Total time the statement spent writing blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)" diff --git a/tools/.gitignore b/tools/.gitignore new file mode 100644 index 0000000..87ef946 --- /dev/null +++ b/tools/.gitignore @@ -0,0 +1,4 @@ +/pkg +/bin +/tools.deps +/metatools.deps diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 0000000..aa31adb --- /dev/null +++ b/tools/README.md @@ -0,0 +1,9 @@ + +Vendored versions of the build tooling. + +gocovmerge is used to merge coverage reports for uploading to a service like +coveralls, and gometalinter conveniently incorporates multiple Go linters. + +By vendoring both, we gain a self-contained build system. + +Run `make all` to build, and `make update` to update. 
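To make the coverage flow described in the README concrete: the sketch below is a minimal, stand-alone paraphrase of what the build's Coverage step does with the vendored gocovmerge. The profile location (.coverage/*.out), output name (cover.out) and file mode are assumptions for illustration, not requirements of the tooling.

package main

import (
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
)

// Merges the per-package profiles that the test target writes into .coverage
// and emits a single cover.out suitable for upload to a service like coveralls.
func main() {
	profiles, err := filepath.Glob(filepath.Join(".coverage", "*.out"))
	if err != nil || len(profiles) == 0 {
		log.Fatal("no coverage profiles found under .coverage")
	}

	// gocovmerge (vendored under tools/) concatenates coverage profiles.
	merged, err := exec.Command("gocovmerge", profiles...).Output()
	if err != nil {
		log.Fatal(err)
	}

	if err := ioutil.WriteFile("cover.out", merged, os.FileMode(0644)); err != nil {
		log.Fatal(err)
	}
}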
diff --git a/tools/vendor/vendor.json b/tools/vendor/vendor.json new file mode 100644 index 0000000..6e8eed9 --- /dev/null +++ b/tools/vendor/vendor.json @@ -0,0 +1,803 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "checksumSHA1": "4Tc07iR3HloUYC4HNT4xc0875WY=", + "path": "github.com/Bowery/prompt", + "revision": "0f1139e9a1c74b57ccce6bdb3cd2f7cd04dd3449", + "revisionTime": "2017-02-19T07:16:37Z" + }, + { + "checksumSHA1": "LnZqwaKHuOH0bcpDUrqrcGvER/o=", + "path": "github.com/GoASTScanner/gas", + "revision": "1d9f816ca5d8224320a72b8aefbdb6f5d3692da6", + "revisionTime": "2018-03-05T12:20:24Z" + }, + { + "checksumSHA1": "Z03LJp4+mkPL1JTZzzizqWdtZSs=", + "path": "github.com/GoASTScanner/gas/cmd/gas", + "revision": "1d9f816ca5d8224320a72b8aefbdb6f5d3692da6", + "revisionTime": "2018-03-05T12:20:24Z" + }, + { + "checksumSHA1": "sK1dOo48F424xLCvE+ic8tRk7i8=", + "path": "github.com/GoASTScanner/gas/core", + "revision": "1beec25f7754273c9672a3368ea7048d4e73138e", + "revisionTime": "2017-04-11T19:38:53Z" + }, + { + "checksumSHA1": "ZSTQB9oOviIo0K+41PBciAFINHU=", + "path": "github.com/GoASTScanner/gas/output", + "revision": "1d9f816ca5d8224320a72b8aefbdb6f5d3692da6", + "revisionTime": "2018-03-05T12:20:24Z" + }, + { + "checksumSHA1": "VQoUd/3JzI8CQdaLrAVN723MGZM=", + "path": "github.com/GoASTScanner/gas/rules", + "revision": "1d9f816ca5d8224320a72b8aefbdb6f5d3692da6", + "revisionTime": "2018-03-05T12:20:24Z" + }, + { + "checksumSHA1": "cItvKwnl+gkO2j0Q2964efC+vTw=", + "path": "github.com/alecthomas/gocyclo", + "revision": "aa8f8b160214d8dfccfe3e17e578dd0fcc6fede7", + "revisionTime": "2015-02-08T22:17:26Z" + }, + { + "checksumSHA1": "xEwn4Ufny5rQEJ8f9hao7gkd86g=", + "path": "github.com/alecthomas/gometalinter", + "revision": "39a4757a714702004d3fdca45ff83af4dc484af9", + "revisionTime": "2018-02-23T20:08:23Z" + }, + { + "checksumSHA1": "fCc3grA7vIxfBru7R3SqjcW+oLI=", + "path": "github.com/alecthomas/units", + "revision": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a", + "revisionTime": "2015-10-22T06:55:26Z" + }, + { + "checksumSHA1": "kjygOPbr5jsND2nU4NJnyyDRVF8=", + "path": "github.com/alexflint/go-arg", + "revision": "cef6506c97e5731da728c374ff3523e481026423", + "revisionTime": "2017-03-30T21:10:29Z" + }, + { + "checksumSHA1": "M5vBatiAKUjyLYrb9nS+6QbpEjE=", + "path": "github.com/alexflint/go-scalar", + "revision": "e80c3b7ed292b052c7083b6fd7154a8422c33f65", + "revisionTime": "2017-02-16T02:04:25Z" + }, + { + "checksumSHA1": "rDM1YOCSZE4BLxZoBJV56/VmZSo=", + "path": "github.com/alexkohler/nakedret", + "revision": "c0e305a4f690fed163d47628bcc06a6d5655bf92", + "revisionTime": "2017-11-06T22:32:15Z" + }, + { + "checksumSHA1": "z6mKUmWeXRT0k+xrXxA5CLKOWiE=", + "path": "github.com/client9/misspell", + "revision": "1d9ab7749ee27131547244ff2f9953d235b591fb", + "revisionTime": "2017-05-30T22:15:07Z" + }, + { + "checksumSHA1": "3Lbx+qNi8brwKa9dU41O6SEOW6c=", + "path": "github.com/client9/misspell/cmd/misspell", + "revision": "9ce5d979ffdaca6385988d7ad1079a33ec942d20", + "revisionTime": "2017-09-28T00:02:06Z" + }, + { + "checksumSHA1": "ULnk7ggN82JFO0ZdBCmSsQH3Vh8=", + "path": "github.com/dchest/safefile", + "revision": "855e8d98f1852d48dde521e0522408d1fe7e836a", + "revisionTime": "2015-10-22T10:31:44Z" + }, + { + "checksumSHA1": "aLXmB5i+PFfM/dVqfHxoB3a8v/g=", + "path": "github.com/dnephin/govet", + "revision": "4a96d43e39d340b63daa8bc5576985aa599885f6", + "revisionTime": "2017-10-12T18:51:37Z", + "version": "fork", + "versionExact": "fork" + }, + { + "checksumSHA1": 
"Ba6cj2wCpDZcjE0kZ4Q32PJW4fg=", + "path": "github.com/dnephin/govet/internal/cfg", + "revision": "4a96d43e39d340b63daa8bc5576985aa599885f6", + "revisionTime": "2017-10-12T18:51:37Z", + "version": "fork", + "versionExact": "fork" + }, + { + "checksumSHA1": "ttQiZmni3k7Tbfi4/CX3JG6NjAw=", + "path": "github.com/dnephin/govet/internal/whitelist", + "revision": "4a96d43e39d340b63daa8bc5576985aa599885f6", + "revisionTime": "2017-10-12T18:51:37Z", + "version": "fork", + "versionExact": "fork" + }, + { + "checksumSHA1": "+U50xksZS8g53vruOP7px7cLilE=", + "path": "github.com/golang/lint", + "revision": "c5fb716d6688a859aae56d26d3e6070808df29f7", + "revisionTime": "2017-06-02T23:41:31Z" + }, + { + "checksumSHA1": "SsCcmchQUYbzaKLaJ0zfrd9DdrI=", + "path": "github.com/golang/lint/golint", + "revision": "fb4f8c1d3a179654f93ef7e91d68fc7b1de6e88f", + "revisionTime": "2018-03-01T17:26:52Z" + }, + { + "checksumSHA1": "e/Kc2UOy1lKAy31xWlK37M1r2e8=", + "path": "github.com/google/shlex", + "revision": "6f45313302b9c56850fc17f99e40caebce98c716", + "revisionTime": "2015-01-27T13:39:51Z" + }, + { + "checksumSHA1": "TKaX+8YCZQwYv7Kiy2o/E86V0aE=", + "path": "github.com/gordonklaus/ineffassign", + "revision": "7bae11eba15a3285c75e388f77eb6357a2d73ee2", + "revisionTime": "2017-11-18T19:06:32Z" + }, + { + "checksumSHA1": "DbSCKltce7IrgpDUF8+C7J+z+GU=", + "path": "github.com/jgautheron/goconst", + "revision": "6a7633b712b6fb1d6821d33851d086a1d545dacd", + "revisionTime": "2016-05-14T19:25:19Z" + }, + { + "checksumSHA1": "0tPXJ5Wul0FXiUDwVWsd/RA3tWg=", + "path": "github.com/jgautheron/goconst/cmd/goconst", + "revision": "9740945f5dcb78c2faa8eedcce78c2a04aa6e1e9", + "revisionTime": "2017-07-03T17:01:52Z" + }, + { + "checksumSHA1": "NKvKUGq0lp/GjLS7Ffp7BAjcoTg=", + "path": "github.com/kardianos/govendor", + "revision": "c5ee5dc32350319e3423e570818eaa818601b789", + "revisionTime": "2018-02-09T21:39:04Z" + }, + { + "checksumSHA1": "m24kWw3bFoAkKVvTjmxSLsywdHY=", + "path": "github.com/kardianos/govendor/cliprompt", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "/6r+luJ0EK07RknNd0zrubHtMuQ=", + "path": "github.com/kardianos/govendor/context", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "Y0WMEgLxFAzHAIxFViFSWh7dqqY=", + "path": "github.com/kardianos/govendor/help", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "SPRzsXaOsg9dENhLfHRjmFMmGQM=", + "path": "github.com/kardianos/govendor/internal/pathos", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "Bl6I6yMiK60dzOAfyO7As6MSPIk=", + "path": "github.com/kardianos/govendor/internal/vfilepath", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "2Vg+J79rEhmtnprErQ7fTZdneIk=", + "path": "github.com/kardianos/govendor/internal/vos", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "rpK9ccIJkLV4IbKb3lUjUo5DSfU=", + "path": "github.com/kardianos/govendor/migrate", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "wL4SaLS/HTn32Gmq8kpYRr/cn68=", + "path": "github.com/kardianos/govendor/pkgspec", + "revision": 
"c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "I9oQBOtXoOinofTJrBE+zI+vDCs=", + "path": "github.com/kardianos/govendor/prompt", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "tXbK0YAL7/ZrLWkokBrLdp30xjw=", + "path": "github.com/kardianos/govendor/run", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "ZDlz1nWDmErU501lCChKbTT3kEs=", + "path": "github.com/kardianos/govendor/vcs", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "oXa3HaUXhXktMp4C0xTOSrzPDTE=", + "path": "github.com/kardianos/govendor/vendorfile", + "revision": "c86c10d612bf08e847456ce91d495eb69ad87087", + "revisionTime": "2017-05-06T05:20:04Z" + }, + { + "checksumSHA1": "QSxPR3g/AtzGMJSGrdHH6bBQnTc=", + "path": "github.com/kisielk/errcheck", + "revision": "8050dd7cc11578becd8622667107bb21a7baf451", + "revisionTime": "2018-03-03T00:00:09Z" + }, + { + "checksumSHA1": "GP25rgIPshJh0tpiBg3Z8Dexqj4=", + "path": "github.com/kisielk/errcheck/internal/errcheck", + "revision": "23699b7e2cbfdb89481023524954ba2aeff6be90", + "revisionTime": "2017-03-17T17:34:29Z" + }, + { + "checksumSHA1": "9fvV44Csmu+K5BpFvEBs2p8alBI=", + "path": "github.com/kisielk/gotool", + "revision": "0de1eaf82fa3f583ce21fde859f1e7e0c5e9b220", + "revisionTime": "2016-11-30T08:01:11Z" + }, + { + "checksumSHA1": "Us06jbfYQlapYdo8mO94mQMy22o=", + "path": "github.com/mattn/goveralls", + "revision": "a419d25dbaefa70d50cfbf5fbd2fc2f047bf95d2", + "revisionTime": "2018-03-01T14:36:12Z" + }, + { + "checksumSHA1": "dk0ehYSmMaGLWFQPND3cVgk744I=", + "path": "github.com/mdempsky/maligned", + "revision": "08c8e9db1bce03f1af283686c0943fcb75f0109e", + "revisionTime": "2016-08-25T09:47:39Z" + }, + { + "checksumSHA1": "90pFJb64MwgYvN5AmPVaWl87ZyU=", + "path": "github.com/mdempsky/unconvert", + "revision": "beb68d938016d2dec1d1b078054f4d3db25f97be", + "revisionTime": "2016-08-03T23:01:54Z" + }, + { + "checksumSHA1": "k3eGAQ+pCIffVpgvoBrPuLK6Yz8=", + "path": "github.com/mibk/dupl", + "revision": "72dc2d83bec70e053e9294378aacb1a032f51a31", + "revisionTime": "2017-11-19T16:48:37Z" + }, + { + "checksumSHA1": "sHi3Qhc2/0XMcIXB31NLXkbIoz0=", + "path": "github.com/mibk/dupl/job", + "revision": "3447d9b0cb5a3e7dccb1a9f1c975f35683f304e6", + "revisionTime": "2017-02-27T22:14:17Z" + }, + { + "checksumSHA1": "VZ5EU9NrZck4UX3OpC9YK/gB/A4=", + "path": "github.com/mibk/dupl/output", + "revision": "3447d9b0cb5a3e7dccb1a9f1c975f35683f304e6", + "revisionTime": "2017-02-27T22:14:17Z" + }, + { + "checksumSHA1": "mGQ3tVbY9uLwfwoeQjvBBBm7yRw=", + "path": "github.com/mibk/dupl/printer", + "revision": "72dc2d83bec70e053e9294378aacb1a032f51a31", + "revisionTime": "2017-11-19T16:48:37Z" + }, + { + "checksumSHA1": "o9BJwhna5BuCTYWBGBo9VA+Ez/M=", + "path": "github.com/mibk/dupl/suffixtree", + "revision": "3447d9b0cb5a3e7dccb1a9f1c975f35683f304e6", + "revisionTime": "2017-02-27T22:14:17Z" + }, + { + "checksumSHA1": "HveZ42ihDCZQumgGFMQIs8Nendg=", + "path": "github.com/mibk/dupl/syntax", + "revision": "3447d9b0cb5a3e7dccb1a9f1c975f35683f304e6", + "revisionTime": "2017-02-27T22:14:17Z" + }, + { + "checksumSHA1": "YuBPssHbL/iU+1poNlfBDl2IqG4=", + "path": "github.com/mibk/dupl/syntax/golang", + "revision": "3447d9b0cb5a3e7dccb1a9f1c975f35683f304e6", + "revisionTime": "2017-02-27T22:14:17Z" + }, + { + "checksumSHA1": 
"GtTRl0HhAPDjp+s02RJnFg3znZ0=", + "path": "github.com/mvdan/interfacer", + "revision": "22c51662ff476dfd97944f74db1b263ed920ee83", + "revisionTime": "2017-04-06T16:05:15Z" + }, + { + "checksumSHA1": "zD/VW+BRbOjxk1xq5bmdigi0cp8=", + "path": "github.com/mvdan/interfacer/cmd/interfacer", + "revision": "22c51662ff476dfd97944f74db1b263ed920ee83", + "revisionTime": "2017-04-06T16:05:15Z" + }, + { + "checksumSHA1": "18GDIJCo0vo+mmQDIYmyb2JSWqo=", + "path": "github.com/mvdan/lint", + "revision": "c9cbe299b369cbfea16318baaa037b19a69e45d2", + "revisionTime": "2017-04-06T10:09:31Z" + }, + { + "checksumSHA1": "5LiZtu67exUdRJ0/QQvU/epG9no=", + "path": "github.com/mvdan/unparam", + "revision": "d647bb803b10a6777ee4c6a176416b91fa14713e", + "revisionTime": "2017-05-30T08:59:07Z" + }, + { + "checksumSHA1": "tuOLCrGa9DjfXheKkMXtHtQu3bs=", + "path": "github.com/mvdan/unparam/check", + "revision": "d647bb803b10a6777ee4c6a176416b91fa14713e", + "revisionTime": "2017-05-30T08:59:07Z" + }, + { + "checksumSHA1": "DP8R0Q7TDlHbhz9Livyj8RkRKvU=", + "path": "github.com/nbutton23/zxcvbn-go", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "HEqKoRuKJ86gic8DPZqtDKZNo7E=", + "path": "github.com/nbutton23/zxcvbn-go/adjacency", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "etc47rBuvFfzUZ7n8EDLQiDQeXU=", + "path": "github.com/nbutton23/zxcvbn-go/data", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "/NSHii4ih+43IBnoXrjAtIzTtPI=", + "path": "github.com/nbutton23/zxcvbn-go/entropy", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "VyH3r1FJcSB13wj0T812EMASq1Q=", + "path": "github.com/nbutton23/zxcvbn-go/frequency", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "U6O/H84jE24jhCSOgi+IsjSPomM=", + "path": "github.com/nbutton23/zxcvbn-go/match", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "BHO5wnIg2NQTYSILHmpezEJFv4E=", + "path": "github.com/nbutton23/zxcvbn-go/matching", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "vRXGbBJSonwD03A/WAkhNkYNY38=", + "path": "github.com/nbutton23/zxcvbn-go/scoring", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "cEdCjSL9cNJm5o+nGwPM3WgloyM=", + "path": "github.com/nbutton23/zxcvbn-go/utils/math", + "revision": "a22cb81b2ecdde8b68e9ffb8824731cbf88e1de4", + "revisionTime": "2016-06-27T00:44:24Z" + }, + { + "checksumSHA1": "uEc9/1HbYGeK7wPStF6FmUlfzGE=", + "path": "github.com/nicksnyder/go-i18n/i18n", + "revision": "3e70a1a463008cea6726380c908b1a6a8bdf7b24", + "revisionTime": "2017-05-12T15:20:54Z" + }, + { + "checksumSHA1": "gDe7nlx3FyCVxLkARgl0VAntDRk=", + "path": "github.com/nicksnyder/go-i18n/i18n/bundle", + "revision": "3e70a1a463008cea6726380c908b1a6a8bdf7b24", + "revisionTime": "2017-05-12T15:20:54Z" + }, + { + "checksumSHA1": "+XOg99I1zdmBRUb04ZswvzQ2WS0=", + "path": "github.com/nicksnyder/go-i18n/i18n/language", + "revision": "3e70a1a463008cea6726380c908b1a6a8bdf7b24", + "revisionTime": "2017-05-12T15:20:54Z" + }, + { + "checksumSHA1": "WZOU406In2hs8FJOHWqV8PWkJKs=", 
+ "path": "github.com/nicksnyder/go-i18n/i18n/translation", + "revision": "3e70a1a463008cea6726380c908b1a6a8bdf7b24", + "revisionTime": "2017-05-12T15:20:54Z" + }, + { + "checksumSHA1": "rpXu/2iiIGcK3KMKqXfko3g6rdk=", + "path": "github.com/opennota/check/cmd/aligncheck", + "revision": "11e2eec79ec4f789607e3efbf405cdca2504d4cb", + "revisionTime": "2017-04-02T03:17:31Z" + }, + { + "checksumSHA1": "eWl/ySoMqPr+Q9p9smYNkTgXu2w=", + "path": "github.com/opennota/check/cmd/structcheck", + "revision": "86da7ade2cccfc1c5d6beeb55e5c65eba54f5f3c", + "revisionTime": "2018-01-21T06:50:09Z" + }, + { + "checksumSHA1": "2NeV5byYMgK2g1GLWiqQWwt/OzE=", + "path": "github.com/opennota/check/cmd/varcheck", + "revision": "86da7ade2cccfc1c5d6beeb55e5c65eba54f5f3c", + "revisionTime": "2018-01-21T06:50:09Z" + }, + { + "checksumSHA1": "F1IYMLBLAZaTOWnmXsgaxTGvrWI=", + "path": "github.com/pelletier/go-buffruneio", + "revision": "c37440a7cf42ac63b919c752ca73a85067e05992", + "revisionTime": "2017-02-27T22:03:11Z" + }, + { + "checksumSHA1": "vHrGGP777P2fqQHr2IYwNVVRQ/o=", + "path": "github.com/pelletier/go-toml", + "revision": "fe7536c3dee2596cdd23ee9976a17c22bdaae286", + "revisionTime": "2017-06-02T06:55:32Z" + }, + { + "checksumSHA1": "rJab1YdNhQooDiBWNnt7TLWPyBU=", + "path": "github.com/pkg/errors", + "revision": "c605e284fe17294bda444b34710735b29d1a9d90", + "revisionTime": "2017-05-05T04:36:39Z" + }, + { + "checksumSHA1": "6JP37UqrI0H80Gpk0Y2P+KXgn5M=", + "path": "github.com/ryanuber/go-glob", + "revision": "256dc444b735e061061cf46c809487313d5b0065", + "revisionTime": "2017-01-28T01:21:29Z" + }, + { + "checksumSHA1": "PMpzEhKo6usb71Qsby+a8uZMgBw=", + "path": "github.com/stripe/safesql", + "revision": "cddf355596fe2dbae05b4b5f845b4a6e2fb4e818", + "revisionTime": "2017-12-21T19:52:08Z" + }, + { + "checksumSHA1": "9YtB2Xi9YK/scfhUOjgxmjoaqUw=", + "path": "github.com/tmthrgd/go-bindata", + "revision": "40f4993ede74f673cfe96bed75ef8513a389a00a", + "revisionTime": "2017-11-30T10:15:03Z" + }, + { + "checksumSHA1": "JpZW4NtMSnXZ7T7rug7JEYgeHKc=", + "path": "github.com/tmthrgd/go-bindata/go-bindata", + "revision": "40f4993ede74f673cfe96bed75ef8513a389a00a", + "revisionTime": "2017-11-30T10:15:03Z" + }, + { + "checksumSHA1": "/XExakIFq9PUOjkjlMpe7T/Ps+8=", + "path": "github.com/tmthrgd/go-bindata/internal/identifier", + "revision": "40f4993ede74f673cfe96bed75ef8513a389a00a", + "revisionTime": "2017-11-30T10:15:03Z" + }, + { + "checksumSHA1": "fZaFaXc4iKu9PXl8xrmK3RrZpIY=", + "path": "github.com/tsenart/deadcode", + "revision": "210d2dc333e90c7e3eedf4f2242507a8e83ed4ab", + "revisionTime": "2016-07-24T21:28:37Z" + }, + { + "checksumSHA1": "ih4CCYD19rjjF9fjid+l7w/+cIg=", + "path": "github.com/wadey/gocovmerge", + "revision": "b5bfa59ec0adc420475f97f89b58045c721d761c", + "revisionTime": "2016-03-31T18:18:00Z" + }, + { + "checksumSHA1": "g27xFm/EIghjjcT3DuGt976CgNo=", + "path": "github.com/walle/lll", + "revision": "8b13b3fbf7312913fcfdbfa78997b9bd1dbb11af", + "revisionTime": "2016-07-02T15:04:58Z" + }, + { + "checksumSHA1": "V74uq4M+82grbD85c6TQ3JyMCL4=", + "path": "github.com/walle/lll/cmd/lll", + "revision": "8b13b3fbf7312913fcfdbfa78997b9bd1dbb11af", + "revisionTime": "2016-07-02T15:04:58Z" + }, + { + "checksumSHA1": "S32hhkopTwtHKbri0u4mwxV0UqQ=", + "path": "golang.org/x/lint", + "revision": "fb4f8c1d3a179654f93ef7e91d68fc7b1de6e88f", + "revisionTime": "2018-03-01T17:26:52Z" + }, + { + "checksumSHA1": "PugQbLLjnbBSj+NOXRYBVRnLuuQ=", + "path": "golang.org/x/sys/unix", + "revision": "b90f89a1e7a9c1f6b918820b3daa7f08488c8594", 
+ "revisionTime": "2017-05-29T13:44:53Z" + }, + { + "checksumSHA1": "ziMb9+ANGRJSSIuxYdRbA+cDRBQ=", + "path": "golang.org/x/text/transform", + "revision": "ccbd3f7822129ff389f8ca4858a9b9d4d910531c", + "revisionTime": "2017-05-18T06:42:59Z" + }, + { + "checksumSHA1": "aCXemG0knLp8YJedta7fYAIiX/8=", + "path": "golang.org/x/text/width", + "revision": "ccbd3f7822129ff389f8ca4858a9b9d4d910531c", + "revisionTime": "2017-05-18T06:42:59Z" + }, + { + "checksumSHA1": "V4M/6A62nVBzPFxPbN+EAatCrVs=", + "path": "golang.org/x/tools/cmd/goimports", + "revision": "9f6d4ad827bbe70b5f5c8db2d3d279ea0a2767ad", + "revisionTime": "2018-02-17T07:00:07Z" + }, + { + "checksumSHA1": "V6/A1ZOZ2GUOZcRWcXegtci2FoU=", + "path": "golang.org/x/tools/cmd/gotype", + "revision": "9f6d4ad827bbe70b5f5c8db2d3d279ea0a2767ad", + "revisionTime": "2018-02-17T07:00:07Z" + }, + { + "checksumSHA1": "nD89PLkMqA5CakR8SoDuj3iQz1M=", + "path": "golang.org/x/tools/container/intsets", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "O5eYI3n1WdaC30AxQjETe3dAQHU=", + "path": "golang.org/x/tools/cover", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "p3gWsy4fQOSXGRMUHr3TnmVFias=", + "path": "golang.org/x/tools/go/ast/astutil", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "AnXFEvmaJ7w2Q7hWPcLUmCbPgq0=", + "path": "golang.org/x/tools/go/buildutil", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "9evbcWFxUJMFmnXQ2ja5765p3iE=", + "path": "golang.org/x/tools/go/callgraph", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "XAetbnZ2wmiJ68+j0am4Hp7K3j8=", + "path": "golang.org/x/tools/go/callgraph/cha", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "d/01nwqyc48GkZ3eqEOMszzTwBE=", + "path": "golang.org/x/tools/go/callgraph/rta", + "revision": "73e16cff9e0d4a802937444bebb562458548241d", + "revisionTime": "2018-02-27T16:02:18Z" + }, + { + "checksumSHA1": "rSUfKH182TkCgMhJVsr84a19cbo=", + "path": "golang.org/x/tools/go/gcexportdata", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "o6uoZozSLnj3Ph+hj399ZPqJYhE=", + "path": "golang.org/x/tools/go/gcimporter15", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "3HnsDHAsl+izX3j9xpU6veKrWpk=", + "path": "golang.org/x/tools/go/loader", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "SzM7AWf+ZXc67vcI3jxvaD6iyM0=", + "path": "golang.org/x/tools/go/pointer", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "q0kKsRINLQjcGI4RVJ8//lmsHsc=", + "path": "golang.org/x/tools/go/ssa", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "FSjqqXMVKi4WoCqohpzt5z+6mMI=", + "path": "golang.org/x/tools/go/ssa/ssautil", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "QKvPv3TJ+ZnOLkUeUkT8Wm8eCV0=", + "path": 
"golang.org/x/tools/go/types/typeutil", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "TNJiDMoJEKYZyXo8Vkj37gqH5A0=", + "path": "golang.org/x/tools/go/vcs", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "DKr5TDU73FjKqiag3sAiVRKRfK0=", + "path": "golang.org/x/tools/imports", + "revision": "2a5864fcfb595b4ee9a7607f1beb25778cf64c6e", + "revisionTime": "2017-03-22T18:59:57Z" + }, + { + "checksumSHA1": "1FcU7G3PX7GVBLOrtxtvDWVvImo=", + "path": "gopkg.in/alecthomas/kingpin.v3-unstable", + "revision": "bd961acaef2390fc48159c3acaad41ef31833920", + "revisionTime": "2017-05-21T07:44:49Z" + }, + { + "checksumSHA1": "fALlQNY1fM99NesfLJ50KguWsio=", + "path": "gopkg.in/yaml.v2", + "revision": "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b", + "revisionTime": "2017-04-07T17:21:22Z" + }, + { + "checksumSHA1": "FG8LnaSRTHBnrPHwa0zW4zX9K7M=", + "path": "honnef.co/go/tools/callgraph", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "fR7Q7BVwKHUEsUNGn6Q2zygAvTU=", + "path": "honnef.co/go/tools/callgraph/static", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "YL/UCzWYvDXeFInLOLC1buYve6w=", + "path": "honnef.co/go/tools/cmd/gosimple", + "revision": "8ed405e85c65fb38745a8eafe01ee9590523f172", + "revisionTime": "2018-01-10T22:45:03Z" + }, + { + "checksumSHA1": "84jyAI0Uv1PQ3fN3Ufi0T7/IpOw=", + "path": "honnef.co/go/tools/cmd/megacheck", + "revision": "8ed405e85c65fb38745a8eafe01ee9590523f172", + "revisionTime": "2018-01-10T22:45:03Z" + }, + { + "checksumSHA1": "dP4Ft0yiZSTZOzzNho1Gg5b7o2w=", + "path": "honnef.co/go/tools/cmd/staticcheck", + "revision": "8ed405e85c65fb38745a8eafe01ee9590523f172", + "revisionTime": "2018-01-10T22:45:03Z" + }, + { + "checksumSHA1": "Qipy1/3Z8n4UnoWF9X0sQ/VC5JI=", + "path": "honnef.co/go/tools/cmd/unused", + "revision": "8ed405e85c65fb38745a8eafe01ee9590523f172", + "revisionTime": "2018-01-10T22:45:03Z" + }, + { + "checksumSHA1": "smQXvyCgi0lsTRk7edZNx/z44rc=", + "path": "honnef.co/go/tools/deprecated", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "ZQAEQCc18o76M9Cyncm1W5cczJ8=", + "path": "honnef.co/go/tools/functions", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "NPXYxmyCQTv53OmGTCiHvbfIct4=", + "path": "honnef.co/go/tools/gcsizes", + "revision": "e94d1c1a34c6b61d8d06c7793b8f22cd0dfcdd90", + "revisionTime": "2017-05-22T19:09:05Z" + }, + { + "checksumSHA1": "ZWtH73AO33mmXmK2RfGwld1/00I=", + "path": "honnef.co/go/tools/internal/sharedcheck", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "44oONKG61hcaBAPaA2jNhBgYLmE=", + "path": "honnef.co/go/tools/lint", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "gKJKwlFyfVebwzqA3P/N3HJIq/0=", + "path": "honnef.co/go/tools/lint/lintutil", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "kkVszwWx3L3erU3QkMDIppFv34o=", + "path": "honnef.co/go/tools/simple", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": 
"2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "OB5QjdkxC9rYXruXUuoYSsxK+VY=", + "path": "honnef.co/go/tools/ssa", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "jgNTrcXg52qlqjkb/R2vKxtcDu4=", + "path": "honnef.co/go/tools/ssa/ssautil", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "9kqdoLEm2gHS9QVE1OXWBCtRqhI=", + "path": "honnef.co/go/tools/staticcheck", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "9u74dwwwi+tg9eBr86by4i4CMNM=", + "path": "honnef.co/go/tools/staticcheck/vrp", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "tDBL3athXaJ9JoiY75NktH+OTjQ=", + "path": "honnef.co/go/tools/unused", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "checksumSHA1": "RY0sZkXnDI/MxBauBD28dwuulSs=", + "path": "honnef.co/go/tools/version", + "revision": "376b3b58b9e4def403181ee2fd3d4cc7de8375ae", + "revisionTime": "2017-11-25T07:40:24Z" + }, + { + "path": "local/numcpus", + "revision": "" + }, + { + "checksumSHA1": "FlLpgONxRMWkHp8H9c461RKJMhQ=", + "path": "mvdan.cc/interfacer", + "revision": "99221a8084d79b2e7419d4a6ddd9d8c7761eae6c", + "revisionTime": "2018-03-01T11:25:15Z" + }, + { + "checksumSHA1": "0+bmt/m62xZSbyATqBbp1MTy6ZI=", + "path": "mvdan.cc/interfacer/check", + "revision": "d7e7372184a059b8fd99d96a593e3811bf989d75", + "revisionTime": "2017-09-08T18:13:45Z" + }, + { + "checksumSHA1": "pCQUv3qVciM9V98kVNkOw1JWKzs=", + "path": "mvdan.cc/lint", + "revision": "adc824a0674b99099789b6188a058d485eaf61c0", + "revisionTime": "2017-09-08T18:12:59Z" + }, + { + "checksumSHA1": "BX0SRkBmSo6WoyfZtcw4ympOsI8=", + "path": "mvdan.cc/unparam", + "revision": "0c3aec22d8e6d9b51a978b31539c51fd52071488", + "revisionTime": "2018-03-01T11:27:09Z" + }, + { + "checksumSHA1": "aN6Bomg+fwd0GSfKYVgmPf0pd+I=", + "path": "mvdan.cc/unparam/check", + "revision": "0c3aec22d8e6d9b51a978b31539c51fd52071488", + "revisionTime": "2018-03-01T11:27:09Z" + } + ], + "rootPath": "github.com/wrouesnel/postgres_exporter/tools" +} diff --git a/vendor/vendor.json b/vendor/vendor.json new file mode 100644 index 0000000..3fb6755 --- /dev/null +++ b/vendor/vendor.json @@ -0,0 +1,357 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "checksumSHA1": "KmjnydoAbofMieIWm+it5OWERaM=", + "path": "github.com/alecthomas/template", + "revision": "a0175ee3bccc567396460bf5acd36800cb10c49c", + "revisionTime": "2016-04-05T07:15:01Z" + }, + { + "checksumSHA1": "3wt0pTXXeS+S93unwhGoLIyGX/Q=", + "path": "github.com/alecthomas/template/parse", + "revision": "a0175ee3bccc567396460bf5acd36800cb10c49c", + "revisionTime": "2016-04-05T07:15:01Z" + }, + { + "checksumSHA1": "fCc3grA7vIxfBru7R3SqjcW+oLI=", + "path": "github.com/alecthomas/units", + "revision": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a", + "revisionTime": "2015-10-22T06:55:26Z" + }, + { + "checksumSHA1": "spyv5/YFBjYyZLZa1U2LBfDR8PM=", + "path": "github.com/beorn7/perks/quantile", + "revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9", + "revisionTime": "2016-08-04T10:47:26Z" + }, + { + "checksumSHA1": "OT4XN9z5k69e2RsMSpwW74B+yk4=", + "path": "github.com/blang/semver", + "revision": "2ee87856327ba09384cabd113bc6b5d174e9ec0f", + "revisionTime": "2017-07-27T06:48:18Z" + }, + { + "checksumSHA1": 
"92dnVWesQCC1xueK1Du/6c+yLOk=", + "path": "github.com/dsnet/compress", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "Q8Y8aBNAuiO4/HVyj9PRyBz50YM=", + "path": "github.com/dsnet/compress/bzip2", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "rUK6wJzSweagbKHcRUU1TWkQq/0=", + "path": "github.com/dsnet/compress/bzip2/internal/sais", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "u6VJ7jTVulLgPZaXKWCIHc4hbQs=", + "path": "github.com/dsnet/compress/internal", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "KDfyyvx86cyY/HUA2SSWRWjn7yI=", + "path": "github.com/dsnet/compress/internal/errors", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "Txyi+DYhWRT65KnJokyQWB2xj3A=", + "path": "github.com/dsnet/compress/internal/prefix", + "revision": "cc9eb1d7ad760af14e8f918698f745e80377af4f", + "revisionTime": "2017-12-08T18:51:09Z" + }, + { + "checksumSHA1": "yqF125xVSkmfLpIVGrLlfE05IUk=", + "path": "github.com/golang/protobuf/proto", + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" + }, + { + "checksumSHA1": "p/8vSviYF91gFflhrt5vkyksroo=", + "path": "github.com/golang/snappy", + "revision": "553a641470496b2327abcac10b36396bd98e45c9", + "revisionTime": "2017-02-15T23:32:05Z" + }, + { + "checksumSHA1": "GKTFbGomCP1fhH7mFecvwKvh7bc=", + "path": "github.com/lib/pq", + "revision": "78223426e7c66d631117c0a9da1b7f3fde4d23a5", + "revisionTime": "2019-08-13T06:55:22Z" + }, + { + "checksumSHA1": "AU3fA8Sm33Vj9PBoRPSeYfxLRuE=", + "path": "github.com/lib/pq/oid", + "revision": "b609790bd85edf8e9ab7e0f8912750a786177bcf", + "revisionTime": "2017-10-22T19:20:43Z" + }, + { + "checksumSHA1": "n0MMCrKKsQuuhv7vLsrtRUGJVA8=", + "path": "github.com/lib/pq/scram", + "revision": "78223426e7c66d631117c0a9da1b7f3fde4d23a5", + "revisionTime": "2019-08-13T06:55:22Z" + }, + { + "checksumSHA1": "k3e1TD8wrhxfUUG3pQBb10ppNGA=", + "path": "github.com/magefile/mage", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "KODorM0Am1g55qObNz3jVOdRVFs=", + "path": "github.com/magefile/mage/build", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "jdM6DuMtXKrl42m0pM/1YOAPkxc=", + "path": "github.com/magefile/mage/mage", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "TkAemcxaY44gsEjO1BiBxwlEI4A=", + "path": "github.com/magefile/mage/mg", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "b1qY9BFtpJnIZEa8yvpJCRbOhRM=", + "path": "github.com/magefile/mage/parse", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "fEuDveZzYX6oqYOT9jqyZROun/Q=", + "path": "github.com/magefile/mage/parse/srcimporter", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "0/j3qlGc8fsWG42uIDZ5p8tVzPM=", + "path": "github.com/magefile/mage/sh", + "revision": 
"81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "oAjx69UIs6F6hPh+2GQSBMaHAfc=", + "path": "github.com/magefile/mage/target", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "He+VtZO7BsPDCZhZtJ1IkNp629o=", + "path": "github.com/magefile/mage/types", + "revision": "81dbe7074be509fcdc5e496481a8e01276332745", + "revisionTime": "2018-02-12T16:24:26Z" + }, + { + "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=", + "path": "github.com/matttproud/golang_protobuf_extensions/pbutil", + "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c", + "revisionTime": "2016-04-24T11:30:07Z" + }, + { + "checksumSHA1": "VqPwpjQKzPYZcTkqZOIk8b+gYqI=", + "path": "github.com/mholt/archiver", + "revision": "26cf5bb32d07aa4e8d0de15f56ce516f4641d7df", + "revisionTime": "2017-10-12T05:23:41Z" + }, + { + "checksumSHA1": "rz0k2HRJ9gx11wt/gqATiRd2qz8=", + "path": "github.com/nwaples/rardecode", + "revision": "e06696f847aeda6f39a8f0b7cdff193b7690aef6", + "revisionTime": "2017-03-13T01:07:58Z" + }, + { + "checksumSHA1": "xKzx54LbkghuMauevGWevn5ip3w=", + "path": "github.com/pierrec/lz4", + "revision": "ed8d4cc3b461464e69798080a0092bd028910298", + "revisionTime": "2018-01-13T15:17:03Z" + }, + { + "checksumSHA1": "zPWRjzsPeXCoqmidIcJtHbvrvRs=", + "path": "github.com/pierrec/xxHash/xxHash32", + "revision": "a0006b13c722f7f12368c00a3d3c2ae8a999a0c6", + "revisionTime": "2017-07-14T08:24:55Z" + }, + { + "checksumSHA1": "5dHjKxShYVWVB1Fb00dAnR6kqVk=", + "path": "github.com/prometheus/client_golang/prometheus", + "revision": "2641b987480bca71fb39738eb8c8b0d577cb1d76", + "revisionTime": "2019-06-07T14:56:44Z", + "version": "v0.9.4", + "versionExact": "v0.9.4" + }, + { + "checksumSHA1": "UBqhkyjCz47+S19MVTigxJ2VjVQ=", + "path": "github.com/prometheus/client_golang/prometheus/internal", + "revision": "2641b987480bca71fb39738eb8c8b0d577cb1d76", + "revisionTime": "2019-06-07T14:56:44Z", + "version": "v0.9.4", + "versionExact": "v0.9.4" + }, + { + "checksumSHA1": "V51yx4gq61QCD9clxnps792Eq2Y=", + "path": "github.com/prometheus/client_golang/prometheus/promhttp", + "revision": "2641b987480bca71fb39738eb8c8b0d577cb1d76", + "revisionTime": "2019-06-07T14:56:44Z", + "version": "v0.9.4", + "versionExact": "v0.9.4" + }, + { + "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=", + "path": "github.com/prometheus/client_model/go", + "revision": "6f3806018612930941127f2a7c6c453ba2c527d2", + "revisionTime": "2017-02-16T18:52:47Z" + }, + { + "checksumSHA1": "vA545Z9FkjGvIHBTAKQOE0nap/k=", + "path": "github.com/prometheus/common/expfmt", + "revision": "287d3e634a1e550c9e463dd7e5a75a422c614505", + "revisionTime": "2019-09-13T08:39:41Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" + }, + { + "checksumSHA1": "1Mhfofk+wGZ94M0+Bd98K8imPD4=", + "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", + "revision": "287d3e634a1e550c9e463dd7e5a75a422c614505", + "revisionTime": "2019-09-13T08:39:41Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" + }, + { + "checksumSHA1": "MGnqHnmEqc1fjnYiWReSiW8C27A=", + "path": "github.com/prometheus/common/log", + "revision": "287d3e634a1e550c9e463dd7e5a75a422c614505", + "revisionTime": "2019-09-13T08:39:41Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" + }, + { + "checksumSHA1": "ccmMs+h9Jo8kE7izqsUkWShD4d0=", + "path": "github.com/prometheus/common/model", + "revision": "287d3e634a1e550c9e463dd7e5a75a422c614505", + "revisionTime": "2019-09-13T08:39:41Z", 
+ "version": "v0.7.0", + "versionExact": "v0.7.0" + }, + { + "checksumSHA1": "91KYK0SpvkaMJJA2+BcxbVnyRO0=", + "path": "github.com/prometheus/common/version", + "revision": "287d3e634a1e550c9e463dd7e5a75a422c614505", + "revisionTime": "2019-09-13T08:39:41Z", + "version": "v0.7.0", + "versionExact": "v0.7.0" + }, + { + "checksumSHA1": "WB7dFqkmD3R514xql9YM3ZP1dDM=", + "path": "github.com/prometheus/procfs", + "revision": "833678b5bb319f2d20a475cb165c6cc59c2cc77c", + "revisionTime": "2019-05-31T16:30:47Z", + "version": "v0.0.2", + "versionExact": "v0.0.2" + }, + { + "checksumSHA1": "Kmjs49lbjGmlgUPx3pks0tVDed0=", + "path": "github.com/prometheus/procfs/internal/fs", + "revision": "65bdadfa96aecebf4dcf888da995a29eab4fc964", + "revisionTime": "2019-05-28T16:49:32Z", + "version": "v0.0.1", + "versionExact": "v0.0.1" + }, + { + "checksumSHA1": "BYvROBsiyAXK4sq6yhDe8RgT4LM=", + "path": "github.com/sirupsen/logrus", + "revision": "89742aefa4b206dcf400792f3bd35b542998eb3b", + "revisionTime": "2017-08-22T13:27:46Z" + }, + { + "checksumSHA1": "2CJmLcvYL6KW7gp2xaSdorR4i54=", + "path": "github.com/tmthrgd/go-bindata/restore", + "revision": "40f4993ede74f673cfe96bed75ef8513a389a00a", + "revisionTime": "2017-11-30T10:15:03Z" + }, + { + "checksumSHA1": "qgMa75aMGbkFY0jIqqqgVnCUoNA=", + "path": "github.com/ulikunitz/xz", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "vjnTkzNrMs5Xj6so/fq0mQ6dT1c=", + "path": "github.com/ulikunitz/xz/internal/hash", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "m0pm57ASBK/CTdmC0ppRHO17mBs=", + "path": "github.com/ulikunitz/xz/internal/xlog", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "2vZw6zc8xuNlyVz2QKvdlNSZQ1U=", + "path": "github.com/ulikunitz/xz/lzma", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "nqWNlnMmVpt628zzvyo6Yv2CX5Q=", + "path": "golang.org/x/crypto/ssh/terminal", + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" + }, + { + "checksumSHA1": "ftE54xFDY2r5NPWskbK88BQPrb4=", + "path": "golang.org/x/sys/unix", + "revision": "a1a1f1746d156bbc9954f29134b20ed4ce2752f1", + "revisionTime": "2017-10-23T12:30:29Z" + }, + { + "checksumSHA1": "wGkVl9xZjgnLs/olurjDX2Yg8Xw=", + "path": "golang.org/x/sys/windows", + "revision": "a1a1f1746d156bbc9954f29134b20ed4ce2752f1", + "revisionTime": "2017-10-23T12:30:29Z" + }, + { + "checksumSHA1": "ZdFZFaXmCgEEaEhVPkyXrnhKhsg=", + "path": "golang.org/x/sys/windows/registry", + "revision": "a1a1f1746d156bbc9954f29134b20ed4ce2752f1", + "revisionTime": "2017-10-23T12:30:29Z" + }, + { + "checksumSHA1": "uVlUSSKplihZG7N+QJ6fzDZ4Kh8=", + "path": "golang.org/x/sys/windows/svc/eventlog", + "revision": "a1a1f1746d156bbc9954f29134b20ed4ce2752f1", + "revisionTime": "2017-10-23T12:30:29Z" + }, + { + "checksumSHA1": "3SZTatHIy9OTKc95YlVfXKnoySg=", + "path": "gopkg.in/alecthomas/kingpin.v2", + "revision": "1087e65c9441605df944fb12c33f0fe7072d18ca", + "revisionTime": "2017-07-27T04:22:29Z" + }, + { + "checksumSHA1": "CEFTYXtWmgSh+3Ik1NmDaJcz4E0=", + "path": "gopkg.in/check.v1", + "revision": "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec", + "revisionTime": "2016-12-08T18:13:25Z" + }, + { + "checksumSHA1": "RDJpJQwkF012L6m/2BJizyOksNw=", + "path": "gopkg.in/yaml.v2", + "revision": 
"eb3733d160e74a9c7e442f435eb3bea458e1d19f", + "revisionTime": "2017-08-12T16:00:11Z" + } + ], + "rootPath": "github.com/wrouesnel/postgres_exporter" +}