Mirror of https://github.com/percona/percona-toolkit.git, synced 2025-09-20 02:44:58 +00:00

PT-1741 Migrated Go pt-mongo-tools to new driver
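The "new driver" in the commit title is the official MongoDB Go driver (go.mongodb.org/mongo-driver), replacing the mgo-based pmgo wrapper used before. The following is an illustrative sketch only (not part of the commit) of the connection pattern the migrated code relies on; the URI, credentials, and port are placeholder values mirroring the test environment defined below:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// URI and credentials are illustrative; they mirror the test .env added below.
	uri := "mongodb://admin:admin123456@127.0.0.1:17001"

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client, err := mongo.NewClient(options.Client().ApplyURI(uri))
	if err != nil {
		log.Fatalf("cannot create a new MongoDB client: %s", err)
	}
	if err := client.Connect(ctx); err != nil {
		log.Fatalf("cannot connect to MongoDB: %s", err)
	}
	defer client.Disconnect(ctx)

	// Enable profiling of all operations, as the migrated tests do before reading system.profile.
	res := client.Database("admin").RunCommand(ctx, primitive.M{"profile": 2, "slowms": 2})
	if res.Err() != nil {
		log.Fatalf("cannot enable profiler: %s", res.Err())
	}
}
```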
src/go/.env (new file, 28 lines)
@@ -0,0 +1,28 @@
AWS_ACCESS_KEY_ID=AKIARXP3OARBNV35P5M5
AWS_SECRET_ACCESS_KEY=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
GOCACHE=
GOLANG_DOCKERHUB_TAG=
TEST_MONGODB_ADMIN_USERNAME=admin
TEST_MONGODB_ADMIN_PASSWORD=admin123456
TEST_MONGODB_USERNAME=test
TEST_MONGODB_PASSWORD=123456
TEST_MONGODB_S1_RS=rs1
TEST_MONGODB_STANDALONE_PORT=27017
TEST_MONGODB_S1_PRIMARY_PORT=17001
TEST_MONGODB_S1_SECONDARY1_PORT=17002
TEST_MONGODB_S1_SECONDARY2_PORT=17003
TEST_MONGODB_S2_RS=rs2
TEST_MONGODB_S2_PRIMARY_PORT=17004
TEST_MONGODB_S2_SECONDARY1_PORT=17005
TEST_MONGODB_S2_SECONDARY2_PORT=17006
TEST_MONGODB_S3_RS=rs3
TEST_MONGODB_S3_PRIMARY_PORT=17021
TEST_MONGODB_S3_SECONDARY1_PORT=17022
TEST_MONGODB_S3_SECONDARY2_PORT=17023
TEST_MONGODB_CONFIGSVR_RS=csReplSet
TEST_MONGODB_CONFIGSVR1_PORT=17007
TEST_MONGODB_CONFIGSVR2_PORT=17008
TEST_MONGODB_CONFIGSVR3_PORT=17009
TEST_MONGODB_MONGOS_PORT=17000
TEST_PSMDB_VERSION=3.6
TEST_MONGODB_FLAVOR=percona/percona-server-mongodb
@@ -11,11 +11,98 @@ BIN_DIR=$(shell git rev-parse --show-toplevel)/bin
SRC_DIR=$(shell git rev-parse --show-toplevel)/src/go
LDFLAGS="-X main.Version=${VERSION} -X main.Build=${BUILD} -X main.GoVersion=${GOVERSION} -s -w"

TEST_PSMDB_VERSION?=3.6
TEST_MONGODB_FLAVOR?=percona/percona-server-mongodb
TEST_MONGODB_ADMIN_USERNAME?=admin
TEST_MONGODB_ADMIN_PASSWORD?=admin123456
TEST_MONGODB_USERNAME?=test
TEST_MONGODB_PASSWORD?=123456

TEST_MONGODB_STANDALONE_PORT?=27017

TEST_MONGODB_MONGOS_PORT?=17000

TEST_MONGODB_S1_RS?=rs1
TEST_MONGODB_S1_PRIMARY_PORT?=17001
TEST_MONGODB_S1_SECONDARY1_PORT?=17002
TEST_MONGODB_S1_SECONDARY2_PORT?=17003

TEST_MONGODB_S2_RS?=rs2
TEST_MONGODB_S2_PRIMARY_PORT?=17004
TEST_MONGODB_S2_SECONDARY1_PORT?=17005
TEST_MONGODB_S2_SECONDARY2_PORT?=17006

TEST_MONGODB_CONFIGSVR_RS?=csReplSet
TEST_MONGODB_CONFIGSVR1_PORT?=17007
TEST_MONGODB_CONFIGSVR2_PORT?=17008
TEST_MONGODB_CONFIGSVR3_PORT?=17009

TEST_MONGODB_S3_RS?=rs3
TEST_MONGODB_S3_PRIMARY_PORT?=17021
TEST_MONGODB_S3_SECONDARY1_PORT?=17022
TEST_MONGODB_S3_SECONDARY2_PORT?=17023

AWS_ACCESS_KEY_ID?=
AWS_SECRET_ACCESS_KEY?=

MINIO_PORT=9000
MINIO_ACCESS_KEY_ID=example00000
MINIO_SECRET_ACCESS_KEY=secret00000
export MINIO_ACCESS_KEY_ID
export MINIO_SECRET_ACCESS_KEY

.PHONY: all style format build test vet tarball linux-amd64

$(GOUTILSDIR)/dep:
	go get -u github.com/golang/dep/cmd/dep

define TEST_ENV
	AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) \
	AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) \
	GOCACHE=$(GOCACHE) \
	GOLANG_DOCKERHUB_TAG=$(GOLANG_DOCKERHUB_TAG) \
	TEST_MONGODB_ADMIN_USERNAME=$(TEST_MONGODB_ADMIN_USERNAME) \
	TEST_MONGODB_ADMIN_PASSWORD=$(TEST_MONGODB_ADMIN_PASSWORD) \
	TEST_MONGODB_USERNAME=$(TEST_MONGODB_USERNAME) \
	TEST_MONGODB_PASSWORD=$(TEST_MONGODB_PASSWORD) \
	TEST_MONGODB_S1_RS=$(TEST_MONGODB_S1_RS) \
	TEST_MONGODB_STANDALONE_PORT=$(TEST_MONGODB_STANDALONE_PORT) \
	TEST_MONGODB_S1_PRIMARY_PORT=$(TEST_MONGODB_S1_PRIMARY_PORT) \
	TEST_MONGODB_S1_SECONDARY1_PORT=$(TEST_MONGODB_S1_SECONDARY1_PORT) \
	TEST_MONGODB_S1_SECONDARY2_PORT=$(TEST_MONGODB_S1_SECONDARY2_PORT) \
	TEST_MONGODB_S2_RS=$(TEST_MONGODB_S2_RS) \
	TEST_MONGODB_S2_PRIMARY_PORT=$(TEST_MONGODB_S2_PRIMARY_PORT) \
	TEST_MONGODB_S2_SECONDARY1_PORT=$(TEST_MONGODB_S2_SECONDARY1_PORT) \
	TEST_MONGODB_S2_SECONDARY2_PORT=$(TEST_MONGODB_S2_SECONDARY2_PORT) \
	TEST_MONGODB_S3_RS=$(TEST_MONGODB_S3_RS) \
	TEST_MONGODB_S3_PRIMARY_PORT=$(TEST_MONGODB_S3_PRIMARY_PORT) \
	TEST_MONGODB_S3_SECONDARY1_PORT=$(TEST_MONGODB_S3_SECONDARY1_PORT) \
	TEST_MONGODB_S3_SECONDARY2_PORT=$(TEST_MONGODB_S3_SECONDARY2_PORT) \
	TEST_MONGODB_CONFIGSVR_RS=$(TEST_MONGODB_CONFIGSVR_RS) \
	TEST_MONGODB_CONFIGSVR1_PORT=$(TEST_MONGODB_CONFIGSVR1_PORT) \
	TEST_MONGODB_CONFIGSVR2_PORT=$(TEST_MONGODB_CONFIGSVR2_PORT) \
	TEST_MONGODB_CONFIGSVR3_PORT=$(TEST_MONGODB_CONFIGSVR3_PORT) \
	TEST_MONGODB_MONGOS_PORT=$(TEST_MONGODB_MONGOS_PORT) \
	TEST_PSMDB_VERSION=$(TEST_PSMDB_VERSION) \
	TEST_MONGODB_FLAVOR=$(TEST_MONGODB_FLAVOR)
endef

env:
	@echo $(TEST_ENV) | tr ' ' '\n' >.env

test-cluster: env
	TEST_PSMDB_VERSION=$(TEST_PSMDB_VERSION) \
	docker-compose up \
	--detach \
	--force-recreate \
	--always-recreate-deps \
	--renew-anon-volumes \
	init
	docker/test/init-cluster-wait.sh

test-cluster-clean: env
	docker-compose down -v

linux-amd64:
	@echo "Building linux/amd64 binaries in ${BIN_DIR}"
	@cd ${TOP_DIR} && dep ensure
src/go/docker-compose.yml (new file, 198 lines)
@@ -0,0 +1,198 @@
---
version: '3'
services:
  standalone:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --port=27017
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s1-mongo1:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S1_RS} --port=${TEST_MONGODB_S1_PRIMARY_PORT} --shardsvr
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s1-mongo2:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S1_RS} --port=${TEST_MONGODB_S1_SECONDARY1_PORT} --shardsvr
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s1-mongo3:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S1_RS} --port=${TEST_MONGODB_S1_SECONDARY2_PORT} --shardsvr
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s2-mongo1:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S2_RS} --port=${TEST_MONGODB_S2_PRIMARY_PORT} --shardsvr
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s2-mongo2:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S2_RS} --port=${TEST_MONGODB_S2_SECONDARY1_PORT} --shardsvr
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s2-mongo3:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S2_RS} --port=${TEST_MONGODB_S2_SECONDARY2_PORT} --shardsvr
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s3-mongo1:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S3_RS} --port=${TEST_MONGODB_S3_PRIMARY_PORT}
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s3-mongo2:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S3_RS} --port=${TEST_MONGODB_S3_SECONDARY1_PORT}
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  s3-mongo3:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_S3_RS} --port=${TEST_MONGODB_S3_SECONDARY2_PORT}
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  configsvr1:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --replSet=${TEST_MONGODB_CONFIGSVR_RS} --port=${TEST_MONGODB_CONFIGSVR1_PORT} --configsvr
    volumes:
      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongod.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
#  configsvr2:
#    network_mode: host
#    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
#    command: --replSet=${TEST_MONGODB_CONFIGSVR_RS} --port=${TEST_MONGODB_CONFIGSVR2_PORT} --configsvr
#    volumes:
#      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
#      - ./docker/test/mongod.key:/mongod.key:ro
#      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
#      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
#  configsvr3:
#    network_mode: host
#    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
#    command: --replSet=${TEST_MONGODB_CONFIGSVR_RS} --port=${TEST_MONGODB_CONFIGSVR3_PORT} --configsvr
#    volumes:
#      - ./docker/test/entrypoint-mongod.sh:/entrypoint.sh:ro
#      - ./docker/test/mongod.key:/mongod.key:ro
#      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
#      - ./docker/test/ssl/mongodb.pem:/mongod.pem:ro
  mongos:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    command: --port=${TEST_MONGODB_MONGOS_PORT} --configdb=${TEST_MONGODB_CONFIGSVR_RS}/127.0.0.1:${TEST_MONGODB_CONFIGSVR1_PORT}
    volumes:
      - ./docker/test/entrypoint-mongos.sh:/entrypoint.sh:ro
      - ./docker/test/entrypoint-mongos.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongos.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/mongodb.pem:/mongos.pem:ro
    depends_on:
      - configsvr1
      # - configsvr2
      # - configsvr3
  minio:
    network_mode: host
    image: minio/minio
    environment:
      - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY_ID}
      - MINIO_SECRET_KEY=${MINIO_SECRET_ACCESS_KEY}
    command: server /data
  init:
    network_mode: host
    image: ${TEST_MONGODB_FLAVOR}:${TEST_PSMDB_VERSION}
    volumes:
      - ./docker/test/init-cluster.sh:/entrypoint.sh:ro
      - ./docker/test/init-cluster.sh:/usr/local/bin/docker-entrypoint.sh:ro
      - ./docker/test/mongod.key:/mongod.key:ro
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/client.pem:/client.pem:ro
    env_file:
      - .env
    depends_on:
      - mongos
      - s1-mongo1
      - s1-mongo2
      - s1-mongo3
      - s2-mongo1
      - s2-mongo2
      - s2-mongo3
      - s3-mongo1
      - s3-mongo2
      - s3-mongo3
      - standalone
      - minio
  test:
    build:
      dockerfile: docker/test/Dockerfile
      context: .
      args:
        - GOLANG_DOCKERHUB_TAG=${GOLANG_DOCKERHUB_TAG}
    network_mode: host
    env_file:
      - .env
    volumes:
      - ./test-out:/tmp/out
      - ./docker/test/ssl/rootCA.crt:/rootCA.crt:ro
      - ./docker/test/ssl/client.pem:/client.pem:ro
    depends_on:
      - mongos
      - s1-mongo1
      - s1-mongo2
      - s1-mongo3
      - s2-mongo1
      - s2-mongo2
      - s2-mongo3
      - init
src/go/docker/Dockerfile.common (new file, 26 lines)
@@ -0,0 +1,26 @@
FROM registry.access.redhat.com/ubi7/ubi-minimal
RUN microdnf update && microdnf clean all

MAINTAINER Percona Development <info@percona.com>
LABEL name="Percona Backup for MongoDB" \
    vendor="Percona" \
    summary="Percona Backup for MongoDB is a distributed, low-impact solution for achieving consistent backups of MongoDB Sharded Clusters and Replica Sets." \
    description=" This is a tool for creating consistent backups across a MongoDB sharded cluster (or a single replica set), and for restoring those backups to a specific point in time. Percona Backup for MongoDB uses a distributed client/server architecture to perform backup/restore actions."

COPY LICENSE /licenses/

# kubectl needed for Percona Operator for PSMDB
ENV KUBECTL_VERSION=v1.14.1
ENV KUBECTL_MD5SUM=223668b6d47121a9011645b04f5ef349
RUN curl -o /usr/bin/kubectl \
    https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl \
    && chmod +x /usr/bin/kubectl \
    && echo "${KUBECTL_MD5SUM} /usr/bin/kubectl" | md5sum -c - \
    && curl -o /licenses/LICENSE.kubectl \
    https://raw.githubusercontent.com/kubernetes/kubectl/master/LICENSE

COPY pbmctl pbm-agent pbm-coordinator /usr/local/bin/

USER nobody

CMD ["pbmctl"]
src/go/docker/Dockerfile.release (new file, 9 lines)
@@ -0,0 +1,9 @@
FROM goreleaser/goreleaser

RUN apk --no-cache add upx

WORKDIR /go/src/github.com/percona/percona-backup-mongodb
COPY . .

ENTRYPOINT ["goreleaser"]
CMD ["release"]
src/go/docker/test/Dockerfile (new file, 15 lines)
@@ -0,0 +1,15 @@
ARG GOLANG_DOCKERHUB_TAG
FROM golang:$GOLANG_DOCKERHUB_TAG

RUN wget https://repo.percona.com/apt/percona-release_0.1-7.stretch_all.deb && dpkg -i percona-release_0.1-7.stretch_all.deb
RUN apt-get update && apt-get install -y percona-server-mongodb-36-server

WORKDIR /go/src/github.com/percona/percona-backup-mongodb
COPY . .
RUN chown -R mongod.mongod /go

USER mongod
RUN make vendor
CMD make test && \
    make test-gosec && \
    make
src/go/docker/test/entrypoint-mongod.sh (new executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/bin/bash

cp /mongod.key /tmp/mongod.key
cp /mongod.pem /tmp/mongod.pem
cp /rootCA.crt /tmp/mongod-rootCA.crt
chmod 400 /tmp/mongod.key /tmp/mongod.pem /tmp/mongod-rootCA.crt

/usr/bin/mongod \
    --bind_ip=0.0.0.0 \
    --dbpath=/data/db \
    --keyFile=/tmp/mongod.key \
    --oplogSize=50 \
    --profile=2 \
    --sslMode=preferSSL \
    --sslCAFile=/tmp/mongod-rootCA.crt \
    --sslPEMKeyFile=/tmp/mongod.pem \
    --wiredTigerCacheSizeGB=0.1 \
    $*
src/go/docker/test/entrypoint-mongos.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash

cp /mongos.key /tmp/mongos.key
cp /mongos.pem /tmp/mongos.pem
cp /rootCA.crt /tmp/mongod-rootCA.crt
chmod 400 /tmp/mongos.key /tmp/mongos.pem /tmp/mongod-rootCA.crt

/usr/bin/mongos \
    --keyFile=/tmp/mongos.key \
    --bind_ip=127.0.0.1 \
    --sslMode=preferSSL \
    --sslCAFile=/tmp/mongod-rootCA.crt \
    --sslPEMKeyFile=/tmp/mongos.pem \
    $*
src/go/docker/test/init-cluster-wait.sh (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash

tries=1
max_tries=60
sleep_secs=1

while [ $tries -lt $max_tries ]; do
	docker-compose ps init 2>/dev/null | grep -q 'Exit 0'
	[ $? == 0 ] && break
	echo "# INFO: 'init' has not completed, retrying check in $sleep_secs secs (try $tries/$max_tries)"
	sleep $sleep_secs
	tries=$(($tries + 1))
done
if [ $tries -ge $max_tries ]; then
	echo "# ERROR: reached max tries $max_tries, exiting"
	exit 1
fi
src/go/docker/test/init-cluster.sh (new executable file, 245 lines)
@@ -0,0 +1,245 @@
|
||||
#!/bin/bash
|
||||
|
||||
max_tries=45
|
||||
sleep_secs=1
|
||||
|
||||
cp /rootCA.crt /tmp/rootCA.crt
|
||||
cp /client.pem /tmp/client.pem
|
||||
chmod 400 /tmp/rootCA.crt /tmp/client.pem
|
||||
|
||||
MONGODB_IP=127.0.0.1
|
||||
MONGO_FLAGS="--quiet --host=${MONGODB_IP} --ssl --sslCAFile=/tmp/rootCA.crt --sslPEMKeyFile=/tmp/client.pem"
|
||||
|
||||
sleep $sleep_secs
|
||||
|
||||
/usr/bin/mongo --version
|
||||
|
||||
|
||||
## Shard 1
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--port=${TEST_MONGODB_S1_PRIMARY_PORT} \
|
||||
--eval='rs.initiate({
|
||||
_id: "'${TEST_MONGODB_S1_RS}'",
|
||||
version: 1,
|
||||
members: [
|
||||
{ _id: 0, host: "'${MONGODB_IP}':'${TEST_MONGODB_S1_PRIMARY_PORT}'", priority: 10 },
|
||||
{ _id: 1, host: "'${MONGODB_IP}':'${TEST_MONGODB_S1_SECONDARY1_PORT}'", priority: 1 },
|
||||
{ _id: 2, host: "'${MONGODB_IP}':'${TEST_MONGODB_S1_SECONDARY2_PORT}'", priority: 0, hidden: true, tags: { role: "backup" } }
|
||||
]})' | tee /tmp/init-result.json
|
||||
if [ $? == 0 ]; then
|
||||
grep -q '"ok" : 1' /tmp/init-result.json
|
||||
[ $? == 0 ] && rm -vf /tmp/init-result.json && break
|
||||
fi
|
||||
echo "# INFO: retrying rs.initiate() on ${TEST_MONGODB_S1_RS} in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
if [ $tries -ge $max_tries ]; then
|
||||
echo "# ERROR: reached max tries $max_tries for ${TEST_MONGODB_S1_RS}, exiting"
|
||||
exit 1
|
||||
fi
|
||||
echo "# INFO: replset ${TEST_MONGODB_S1_RS} is initiated"
|
||||
|
||||
|
||||
## Shard 2
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--port=${TEST_MONGODB_S2_PRIMARY_PORT} \
|
||||
--eval='rs.initiate({
|
||||
_id: "'${TEST_MONGODB_S2_RS}'",
|
||||
version: 1,
|
||||
members: [
|
||||
{ _id: 0, host: "'${MONGODB_IP}':'${TEST_MONGODB_S2_PRIMARY_PORT}'", priority: 10 },
|
||||
{ _id: 1, host: "'${MONGODB_IP}':'${TEST_MONGODB_S2_SECONDARY1_PORT}'", priority: 1 },
|
||||
{ _id: 2, host: "'${MONGODB_IP}':'${TEST_MONGODB_S2_SECONDARY2_PORT}'", priority: 0, hidden: true, tags: { role: "backup" } }
|
||||
]})' | tee /tmp/init-result.json
|
||||
if [ $? == 0 ]; then
|
||||
grep -q '"ok" : 1' /tmp/init-result.json
|
||||
[ $? == 0 ] && rm -vf /tmp/init-result.json && break
|
||||
fi
|
||||
echo "# INFO: retrying rs.initiate() on ${TEST_MONGODB_S2_RS} in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
if [ $tries -ge $max_tries ]; then
|
||||
echo "# ERROR: reached max tries $max_tries for ${TEST_MONGODB_S2_RS}, exiting"
|
||||
exit 1
|
||||
fi
|
||||
echo "# INFO: replset ${TEST_MONGODB_S2_RS} is initiated"
|
||||
|
||||
|
||||
## Configsvr replset
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--port=${TEST_MONGODB_CONFIGSVR1_PORT} \
|
||||
--eval='rs.initiate({
|
||||
_id: "'${TEST_MONGODB_CONFIGSVR_RS}'",
|
||||
configsvr: true,
|
||||
version: 1,
|
||||
members: [
|
||||
{ _id: 0, host: "'${MONGODB_IP}':'${TEST_MONGODB_CONFIGSVR1_PORT}'" }
|
||||
]
|
||||
})'
|
||||
[ $? == 0 ] && break
|
||||
echo "# INFO: retrying rs.initiate() for configsvr in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
if [ $tries -ge $max_tries ]; then
|
||||
echo "# ERROR: reached max tries $max_tries, exiting"
|
||||
exit 1
|
||||
fi
|
||||
echo "# INFO: sharding configsvr is initiated"
|
||||
|
||||
## Replica set 3 (non sharded)
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--port=${TEST_MONGODB_S3_PRIMARY_PORT} \
|
||||
--eval='rs.initiate({
|
||||
_id: "'${TEST_MONGODB_S3_RS}'",
|
||||
version: 1,
|
||||
members: [
|
||||
{ _id: 0, host: "'${MONGODB_IP}':'${TEST_MONGODB_S3_PRIMARY_PORT}'", priority: 10 },
|
||||
{ _id: 1, host: "'${MONGODB_IP}':'${TEST_MONGODB_S3_SECONDARY1_PORT}'", priority: 1 },
|
||||
{ _id: 2, host: "'${MONGODB_IP}':'${TEST_MONGODB_S3_SECONDARY2_PORT}'", priority: 0, hidden: true, tags: { role: "backup" } }
|
||||
]})' | tee /tmp/init3-result.json
|
||||
if [ $? == 0 ]; then
|
||||
grep -q '"ok" : 1' /tmp/init3-result.json
|
||||
[ $? == 0 ] && rm -vf /tmp/init3-result.json && break
|
||||
fi
|
||||
echo "# INFO: retrying rs.initiate() on ${TEST_MONGODB_S3_RS} in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
if [ $tries -ge $max_tries ]; then
|
||||
echo "# ERROR: reached max tries $max_tries for ${TEST_MONGODB_S3_RS}, exiting"
|
||||
exit 1
|
||||
fi
|
||||
echo "# INFO: replset ${TEST_MONGODB_S3_RS} is initiated"
|
||||
|
||||
for MONGODB_PORT in ${TEST_MONGODB_S1_PRIMARY_PORT} ${TEST_MONGODB_S2_PRIMARY_PORT} ${TEST_MONGODB_CONFIGSVR1_PORT}; do
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
ISMASTER=$(/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--port=${MONGODB_PORT} \
|
||||
--eval='printjson(db.isMaster().ismaster)' 2>/dev/null)
|
||||
[ "$ISMASTER" == "true" ] && break
|
||||
echo "# INFO: retrying db.isMaster() check on 127.0.0.1:${MONGODB_PORT} in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
if [ $tries -ge $max_tries ]; then
|
||||
echo "# ERROR: reached max tries $max_tries, exiting"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
echo "# INFO: all replsets have primary"
|
||||
|
||||
|
||||
for MONGODB_PORT in 27017 ${TEST_MONGODB_S1_PRIMARY_PORT} ${TEST_MONGODB_S2_PRIMARY_PORT} ${TEST_MONGODB_CONFIGSVR1_PORT} ${TEST_MONGODB_S3_PRIMARY_PORT}; do
|
||||
echo "PORT $MONGODB_PORT"
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--port=${MONGODB_PORT} \
|
||||
--eval='db.createUser({
|
||||
user: "'${TEST_MONGODB_ADMIN_USERNAME}'",
|
||||
pwd: "'${TEST_MONGODB_ADMIN_PASSWORD}'",
|
||||
roles: [
|
||||
{ db: "admin", role: "root" }
|
||||
]
|
||||
})' \
|
||||
admin
|
||||
if [ $? == 0 ]; then
|
||||
echo "# INFO: added admin user to 127.0.0.1:${MONGODB_PORT}"
|
||||
/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--username=${TEST_MONGODB_ADMIN_USERNAME} \
|
||||
--password=${TEST_MONGODB_ADMIN_PASSWORD} \
|
||||
--port=${MONGODB_PORT} \
|
||||
--eval='db.createUser({
|
||||
user: "'${TEST_MONGODB_USERNAME}'",
|
||||
pwd: "'${TEST_MONGODB_PASSWORD}'",
|
||||
roles: [
|
||||
{ db: "admin", role: "backup" },
|
||||
{ db: "admin", role: "clusterMonitor" },
|
||||
{ db: "admin", role: "restore" },
|
||||
{ db: "config", role: "read" },
|
||||
{ db: "test", role: "readWrite" }
|
||||
]
|
||||
})' \
|
||||
admin
|
||||
[ $? == 0 ] && echo "# INFO: added test user to 127.0.0.1:${MONGODB_PORT}" && break
|
||||
fi
|
||||
echo "# INFO: retrying db.createUser() on 127.0.0.1:${MONGODB_PORT} in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
done
|
||||
echo "# INFO: all replsets have auth user(s)"
|
||||
|
||||
|
||||
shard1=${TEST_MONGODB_S1_RS}'/127.0.0.1:'${TEST_MONGODB_S1_PRIMARY_PORT}',127.0.0.1:'${TEST_MONGODB_S1_SECONDARY1_PORT}
|
||||
shard2=${TEST_MONGODB_S2_RS}'/127.0.0.1:'${TEST_MONGODB_S2_PRIMARY_PORT}',127.0.0.1:'${TEST_MONGODB_S2_SECONDARY1_PORT}
|
||||
for shard in $shard1 $shard2; do
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
ADDSHARD=$(/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--username=${TEST_MONGODB_ADMIN_USERNAME} \
|
||||
--password=${TEST_MONGODB_ADMIN_PASSWORD} \
|
||||
--port=${TEST_MONGODB_MONGOS_PORT} \
|
||||
--eval='printjson(sh.addShard("'$shard'").ok)' \
|
||||
admin 2>/dev/null)
|
||||
[ $? == 0 ] && [ "$ADDSHARD" == "1" ] && break
|
||||
echo "# INFO: retrying sh.addShard() check for '$shard' in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
if [ $tries -ge $max_tries ]; then
|
||||
echo "# ERROR: reached max tries $max_tries for '$shard', exiting"
|
||||
exit 1
|
||||
fi
|
||||
echo "# INFO: added shard: $shard"
|
||||
done
|
||||
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
ENABLESHARDING=$(/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--username=${TEST_MONGODB_ADMIN_USERNAME} \
|
||||
--password=${TEST_MONGODB_ADMIN_PASSWORD} \
|
||||
--port=${TEST_MONGODB_MONGOS_PORT} \
|
||||
--eval='sh.enableSharding("test").ok' \
|
||||
admin 2>/dev/null)
|
||||
[ $? == 0 ] && [ "$ENABLESHARDING" == "1" ] && break
|
||||
echo "# INFO: retrying sh.enableSharding(\"test\") check in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
if [ $tries -ge $max_tries ]; then
|
||||
echo "# ERROR: reached max tries $max_tries for '$shard', exiting"
|
||||
exit 1
|
||||
fi
|
||||
echo "# INFO: \"test\" database is now sharded"
|
||||
|
||||
tries=1
|
||||
while [ $tries -lt $max_tries ]; do
|
||||
SHARDCOL=$(/usr/bin/mongo ${MONGO_FLAGS} \
|
||||
--username=${TEST_MONGODB_ADMIN_USERNAME} \
|
||||
--password=${TEST_MONGODB_ADMIN_PASSWORD} \
|
||||
--port=${TEST_MONGODB_MONGOS_PORT} \
|
||||
--eval='sh.shardCollection("test.test", {_id: 1}).ok' \
|
||||
admin 2>/dev/null)
|
||||
[ $? == 0 ] && [ "$ENABLESHARDING" == "1" ] && break
|
||||
echo "# INFO: retrying sh.shardCollection(\"test.test\", {_id: 1}) check in $sleep_secs secs (try $tries/$max_tries)"
|
||||
sleep $sleep_secs
|
||||
tries=$(($tries + 1))
|
||||
done
|
||||
if [ $tries -ge $max_tries ]; then
|
||||
echo "# ERROR: reached max tries $max_tries for '$shard', exiting"
|
||||
exit 1
|
||||
fi
|
||||
echo "# INFO: \"test.test\" collection is now sharded"
|
src/go/docker/test/mongod.key (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
T774VtseEvMWhy5MjNvfZaxaMHRAEeSw/2coSJgL1NZGgMhOsVHR+5srsyQqVSu3
|
||||
E4trWyzH/lL/NV5gNaVpKiqGZbCxVbsVU4IwPIvkbNBqa2qrlO6pPzSLkOY+9S72
|
||||
3RFZX2h7NgldxofH5OXRpQeldKYwJOvZCBgH4sCzN/hZwBLGfqUOGHbOotuxcWXP
|
||||
Jhm4HIlIu1F7OFv2SOFGSX+EBpGrMKVElbvCV16A8s4hV2hjqq/ZMiLyzK5sX1hV
|
||||
CiQ8P8fmRJ83k3C6It6b714Kq0NYhRiAJDVzEOBaU7m76x+QnADZ0op/Zhd/mseb
|
||||
H22kqXSJgJnfd8D8xaEdUU0UKNoMLrluQnuKYcM6OrbKqQcucEh68kAiRYgdMvBi
|
||||
QFDV5QmBBSUwY3ZbbBzr8jZs1AnkAt8I01TQzjPrzA7jIP5YdcvHYd/0YA9tug4B
|
||||
pyDSUnSysDuJOMSB0Lsc5bj4G+vx1IL73uBMZXzIJZKaSFLkvpWVTyGydqDQkwl4
|
||||
+bWZwPBDWegL+rCCm8pvjVHuEpb7tmZqlZnE73Snm6QGdAy2hXu0iAk/wnNlpS1B
|
||||
UqOUwWa4H+u2oTHGT75T3uui4RfMMF1LqMooTsPMTsRGCU8mH3FAKZIIDSy9LTVf
|
||||
x8CpffgMMvmBfHuiOEwh/79KjWX/906dUx9wcGiOoKFbH/8DHFYePqd6J/HFSKLb
|
||||
w0izT8aU+VlDebRhe6uMTCU8eIOzBJ6vXxIbaXovO/69vBBEqVGMMOF9zr2soFxr
|
||||
0P7Fv/fAPD6HeBCzQbd/O2WUlvYCLaYwTZYhZYVnuESfur93Bh82QxEMQP+9GHU6
|
||||
o912HkSNvCwSt2kpG4BRHbCYRm4AE2YUNx4LjcY6kpo6RGNPCcyXduC1BVFDFPj2
|
||||
ZVJ/GN3MGf8QdhswPo7fnYsG7QF2oke45CHExsYC99XJPb+QdEElzuKeiUoEjvHL
|
||||
Pjlglnj+mGuAPMiVyKxzsz6yU6iKSl8LFBrfTA2GkbeT4G3h
|
src/go/docker/test/ssl/client.pem (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIESjCCAjKgAwIBAgIRAKG/1k+fiFpnx/pdBHp0Hc8wDQYJKoZIhvcNAQELBQAw
|
||||
ETEPMA0GA1UEAxMGcm9vdENBMB4XDTE4MDkwNjExMDQzNloXDTIwMDMwNjExMDQz
|
||||
M1owFDESMBAGA1UEAxMJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
|
||||
MIIBCgKCAQEAl+Qnlg3dSlyYi34yF2D35g9PhU6o5gm3qN22pEdDTOrTEihnF6lq
|
||||
XqieDAaTTYehilqSsBjdZN8uTeJQ5Dsr/g8n43y8KCQFIcpNJLldV+pNEZydCK3R
|
||||
sPr4+GgWGdpmA5Za8VlRgilYNVzSmABz9LZoa33YIjMSQ0BftAFnFl6N0ikDwPuN
|
||||
L1A40EaE121QeEQgUTbcWcrJ1vJkJgcSGK6blVOy1dmHL4ABoD+n1+abDsoKM1Yz
|
||||
XOgci8rbNUTS6P/2j4VW+MZRnM6rFCbo7wW11IUYSyShhTJoWFMdtc7zmQdTVBlo
|
||||
RhKU0Ok1QDVr6vO+3PKriUcWY0cLFRcsGwIDAQABo4GZMIGWMA4GA1UdDwEB/wQE
|
||||
AwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFAaP
|
||||
Vu/GKnWUUjNznMkpU7t1V8V3MB8GA1UdIwQYMBaAFNVjkwWn6aryJsRlIsOr4gFM
|
||||
ISnFMCUGA1UdEQQeMByCCWxvY2FsaG9zdIIJMTI3LjAuMC4xhwR/AAABMA0GCSqG
|
||||
SIb3DQEBCwUAA4ICAQCYl8SMZJlaBOyUM0VAOZDks+OICJn8PUdu5XqvFA5ATnXE
|
||||
MRGP1h34R/6Vkl/jwB/+2+9zEgT1z4hZ55blyOmZFB/j/jPrlqEIkUMHK1CVC6jI
|
||||
f8ubNp4MNHVoV2Cw6wLw8E/GMZIcZ4t0G9Z7kFjHh0qS5PN00zbBCj927Q2FoBc2
|
||||
oybS4KVgFeD9fklYCMoETp+WWaM7dec0OBdKWyCEP3JmMHITKTipro5yf6RZDvAB
|
||||
TvoGcZIsIdKnoAknMYwG4ibcyI0z6XIF6/Hy/E6XdbsnmCHGIBHbRMMvqNXz3XJa
|
||||
1s/vA4MHkUF9N14MRVI8cepFMsYBZkztNylYw159b9qiHzbeUm3BrSaJzQjefqkD
|
||||
cMFLJ0jcZDg8N+pyMi3vvr41HfONw8iyqis3ZAjftZ56fwoj6ap4QZI8P+M7R//X
|
||||
A4r11+ldDRsJRnLi6kCJK/ta2pKGuUvFeVqDDc/wNfBUMkmUeyZ9AcNoxFNDMmZT
|
||||
sEhj6mTHcKlo+BcVdYMO4FrrgXkuRS0gY82qZucHRGQh3G1QPs4di7pVCopXuWjQ
|
||||
8foI+SSRFqfcdPS5ljVyLV1g+RVBJnGYQiCM/JAPokRZpimZherxsmdnAW1A/XR1
|
||||
/LXHw/5upwkouzsjFTEM8g1WDwtfp3HGFnHUxVHSe2lXI/2o+DZBU/ILEpxrgQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAl+Qnlg3dSlyYi34yF2D35g9PhU6o5gm3qN22pEdDTOrTEihn
|
||||
F6lqXqieDAaTTYehilqSsBjdZN8uTeJQ5Dsr/g8n43y8KCQFIcpNJLldV+pNEZyd
|
||||
CK3RsPr4+GgWGdpmA5Za8VlRgilYNVzSmABz9LZoa33YIjMSQ0BftAFnFl6N0ikD
|
||||
wPuNL1A40EaE121QeEQgUTbcWcrJ1vJkJgcSGK6blVOy1dmHL4ABoD+n1+abDsoK
|
||||
M1YzXOgci8rbNUTS6P/2j4VW+MZRnM6rFCbo7wW11IUYSyShhTJoWFMdtc7zmQdT
|
||||
VBloRhKU0Ok1QDVr6vO+3PKriUcWY0cLFRcsGwIDAQABAoIBACCfhFEaUTkzgiAT
|
||||
zrZuwU/MYgwDxQsDc0r1s9C87ZuLpCH2Q441eP8zwB5dGy4/v1Zz9aWU8ZhExzE7
|
||||
NpyOiPhcUPjvRYppkiCbzs3gckf7runldWTz0GHuxaK02GpdGiQTGx1TTcpjDusH
|
||||
MMQs9LrOosbTlKRjy7xeCzAiTP3kpGRw0+C8ey5GJ6PxchPQVDp0ONlfUjpsPO+c
|
||||
FussLv5zg0UwaI62HPuJCGYEOXF8DLKcq/0YuZjesdbyrRzJ3B4KVVsG07BOWpoc
|
||||
4Rn7E45oa1nXclfAo2ivJPuNsa/77lYtJnk+/o48U1UwzysjfYvrtZ6QeJ9nNcG9
|
||||
9bbSsmECgYEAxZVHZTwoEVsa9rqWFs1gBU4ZziH57Sxt42zD0uQ5cBRvtAbNqRo6
|
||||
C/nnSuJEdxanPB8YRCkV2iJcsrrVY2AuEci1WJyGdCoP5LMl1DEUEYIRsot1hxL8
|
||||
l0Cab2IwpHZ52hYpEfR/Zfa7G2/UBJ+sLu8IDwNqGxqljFCzmO4PSBcCgYEAxMyJ
|
||||
TCPGGX8Rk6t1GoBxGl97OrsOdKNQsKgk/c91tsZKqItUGeYhx0YS29xg5uJ3WNmN
|
||||
3I9LW3RyVrn2DIn1fftKe4PCvy0/bf7Wr1U2PeaD8vLgWbos7fHn0cYlJInMABV2
|
||||
8QQheCOj+fhSummiwqH7OhorGQ4Y+Gnzjkqrep0CgYA5pMOflV1bMuk68lS3clOB
|
||||
OLfum5r+xueIYkL/U/Yt7MhqDVIS88Pbmp0QC9NNqx4/PBMoT5RAf6XrvvZid7z5
|
||||
E0VVBNV1LpBUeK+gqHDiasAfBvDS54cp2X8038CxOp9yMOTqiBpi9QjBiG6iqrLh
|
||||
PntrZeOe5LdHqIO9KjbrIQKBgBaEvPUcZs+GDbHS/axRpB97a3NV8hqAkXwVUV5F
|
||||
fdezKtnMT4xDG/xcVU4ZEnF42mUtR6FEOEA3u9mWn8PhiVioB2bIteEAQXDJpzEa
|
||||
1AETPmfvSKKbvgZgFsGXJarfpZsg2aJMcbP4iAvTUUwJSFlzBXcphWLxjQPnw7m1
|
||||
a5e1AoGBALK70cpPmDMtKp3kmmTIDVlry42rMH/vSd31uXeEuO7xGOA2ijzpgoU2
|
||||
sS7sD/Rf4m+3rJ5E+ys5aWi0vffnSBcLCxXJQS0Ck4lK+hTmPucHcZKy3o/cJNEM
|
||||
rhkNdLdtzhtKMwbBcKeFAHdnp+2yzFOrjbbRKFFyirWfOZ9eVoZ3
|
||||
-----END RSA PRIVATE KEY-----
|
src/go/docker/test/ssl/mongodb.pem (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIESjCCAjKgAwIBAgIRAKG/1k+fiFpnx/pdBHp0Hc8wDQYJKoZIhvcNAQELBQAw
|
||||
ETEPMA0GA1UEAxMGcm9vdENBMB4XDTE4MDkwNjExMDQzNloXDTIwMDMwNjExMDQz
|
||||
M1owFDESMBAGA1UEAxMJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
|
||||
MIIBCgKCAQEAl+Qnlg3dSlyYi34yF2D35g9PhU6o5gm3qN22pEdDTOrTEihnF6lq
|
||||
XqieDAaTTYehilqSsBjdZN8uTeJQ5Dsr/g8n43y8KCQFIcpNJLldV+pNEZydCK3R
|
||||
sPr4+GgWGdpmA5Za8VlRgilYNVzSmABz9LZoa33YIjMSQ0BftAFnFl6N0ikDwPuN
|
||||
L1A40EaE121QeEQgUTbcWcrJ1vJkJgcSGK6blVOy1dmHL4ABoD+n1+abDsoKM1Yz
|
||||
XOgci8rbNUTS6P/2j4VW+MZRnM6rFCbo7wW11IUYSyShhTJoWFMdtc7zmQdTVBlo
|
||||
RhKU0Ok1QDVr6vO+3PKriUcWY0cLFRcsGwIDAQABo4GZMIGWMA4GA1UdDwEB/wQE
|
||||
AwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFAaP
|
||||
Vu/GKnWUUjNznMkpU7t1V8V3MB8GA1UdIwQYMBaAFNVjkwWn6aryJsRlIsOr4gFM
|
||||
ISnFMCUGA1UdEQQeMByCCWxvY2FsaG9zdIIJMTI3LjAuMC4xhwR/AAABMA0GCSqG
|
||||
SIb3DQEBCwUAA4ICAQCYl8SMZJlaBOyUM0VAOZDks+OICJn8PUdu5XqvFA5ATnXE
|
||||
MRGP1h34R/6Vkl/jwB/+2+9zEgT1z4hZ55blyOmZFB/j/jPrlqEIkUMHK1CVC6jI
|
||||
f8ubNp4MNHVoV2Cw6wLw8E/GMZIcZ4t0G9Z7kFjHh0qS5PN00zbBCj927Q2FoBc2
|
||||
oybS4KVgFeD9fklYCMoETp+WWaM7dec0OBdKWyCEP3JmMHITKTipro5yf6RZDvAB
|
||||
TvoGcZIsIdKnoAknMYwG4ibcyI0z6XIF6/Hy/E6XdbsnmCHGIBHbRMMvqNXz3XJa
|
||||
1s/vA4MHkUF9N14MRVI8cepFMsYBZkztNylYw159b9qiHzbeUm3BrSaJzQjefqkD
|
||||
cMFLJ0jcZDg8N+pyMi3vvr41HfONw8iyqis3ZAjftZ56fwoj6ap4QZI8P+M7R//X
|
||||
A4r11+ldDRsJRnLi6kCJK/ta2pKGuUvFeVqDDc/wNfBUMkmUeyZ9AcNoxFNDMmZT
|
||||
sEhj6mTHcKlo+BcVdYMO4FrrgXkuRS0gY82qZucHRGQh3G1QPs4di7pVCopXuWjQ
|
||||
8foI+SSRFqfcdPS5ljVyLV1g+RVBJnGYQiCM/JAPokRZpimZherxsmdnAW1A/XR1
|
||||
/LXHw/5upwkouzsjFTEM8g1WDwtfp3HGFnHUxVHSe2lXI/2o+DZBU/ILEpxrgQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAl+Qnlg3dSlyYi34yF2D35g9PhU6o5gm3qN22pEdDTOrTEihn
|
||||
F6lqXqieDAaTTYehilqSsBjdZN8uTeJQ5Dsr/g8n43y8KCQFIcpNJLldV+pNEZyd
|
||||
CK3RsPr4+GgWGdpmA5Za8VlRgilYNVzSmABz9LZoa33YIjMSQ0BftAFnFl6N0ikD
|
||||
wPuNL1A40EaE121QeEQgUTbcWcrJ1vJkJgcSGK6blVOy1dmHL4ABoD+n1+abDsoK
|
||||
M1YzXOgci8rbNUTS6P/2j4VW+MZRnM6rFCbo7wW11IUYSyShhTJoWFMdtc7zmQdT
|
||||
VBloRhKU0Ok1QDVr6vO+3PKriUcWY0cLFRcsGwIDAQABAoIBACCfhFEaUTkzgiAT
|
||||
zrZuwU/MYgwDxQsDc0r1s9C87ZuLpCH2Q441eP8zwB5dGy4/v1Zz9aWU8ZhExzE7
|
||||
NpyOiPhcUPjvRYppkiCbzs3gckf7runldWTz0GHuxaK02GpdGiQTGx1TTcpjDusH
|
||||
MMQs9LrOosbTlKRjy7xeCzAiTP3kpGRw0+C8ey5GJ6PxchPQVDp0ONlfUjpsPO+c
|
||||
FussLv5zg0UwaI62HPuJCGYEOXF8DLKcq/0YuZjesdbyrRzJ3B4KVVsG07BOWpoc
|
||||
4Rn7E45oa1nXclfAo2ivJPuNsa/77lYtJnk+/o48U1UwzysjfYvrtZ6QeJ9nNcG9
|
||||
9bbSsmECgYEAxZVHZTwoEVsa9rqWFs1gBU4ZziH57Sxt42zD0uQ5cBRvtAbNqRo6
|
||||
C/nnSuJEdxanPB8YRCkV2iJcsrrVY2AuEci1WJyGdCoP5LMl1DEUEYIRsot1hxL8
|
||||
l0Cab2IwpHZ52hYpEfR/Zfa7G2/UBJ+sLu8IDwNqGxqljFCzmO4PSBcCgYEAxMyJ
|
||||
TCPGGX8Rk6t1GoBxGl97OrsOdKNQsKgk/c91tsZKqItUGeYhx0YS29xg5uJ3WNmN
|
||||
3I9LW3RyVrn2DIn1fftKe4PCvy0/bf7Wr1U2PeaD8vLgWbos7fHn0cYlJInMABV2
|
||||
8QQheCOj+fhSummiwqH7OhorGQ4Y+Gnzjkqrep0CgYA5pMOflV1bMuk68lS3clOB
|
||||
OLfum5r+xueIYkL/U/Yt7MhqDVIS88Pbmp0QC9NNqx4/PBMoT5RAf6XrvvZid7z5
|
||||
E0VVBNV1LpBUeK+gqHDiasAfBvDS54cp2X8038CxOp9yMOTqiBpi9QjBiG6iqrLh
|
||||
PntrZeOe5LdHqIO9KjbrIQKBgBaEvPUcZs+GDbHS/axRpB97a3NV8hqAkXwVUV5F
|
||||
fdezKtnMT4xDG/xcVU4ZEnF42mUtR6FEOEA3u9mWn8PhiVioB2bIteEAQXDJpzEa
|
||||
1AETPmfvSKKbvgZgFsGXJarfpZsg2aJMcbP4iAvTUUwJSFlzBXcphWLxjQPnw7m1
|
||||
a5e1AoGBALK70cpPmDMtKp3kmmTIDVlry42rMH/vSd31uXeEuO7xGOA2ijzpgoU2
|
||||
sS7sD/Rf4m+3rJ5E+ys5aWi0vffnSBcLCxXJQS0Ck4lK+hTmPucHcZKy3o/cJNEM
|
||||
rhkNdLdtzhtKMwbBcKeFAHdnp+2yzFOrjbbRKFFyirWfOZ9eVoZ3
|
||||
-----END RSA PRIVATE KEY-----
|
src/go/docker/test/ssl/rootCA.crt (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIE4jCCAsqgAwIBAgIBATANBgkqhkiG9w0BAQsFADARMQ8wDQYDVQQDEwZyb290
|
||||
Q0EwHhcNMTgwOTA2MTEwNDM0WhcNMjAwMzA2MTEwNDM0WjARMQ8wDQYDVQQDEwZy
|
||||
b290Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDqVvFpIft91o1x
|
||||
ALjDQ+zoCke2daQ5tntwbDwosbiGB58Jz5p/KVwGHQvYIt5yKSzc7KN7OkTFF8Xt
|
||||
QGbVY2ZHOONHHZHOtuiBPz05fE/HDPxic2MO9jN4GGTphgFDBBfaEaF0G/19Rffk
|
||||
1wtB8PoOY3MU0mSTxT1q2Ka2yY2VRbvoPxo7qbhCfXuAu1cA8RmbYCWQzGsqxPC6
|
||||
s7cen9C5IOhHB/osQcI7ZoSL2fkiDch8SLFBPj7W5nofjH+P4Xncm2c65tHSy4pI
|
||||
hbYW44NDR9o2RS1OdD2GhS9MHBppzQGAnXM3yxsbKDyzjcxZpIjwMlxaGz97y404
|
||||
8ROET8Hu7CoOK9kPA20rmhpX/3ET7kiHs2f4/BlD7xNVL74tr1Plva/C8DcCWDC8
|
||||
sf1PW4RJsuwadbkMAST4MY87HknIiN4NGKE6mSlfukaKKkW1HdCY7ynpCyv3Uru3
|
||||
FiD5XrphSvonZbSfcnKjYQVKEudJWyMbdoO5JX6cDIb7QP3jsKADzedwrtWBWx2Z
|
||||
CxWOJPeVan/I6OfV45q3LZFAsNjK2gquOe/3WmJKpO+EspivY+Fv/32IAACmjoY/
|
||||
90Szf6YUKEE1Etpj9PT2gqmFleOx51A7jg3z4wUl3KI8CTcdVlDogct+4CHiQfJU
|
||||
4ajXTqd3O0qGbulQPrZuhPZWBSbVqQIDAQABo0UwQzAOBgNVHQ8BAf8EBAMCAQYw
|
||||
EgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU1WOTBafpqvImxGUiw6viAUwh
|
||||
KcUwDQYJKoZIhvcNAQELBQADggIBAMEw6cOWi7s/6A5cMObSbbfS91yXx4tjFda/
|
||||
5lJ+7gxO935pQas2ZppxVpGUZezXH5rYl8bR/xGTZ1SLDqp0mjphVp4G6VQFtonK
|
||||
E9nNRuVK9jDJ41IXjWEtdgeig5Sf6hRUbwTDTBulhSviQQzo7hQUpSknMC1UNAgy
|
||||
op3q1dluBaG8BWi9aZu0WL5VdxdQdTCAXSrYqmeGZlc0IgiNiebRmkQwNImnvfzb
|
||||
WmrCK8rThNdXml7G/BD+m9na1OwUVoee1oohbHmxH1YsNwe1rSEBL7oAHzNi695H
|
||||
QrriZWu7t7QdO5ZITGZpzmVU1nrwSB/VgPH0tBAUeZSifctNII9NuW9FS1h3Gys1
|
||||
JV2cwQYVCLK9+M/VhRdSv6u+UCHE1TtZwHmSKYjcdN52pUEnWZNtlwPyrJ7cbSEj
|
||||
Wrq+iZBBO9qcPg20ldYLkjv1QlOGLnVbl2K9ePTTYbUaGo0DLGlA6E2lVjoD8FvS
|
||||
DQYS6qQGHCgVgOPhca8FOCxKEfMvXSzKOF9eGn0rnzsUcJbiYxNArjDDKSRSyMhD
|
||||
2TfBupFV+tYM8OXBDArgk464IZnjsrT4DeQQ+WOtEm3kHo/NVhZ/6A1uV/JyQhkF
|
||||
D6FSNoKvWz3LIC5v42+hvj6teAk4wC9tFk4Q76c2PQxiwY1Ur8ySVUYiIv8bETCt
|
||||
nQT44DuY
|
||||
-----END CERTIFICATE-----
|
src/go/internal/testutils/env.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package testutils

import (
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"time"
)

const (
	envMongoDBShard1ReplsetName    = "TEST_MONGODB_S1_RS"
	envMongoDBShard1PrimaryPort    = "TEST_MONGODB_S1_PRIMARY_PORT"
	envMongoDBShard1Secondary1Port = "TEST_MONGODB_S1_SECONDARY1_PORT"
	envMongoDBShard1Secondary2Port = "TEST_MONGODB_S1_SECONDARY2_PORT"
	//
	envMongoDBShard2ReplsetName    = "TEST_MONGODB_S2_RS"
	envMongoDBShard2PrimaryPort    = "TEST_MONGODB_S2_PRIMARY_PORT"
	envMongoDBShard2Secondary1Port = "TEST_MONGODB_S2_SECONDARY1_PORT"
	envMongoDBShard2Secondary2Port = "TEST_MONGODB_S2_SECONDARY2_PORT"
	//
	envMongoDBShard3ReplsetName    = "TEST_MONGODB_S3_RS"
	envMongoDBShard3PrimaryPort    = "TEST_MONGODB_S3_PRIMARY_PORT"
	envMongoDBShard3Secondary1Port = "TEST_MONGODB_S3_SECONDARY1_PORT"
	envMongoDBShard3Secondary2Port = "TEST_MONGODB_S3_SECONDARY2_PORT"
	//
	envMongoDBConfigsvrReplsetName = "TEST_MONGODB_CONFIGSVR_RS"
	envMongoDBConfigsvr1Port       = "TEST_MONGODB_CONFIGSVR1_PORT"
	envMongoDBConfigsvr2Port       = "TEST_MONGODB_CONFIGSVR2_PORT"
	envMongoDBConfigsvr3Port       = "TEST_MONGODB_CONFIGSVR3_PORT"
	//
	envMongoDBMongosPort = "TEST_MONGODB_MONGOS_PORT"
	//
	envMongoDBUser     = "TEST_MONGODB_ADMIN_USERNAME"
	envMongoDBPassword = "TEST_MONGODB_ADMIN_PASSWORD"
)

var (
	MongoDBHost = "127.0.0.1"
	//
	MongoDBShard1ReplsetName    = os.Getenv(envMongoDBShard1ReplsetName)
	MongoDBShard1PrimaryPort    = os.Getenv(envMongoDBShard1PrimaryPort)
	MongoDBShard1Secondary1Port = os.Getenv(envMongoDBShard1Secondary1Port)
	MongoDBShard1Secondary2Port = os.Getenv(envMongoDBShard1Secondary2Port)
	//
	MongoDBShard2ReplsetName    = os.Getenv(envMongoDBShard2ReplsetName)
	MongoDBShard2PrimaryPort    = os.Getenv(envMongoDBShard2PrimaryPort)
	MongoDBShard2Secondary1Port = os.Getenv(envMongoDBShard2Secondary1Port)
	MongoDBShard2Secondary2Port = os.Getenv(envMongoDBShard2Secondary2Port)
	//
	MongoDBShard3ReplsetName    = os.Getenv(envMongoDBShard3ReplsetName)
	MongoDBShard3PrimaryPort    = os.Getenv(envMongoDBShard3PrimaryPort)
	MongoDBShard3Secondary1Port = os.Getenv(envMongoDBShard3Secondary1Port)
	MongoDBShard3Secondary2Port = os.Getenv(envMongoDBShard3Secondary2Port)
	//
	MongoDBConfigsvrReplsetName = os.Getenv(envMongoDBConfigsvrReplsetName)
	MongoDBConfigsvr1Port       = os.Getenv(envMongoDBConfigsvr1Port)
	MongoDBConfigsvr2Port       = os.Getenv(envMongoDBConfigsvr2Port)
	MongoDBConfigsvr3Port       = os.Getenv(envMongoDBConfigsvr3Port)
	//
	MongoDBMongosPort = os.Getenv(envMongoDBMongosPort)
	MongoDBUser       = os.Getenv(envMongoDBUser)
	MongoDBPassword   = os.Getenv(envMongoDBPassword)
	MongoDBTimeout    = time.Duration(10) * time.Second

	// test mongodb hosts map
	hosts = map[string]map[string]string{
		MongoDBShard1ReplsetName: {
			"primary":    MongoDBHost + ":" + MongoDBShard1PrimaryPort,
			"secondary1": MongoDBHost + ":" + MongoDBShard1Secondary1Port,
			"secondary2": MongoDBHost + ":" + MongoDBShard1Secondary2Port,
		},
		MongoDBShard2ReplsetName: {
			"primary":    MongoDBHost + ":" + MongoDBShard2PrimaryPort,
			"secondary1": MongoDBHost + ":" + MongoDBShard2Secondary1Port,
			"secondary2": MongoDBHost + ":" + MongoDBShard2Secondary2Port,
		},
		MongoDBShard3ReplsetName: {
			"primary":    MongoDBHost + ":" + MongoDBShard3PrimaryPort,
			"secondary1": MongoDBHost + ":" + MongoDBShard3Secondary1Port,
			"secondary2": MongoDBHost + ":" + MongoDBShard3Secondary2Port,
		},
		MongoDBConfigsvrReplsetName: {
			"primary": MongoDBHost + ":" + MongoDBConfigsvr1Port,
		},
	}

	// The values here are just placeholders. They will be overridden by init()
	basedir              string
	MongoDBSSLDir        = "../docker/test/ssl"
	MongoDBSSLPEMKeyFile = filepath.Join(MongoDBSSLDir, "client.pem")
	MongoDBSSLCACertFile = filepath.Join(MongoDBSSLDir, "rootCA.crt")
)

func init() {
	MongoDBSSLDir = filepath.Join(BaseDir(), "docker/test/ssl")
	MongoDBSSLPEMKeyFile = filepath.Join(MongoDBSSLDir, "client.pem")
	MongoDBSSLCACertFile = filepath.Join(MongoDBSSLDir, "rootCA.crt")
}

// BaseDir returns the project's root dir by asking git
func BaseDir() string {
	if basedir != "" {
		return basedir
	}
	out, err := exec.Command("git", "rev-parse", "--show-toplevel").Output()
	if err != nil {
		return ""
	}

	basedir = strings.TrimSpace(string(out))
	return basedir
}

func GetMongoDBAddr(rs, name string) string {
	if _, ok := hosts[rs]; !ok {
		return ""
	}
	replset := hosts[rs]
	if host, ok := replset[name]; ok {
		return host
	}
	return ""
}

func GetMongoDBReplsetAddrs(rs string) []string {
	addrs := []string{}
	if _, ok := hosts[rs]; !ok {
		return addrs
	}
	for _, host := range hosts[rs] {
		addrs = append(addrs, host)
	}
	return addrs
}
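These helpers are what the migrated tests use to locate the docker-compose cluster. The following is a small illustrative sketch (not part of the commit) of building a connection URI from them; the URI format matches what profiler_test.go constructs below:

```go
package main

import (
	"fmt"

	tu "github.com/percona/percona-toolkit/src/go/internal/testutils"
)

func main() {
	// Resolve the shard 1 primary from the env-driven host map defined above.
	addr := tu.GetMongoDBAddr(tu.MongoDBShard1ReplsetName, "primary")
	// Credentials come from TEST_MONGODB_ADMIN_USERNAME / TEST_MONGODB_ADMIN_PASSWORD.
	uri := fmt.Sprintf("mongodb://%s:%s@%s", tu.MongoDBUser, tu.MongoDBPassword, addr)
	fmt.Println(uri)
}
```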
@@ -1,13 +1,14 @@
package profiler

import (
	"context"
	"sync"
	"time"

	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
	"github.com/percona/percona-toolkit/src/go/mongolib/stats"
	"github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter"
	"github.com/percona/pmgo"
	"go.mongodb.org/mongo-driver/mongo"
)

var (
@@ -20,16 +21,16 @@ type Profiler interface {
	QueriesChan() chan stats.Queries
	TimeoutsChan() <-chan time.Time
	FlushQueries()
	Start()
	Start(context.Context)
	Stop()
}

type Profile struct {
	// dependencies
	iterator pmgo.IterManager
	filters  []filter.Filter
	ticker   <-chan time.Time
	stats    Stats
	cursor  *mongo.Cursor
	filters []filter.Filter
	ticker  <-chan time.Time
	stats   Stats

	// internal
	queriesChan chan stats.Queries
@@ -47,13 +48,12 @@ type Profile struct {
	stopWaitGroup sync.WaitGroup
}

func NewProfiler(iterator pmgo.IterManager, filters []filter.Filter, ticker <-chan time.Time, stats Stats) Profiler {
func NewProfiler(cursor *mongo.Cursor, filters []filter.Filter, ticker <-chan time.Time, stats Stats) Profiler {
	return &Profile{
		// dependencies
		iterator: iterator,
		filters:  filters,
		ticker:   ticker,
		stats:    stats,
		cursor:  cursor,
		filters: filters,
		ticker:  ticker,
		stats:   stats,

		// internal
		docsChan: make(chan proto.SystemProfile, DocsBufferSize),
@@ -70,14 +70,14 @@ func (p *Profile) QueriesChan() chan stats.Queries {
	return p.queriesChan
}

func (p *Profile) Start() {
func (p *Profile) Start(ctx context.Context) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if !p.running {
		p.running = true
		p.queriesChan = make(chan stats.Queries)
		p.stopChan = make(chan bool)
		go p.getData()
		go p.getData(ctx)
	}
}

@@ -100,43 +100,39 @@ func (p *Profile) TimeoutsChan() <-chan time.Time {
	return p.timeoutsChan
}

func (p *Profile) getData() {
	go p.getDocs()
func (p *Profile) getData(ctx context.Context) {
	go p.getDocs(ctx)
	p.stopWaitGroup.Add(1)
	defer p.stopWaitGroup.Done()

MAIN_GETDATA_LOOP:
	for {
		select {
		case <-p.ticker:
			p.FlushQueries()
		case <-p.stopChan:
			// Close the iterator to break the loop on getDocs
			p.iterator.Close()
			break MAIN_GETDATA_LOOP
			p.cursor.Close(ctx)
			return
		}
	}
	p.stopWaitGroup.Done()
}

func (p *Profile) getDocs() {
func (p *Profile) getDocs(ctx context.Context) {
	defer p.Stop()
	defer p.FlushQueries()

	var doc proto.SystemProfile

	for p.iterator.Next(&doc) || p.iterator.Timeout() {
		if p.iterator.Timeout() {
			select {
			case p.timeoutsChan <- time.Now().UTC():
			default:
			}
			continue
	for p.cursor.Next(ctx) {
		if err := p.cursor.Decode(&doc); err != nil {
			p.lastError = err
			return
		}
		valid := true
		for _, filter := range p.filters {
			if !filter(doc) {
				valid = false
				break
				return
			}
		}
		if !valid {
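After this change the profiler consumes a *mongo.Cursor directly instead of a pmgo iterator. The following is an illustrative sketch only (not part of the commit) of wiring the new API together, written as if it lived in the profiler package; the helper name runCursorProfiler and the database name are hypothetical:

```go
package profiler

import (
	"context"
	"fmt"
	"time"

	"github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter"
	"github.com/percona/percona-toolkit/src/go/mongolib/stats"
	"github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
)

// runCursorProfiler is a hypothetical helper: it opens a plain cursor over
// system.profile and returns the first batch of aggregated queries produced
// by the cursor-based profiler introduced in this commit.
func runCursorProfiler(ctx context.Context, client *mongo.Client, db string) (stats.Queries, error) {
	cursor, err := client.Database(db).Collection("system.profile").Find(ctx, primitive.M{})
	if err != nil {
		return nil, err
	}

	fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS)
	s := stats.New(fp)
	prof := NewProfiler(cursor, []filter.Filter{}, nil, s)
	prof.Start(ctx)
	defer prof.Stop()

	select {
	case queries := <-prof.QueriesChan():
		return queries, nil
	case <-time.After(10 * time.Second):
		return nil, fmt.Errorf("timed out waiting for profiled queries")
	}
}
```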
@@ -1,20 +1,21 @@
package profiler

import (
	"context"
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	tu "github.com/percona/percona-toolkit/src/go/internal/testutils"
	"github.com/percona/percona-toolkit/src/go/lib/tutil"
	"github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter"
	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
	"github.com/percona/percona-toolkit/src/go/mongolib/stats"
	"github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter"
	"github.com/percona/pmgo/pmgomock"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

const (
@@ -42,319 +43,69 @@ func TestMain(m *testing.M) {
|
||||
}
|
||||
|
||||
func TestRegularIterator(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
docs := []proto.SystemProfile{}
|
||||
err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs.json", &docs)
|
||||
uri := fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard1PrimaryPort)
|
||||
client, err := mongo.NewClient(options.Client().ApplyURI(uri))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot load samples: %s", err.Error())
|
||||
t.Fatalf("Cannot create a new MongoDB client: %s", err)
|
||||
}
|
||||
|
||||
iter := pmgomock.NewMockIterManager(ctrl)
|
||||
gomock.InOrder(
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).Return(false),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Close(),
|
||||
)
|
||||
filters := []filter.Filter{}
|
||||
fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS)
|
||||
s := stats.New(fp)
|
||||
prof := NewProfiler(iter, filters, nil, s)
|
||||
ctx := context.Background()
|
||||
|
||||
firstSeen, _ := time.Parse(time.RFC3339Nano, "2017-04-01T23:01:19.914+00:00")
|
||||
lastSeen, _ := time.Parse(time.RFC3339Nano, "2017-04-01T23:01:20.214+00:00")
|
||||
want := stats.Queries{
|
||||
{
|
||||
ID: "95575e896c2830043dc333cb8ee61339",
|
||||
Namespace: "samples.col1",
|
||||
Operation: "FIND",
|
||||
Query: "{\"ns\":\"samples.col1\",\"op\":\"query\",\"query\":{\"find\":\"col1\",\"shardVersion\":[0,\"000000000000000000000000\"]}}\n",
|
||||
Fingerprint: "FIND col1 find",
|
||||
FirstSeen: firstSeen,
|
||||
LastSeen: lastSeen,
|
||||
TableScan: false,
|
||||
Count: 2,
|
||||
NReturned: []float64{50, 75},
|
||||
NScanned: []float64{100, 75},
|
||||
QueryTime: []float64{0, 1},
|
||||
ResponseLength: []float64{1.06123e+06, 1.06123e+06},
|
||||
},
|
||||
if err := client.Connect(ctx); err != nil {
|
||||
t.Fatalf("Cannot connect to MongoDB: %s", err)
|
||||
}
|
||||
prof.Start()
|
||||
defer prof.Stop()
|
||||
select {
|
||||
case queries := <-prof.QueriesChan():
|
||||
if !reflect.DeepEqual(queries, want) {
|
||||
t.Errorf("invalid queries. \nGot: %#v,\nWant: %#v\n", queries, want)
|
||||
}
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Error("Didn't get any query")
|
||||
|
||||
database := "test"
|
||||
// Disable the profiler and drop the db. This should also remove the system.profile collection
|
||||
// so the stats should be re-initialized
|
||||
res := client.Database("admin").RunCommand(ctx, primitive.M{"profile": 0})
|
||||
if res.Err() != nil {
|
||||
t.Fatalf("Cannot enable profiler: %s", res.Err())
|
||||
}
|
||||
}
|
||||
client.Database(database).Drop(ctx)
|
||||
|
||||
func TestIteratorTimeout(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
// re-enable the profiler
|
||||
res = client.Database("admin").RunCommand(ctx, primitive.M{"profile": 2, "slowms": 2})
|
||||
if res.Err() != nil {
|
||||
t.Fatalf("Cannot enable profiler: %s", res.Err())
|
||||
}
|
||||
|
||||
docs := []proto.SystemProfile{}
|
||||
err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs.json", &docs)
|
||||
// run some queries to have something to profile
|
||||
count := 1000
|
||||
for j := 0; j < count; j++ {
|
||||
client.Database("test").Collection("testc").InsertOne(ctx, primitive.M{"number": j})
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
}
|
||||
|
||||
cursor, err := client.Database(database).Collection("system.profile").Find(ctx, primitive.M{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot load samples: %s", err.Error())
|
||||
panic(err)
|
||||
}
|
||||
|
||||
iter := pmgomock.NewMockIterManager(ctrl)
|
||||
gomock.InOrder(
|
||||
iter.EXPECT().Next(gomock.Any()).Return(true),
|
||||
iter.EXPECT().Timeout().Return(true),
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).Return(false),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
// When there are no more docs, iterator will close
|
||||
iter.EXPECT().Close(),
|
||||
)
|
||||
filters := []filter.Filter{}
|
||||
|
||||
fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS)
|
||||
s := stats.New(fp)
|
||||
prof := NewProfiler(iter, filters, nil, s)
|
||||
prof := NewProfiler(cursor, filters, nil, s)
|
||||
prof.Start(ctx)
|
||||
|
||||
firstSeen, _ := time.Parse(time.RFC3339Nano, "2017-04-01T23:01:19.914+00:00")
|
||||
lastSeen, _ := time.Parse(time.RFC3339Nano, "2017-04-01T23:01:19.914+00:00")
|
||||
want := stats.Queries{
|
||||
{
|
||||
ID: "95575e896c2830043dc333cb8ee61339",
|
||||
Namespace: "samples.col1",
|
||||
Operation: "FIND",
|
||||
Query: "{\"ns\":\"samples.col1\",\"op\":\"query\",\"query\":{\"find\":\"col1\",\"shardVersion\":[0,\"000000000000000000000000\"]}}\n",
|
||||
Fingerprint: "FIND col1 find",
|
||||
FirstSeen: firstSeen,
|
||||
LastSeen: lastSeen,
|
||||
TableScan: false,
|
||||
Count: 1,
|
||||
NReturned: []float64{75},
|
||||
NScanned: []float64{75},
|
||||
QueryTime: []float64{1},
|
||||
ResponseLength: []float64{1.06123e+06},
|
||||
},
|
||||
}
|
||||
queries := <-prof.QueriesChan()
|
||||
found := false
|
||||
valid := false
|
||||
|
||||
prof.Start()
|
||||
defer prof.Stop()
|
||||
gotTimeout := false
|
||||
|
||||
// Get a timeout
|
||||
select {
|
||||
case <-prof.TimeoutsChan():
|
||||
gotTimeout = true
|
||||
case <-prof.QueriesChan():
|
||||
t.Error("Got queries before timeout")
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Error("Timeout checking timeout")
|
||||
}
|
||||
if !gotTimeout {
|
||||
t.Error("Didn't get a timeout")
|
||||
}
|
||||
|
||||
// After the first document returned a timeout, we should still receive the second document
|
||||
select {
|
||||
case queries := <-prof.QueriesChan():
|
||||
if !reflect.DeepEqual(queries, want) {
|
||||
t.Errorf("invalid queries. \nGot: %#v,\nWant: %#v\n", queries, want)
|
||||
}
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Error("Didn't get any query after 2 seconds")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTailIterator(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
docs := []proto.SystemProfile{}
|
||||
err := tutil.LoadJson(vars.RootPath+samples+"profiler_docs.json", &docs)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot load samples: %s", err.Error())
|
||||
}
|
||||
|
||||
sleep := func(param interface{}) {
|
||||
time.Sleep(1500 * time.Millisecond)
|
||||
}
|
||||
|
||||
iter := pmgomock.NewMockIterManager(ctrl)
|
||||
gomock.InOrder(
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
// A Tail iterator will wait if the are no available docs.
|
||||
// Do a 1500 ms sleep before returning the second doc to simulate a tail wait
|
||||
// and to let the ticker tick
|
||||
iter.EXPECT().Next(gomock.Any()).Do(sleep).SetArg(0, docs[1]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).Return(false),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Close(),
|
||||
)
|
||||
|
||||
filters := []filter.Filter{}
|
||||
ticker := time.NewTicker(time.Second)
|
||||
fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS)
|
||||
s := stats.New(fp)
|
||||
prof := NewProfiler(iter, filters, ticker.C, s)
|
||||
|
||||
want := stats.Queries{
|
||||
{
|
||||
ID: "95575e896c2830043dc333cb8ee61339",
|
||||
Namespace: "samples.col1",
|
||||
Operation: "FIND",
|
||||
Query: "{\"ns\":\"samples.col1\",\"op\":\"query\",\"query\":{\"find\":\"col1\",\"shardVersion\":[0,\"000000000000000000000000\"]}}\n",
|
||||
Fingerprint: "FIND col1 find",
|
||||
FirstSeen: parseDate("2017-04-01T23:01:20.214+00:00"),
|
||||
LastSeen: parseDate("2017-04-01T23:01:20.214+00:00"),
|
||||
TableScan: false,
|
||||
Count: 1,
|
||||
NReturned: []float64{50},
|
||||
NScanned: []float64{100},
|
||||
QueryTime: []float64{0},
|
||||
ResponseLength: []float64{1.06123e+06},
|
||||
},
|
||||
{
|
||||
ID: "95575e896c2830043dc333cb8ee61339",
|
||||
Namespace: "samples.col1",
|
||||
Operation: "FIND",
|
||||
Query: "{\"ns\":\"samples.col1\",\"op\":\"query\",\"query\":{\"find\":\"col1\",\"shardVersion\":[0,\"000000000000000000000000\"]}}\n",
|
||||
Fingerprint: "FIND col1 find",
|
||||
FirstSeen: parseDate("2017-04-01T23:01:19.914+00:00"),
|
||||
LastSeen: parseDate("2017-04-01T23:01:19.914+00:00"),
|
||||
TableScan: false,
|
||||
Count: 1,
|
||||
NReturned: []float64{75},
|
||||
NScanned: []float64{75},
|
||||
QueryTime: []float64{1},
|
||||
ResponseLength: []float64{1.06123e+06},
|
||||
},
|
||||
}
|
||||
prof.Start()
|
||||
defer prof.Stop()
|
||||
index := 0
|
||||
// Since the mocked iterator has a Sleep(1500 ms) between Next method calls,
|
||||
// we are going to have two ticker ticks and on every tick it will return one document.
|
||||
for index < 2 {
|
||||
select {
|
||||
case queries := <-prof.QueriesChan():
|
||||
if !reflect.DeepEqual(queries, stats.Queries{want[index]}) {
|
||||
t.Errorf("invalid queries. \nGot: %#v,\nWant: %#v\n", queries, want)
|
||||
for _, query := range queries {
|
||||
if query.Namespace == "test.testc" && query.Operation == "INSERT" {
|
||||
found = true
|
||||
if query.Fingerprint == "INSERT testc" && query.Count == count {
|
||||
valid = true
|
||||
}
|
||||
index++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCalcStats(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
docs := []proto.SystemProfile{}
|
||||
err := tutil.LoadBson(vars.RootPath+samples+"profiler_docs_stats.json", &docs)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot load samples: %s", err.Error())
|
||||
}
|
||||
|
||||
want := []stats.QueryStats{}
|
||||
err = tutil.LoadBson(vars.RootPath+samples+"profiler_docs_stats.want.json", &want)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot load expected results: %s", err.Error())
|
||||
}
|
||||
|
||||
iter := pmgomock.NewMockIterManager(ctrl)
|
||||
gomock.InOrder(
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[2]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).Return(false),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Close(),
|
||||
)
|
||||
|
||||
filters := []filter.Filter{}
|
||||
fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS)
|
||||
s := stats.New(fp)
|
||||
prof := NewProfiler(iter, filters, nil, s)
|
||||
|
||||
prof.Start()
|
||||
defer prof.Stop()
|
||||
|
||||
select {
|
||||
case queries := <-prof.QueriesChan():
|
||||
s := queries.CalcQueriesStats(1)
|
||||
if os.Getenv("UPDATE_SAMPLES") != "" {
|
||||
tutil.WriteJson(vars.RootPath+samples+"profiler_docs_stats.want.json", s)
|
||||
}
|
||||
if !reflect.DeepEqual(s, want) {
|
||||
t.Errorf("Invalid stats.\nGot:%#v\nWant: %#v\n", s, want)
|
||||
}
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Error("Didn't get any query")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCalcTotalStats(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
docs := []proto.SystemProfile{}
|
||||
err := tutil.LoadBson(vars.RootPath+samples+"profiler_docs_stats.json", &docs)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot load samples: %s", err.Error())
|
||||
}
|
||||
|
||||
want := stats.QueryStats{}
|
||||
err = tutil.LoadBson(vars.RootPath+samples+"profiler_docs_total_stats.want.json", &want)
|
||||
if err != nil && !tutil.ShouldUpdateSamples() {
|
||||
t.Fatalf("cannot load expected results: %s", err.Error())
|
||||
}
|
||||
|
||||
iter := pmgomock.NewMockIterManager(ctrl)
|
||||
gomock.InOrder(
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[0]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[1]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).SetArg(0, docs[2]).Return(true),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Next(gomock.Any()).Return(false),
|
||||
iter.EXPECT().Timeout().Return(false),
|
||||
iter.EXPECT().Close(),
|
||||
)
|
||||
|
||||
filters := []filter.Filter{}
|
||||
fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS)
|
||||
s := stats.New(fp)
|
||||
prof := NewProfiler(iter, filters, nil, s)
|
||||
|
||||
prof.Start()
|
||||
defer prof.Stop()
|
||||
select {
|
||||
case queries := <-prof.QueriesChan():
|
||||
s := queries.CalcTotalQueriesStats(1)
|
||||
if os.Getenv("UPDATE_SAMPLES") != "" {
|
||||
fmt.Println("Updating samples: " + vars.RootPath + samples + "profiler_docs_total_stats.want.json")
|
||||
err := tutil.WriteJson(vars.RootPath+samples+"profiler_docs_total_stats.want.json", s)
|
||||
if err != nil {
|
||||
fmt.Printf("cannot update samples: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(s, want) {
|
||||
t.Errorf("Invalid stats.\nGot:%#v\nWant: %#v\n", s, want)
|
||||
}
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Error("Didn't get any query")
|
||||
}
|
||||
|
||||
if !found {
|
||||
t.Errorf("Insert query was not found")
|
||||
}
|
||||
if !valid {
|
||||
t.Errorf("Query stats are not valid")
|
||||
}
|
||||
}
|
||||
|
@@ -1,5 +1,7 @@
|
||||
package proto
|
||||
|
||||
import "go.mongodb.org/mongo-driver/bson/primitive"
|
||||
|
||||
type Extra struct {
|
||||
LibcVersion string `bson:"libcVersion"`
|
||||
PageSize float64 `bson:"pageSize"`
|
||||
@@ -19,13 +21,13 @@ type Os struct {
|
||||
}
|
||||
|
||||
type System struct {
|
||||
CurrentTime string `bson:"currentTime"`
|
||||
Hostname string `bson:"hostname"`
|
||||
MemSizeMB float64 `bson:"memSizeMB"`
|
||||
NumCores float64 `bson:"numCores"`
|
||||
NumaEnabled bool `bson:"numaEnabled"`
|
||||
CpuAddrSize float64 `bson:"cpuAddrSize"`
|
||||
CpuArch string `bson:"cpuArch"`
|
||||
CurrentTime primitive.DateTime `bson:"currentTime"`
|
||||
Hostname string `bson:"hostname"`
|
||||
MemSizeMB float64 `bson:"memSizeMB"`
|
||||
NumCores float64 `bson:"numCores"`
|
||||
NumaEnabled bool `bson:"numaEnabled"`
|
||||
CpuAddrSize float64 `bson:"cpuAddrSize"`
|
||||
CpuArch string `bson:"cpuArch"`
|
||||
}
|
||||
|
||||
// HostInfo has exported field for the 'hostInfo' command plus some other
|
||||
|
18
src/go/mongolib/proto/listdatabases.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package proto
|
||||
|
||||
import "go.mongodb.org/mongo-driver/bson/primitive"
|
||||
|
||||
// CollectionEntry represents an entry for ListCollections
|
||||
type CollectionEntry struct {
|
||||
Name string `bson:"name"`
|
||||
Type string `bson:"type"`
|
||||
Options struct {
|
||||
Capped bool `bson:"capped"`
|
||||
Size int64 `bson:"size"`
|
||||
AutoIndexID bool `bson:"autoIndexId"`
|
||||
} `bson:"options"`
|
||||
Info struct {
|
||||
ReadOnly bool `bson:"readOnly"`
|
||||
UUID primitive.Binary `bson:"uuid"`
|
||||
} `bson:"info"`
|
||||
}
|
@@ -4,6 +4,7 @@ import (
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
)
|
||||
|
||||
type OplogEntry struct {
|
||||
@@ -19,7 +20,7 @@ type OplogInfo struct {
|
||||
Hostname string
|
||||
Size int64
|
||||
UsedMB int64
|
||||
TimeDiff int64
|
||||
TimeDiff time.Duration
|
||||
TimeDiffHours float64
|
||||
Running string // TimeDiffHours in human readable format
|
||||
TFirst time.Time
|
||||
@@ -41,13 +42,13 @@ func (s OpLogs) Less(i, j int) bool {
|
||||
}
|
||||
|
||||
type OplogRow struct {
|
||||
Timestamp int64 `bson:"ts,omitempty"`
|
||||
HistoryId int64 `bson:"h,omitempty"`
|
||||
Version int64 `bson:"v,omitempty"`
|
||||
Operation string `bson:"op,omitempty"`
|
||||
Namespace string `bson:"ns,omitempty"`
|
||||
Object bson.D `bson:"o,omitempty"`
|
||||
Query bson.D `bson:"o2,omitempty"`
|
||||
Timestamp primitive.Timestamp `bson:"ts,omitempty"`
|
||||
HistoryId int64 `bson:"h,omitempty"`
|
||||
Version int64 `bson:"v,omitempty"`
|
||||
Operation string `bson:"op,omitempty"`
|
||||
Namespace string `bson:"ns,omitempty"`
|
||||
Object bson.D `bson:"o,omitempty"`
|
||||
Query bson.D `bson:"o2,omitempty"`
|
||||
}
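The visible part of the migration in these structs is the switch from plain integers and strings to the driver's BSON wrapper types. A minimal, self-contained sketch (assumed for illustration, not part of the patch) of how primitive.Timestamp and primitive.DateTime values convert back to time.Time:

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	// An oplog "ts" value: seconds since the epoch plus an ordinal counter.
	ts := primitive.Timestamp{T: 1491087679, I: 1}
	fmt.Println(time.Unix(int64(ts.T), 0).UTC())

	// An "optimeDate"/"currentTime" style value: milliseconds since the epoch.
	dt := primitive.NewDateTimeFromTime(time.Now())
	fmt.Println(dt.Time().UTC())
}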
|
||||
|
||||
type OplogColStats struct {
|
||||
|
@@ -1,5 +1,7 @@
|
||||
package proto
|
||||
|
||||
import "go.mongodb.org/mongo-driver/bson/primitive"
|
||||
|
||||
const (
|
||||
REPLICA_SET_MEMBER_STARTUP = iota
|
||||
REPLICA_SET_MEMBER_PRIMARY
|
||||
@@ -14,35 +16,35 @@ const (
|
||||
)
|
||||
|
||||
type Optime struct {
|
||||
Ts float64 `bson:"ts"` // The Timestamp of the last operation applied to this member of the replica set from the oplog.
|
||||
T float64 `bson:"t"` // The term in which the last applied operation was originally generated on the primary.
|
||||
Ts primitive.Timestamp `bson:"ts"` // The Timestamp of the last operation applied to this member of the replica set from the oplog.
|
||||
T float64 `bson:"t"` // The term in which the last applied operation was originally generated on the primary.
|
||||
}
|
||||
|
||||
type Members struct {
|
||||
Optime *Optime `bson:"optime"` // See Optime struct
|
||||
OptimeDate string `bson:"optimeDate"` // The last entry from the oplog that this member applied.
|
||||
InfoMessage string `bson:"infoMessage"` // A message
|
||||
ID int64 `bson:"_id"` // Server ID
|
||||
Name string `bson:"name"` // server name
|
||||
Health float64 `bson:"health"` // This field conveys if the member is up (i.e. 1) or down (i.e. 0).
|
||||
StateStr string `bson:"stateStr"` // A string that describes state.
|
||||
Uptime float64 `bson:"uptime"` // number of seconds that this member has been online.
|
||||
ConfigVersion float64 `bson:"configVersion"` // revision # of the replica set configuration object from previous iterations of the configuration.
|
||||
Self bool `bson:"self"` // true if this is the server we are currently connected
|
||||
State float64 `bson:"state"` // integer between 0 and 10 that represents the replica state of the member.
|
||||
ElectionTime int64 `bson:"electionTime"` // For the current primary, information regarding the election Timestamp from the operation log.
|
||||
ElectionDate string `bson:"electionDate"` // For the current primary, an ISODate formatted date string that reflects the election date
|
||||
Set string `bson:"-"`
|
||||
Optime map[string]Optime `bson:"optimes"` // See Optime struct
|
||||
OptimeDate primitive.DateTime `bson:"optimeDate"` // The last entry from the oplog that this member applied.
|
||||
InfoMessage string `bson:"infoMessage"` // A message
|
||||
ID int64 `bson:"_id"` // Server ID
|
||||
Name string `bson:"name"` // server name
|
||||
Health float64 `bson:"health"` // This field conveys if the member is up (i.e. 1) or down (i.e. 0).
|
||||
StateStr string `bson:"stateStr"` // A string that describes state.
|
||||
Uptime float64 `bson:"uptime"` // number of seconds that this member has been online.
|
||||
ConfigVersion float64 `bson:"configVersion"` // revision # of the replica set configuration object from previous iterations of the configuration.
|
||||
Self bool `bson:"self"` // true if this is the server we are currently connected
|
||||
State float64 `bson:"state"` // integer between 0 and 10 that represents the replica state of the member.
|
||||
ElectionTime primitive.Timestamp `bson:"electionTime"` // For the current primary, information regarding the election Timestamp from the operation log.
|
||||
ElectionDate primitive.DateTime `bson:"electionDate"` // For the current primary, an ISODate formatted date string that reflects the election date
|
||||
Set string `bson:"-"`
|
||||
StorageEngine StorageEngine
|
||||
}
|
||||
|
||||
// Struct for replSetGetStatus
|
||||
type ReplicaSetStatus struct {
|
||||
Date string `bson:"date"` // Current date
|
||||
MyState float64 `bson:"myState"` // Integer between 0 and 10 that represents the replica state of the current member
|
||||
Term float64 `bson:"term"` // The election count for the replica set, as known to this replica set member. Mongo 3.2+
|
||||
HeartbeatIntervalMillis float64 `bson:"heartbeatIntervalMillis"` // The frequency in milliseconds of the heartbeats. 3.2+
|
||||
Members []Members `bson:"members"` //
|
||||
Ok float64 `bson:"ok"` //
|
||||
Set string `bson:"set"` // Replica set name
|
||||
Date primitive.DateTime `bson:"date"` // Current date
|
||||
MyState float64 `bson:"myState"` // Integer between 0 and 10 that represents the replica state of the current member
|
||||
Term float64 `bson:"term"` // The election count for the replica set, as known to this replica set member. Mongo 3.2+
|
||||
HeartbeatIntervalMillis float64 `bson:"heartbeatIntervalMillis"` // The frequency in milliseconds of the heartbeats. 3.2+
|
||||
Members []Members `bson:"members"` //
|
||||
Ok float64 `bson:"ok"` //
|
||||
Set string `bson:"set"` // Replica set name
|
||||
}
|
||||
|
@@ -1,245 +1,172 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/percona/percona-toolkit/src/go/lib/tutil"
|
||||
"github.com/percona/percona-toolkit/src/go/mongolib/proto"
|
||||
"github.com/percona/pmgo"
|
||||
"github.com/percona/pmgo/pmgomock"
|
||||
"gopkg.in/mgo.v2"
|
||||
"gopkg.in/mgo.v2/bson"
|
||||
tu "github.com/percona/percona-toolkit/src/go/internal/testutils"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
)
|
||||
|
||||
// OK
|
||||
func TestGetReplicasetMembers(t *testing.T) {
|
||||
t.Skip("needs fixed")
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
dialer := pmgomock.NewMockDialer(ctrl)
|
||||
|
||||
session := pmgomock.NewMockSessionManager(ctrl)
|
||||
|
||||
mockrss := proto.ReplicaSetStatus{
|
||||
Date: "",
|
||||
MyState: 1,
|
||||
Term: 0,
|
||||
HeartbeatIntervalMillis: 0,
|
||||
Members: []proto.Members{
|
||||
proto.Members{
|
||||
Optime: nil,
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 0,
|
||||
Name: "localhost:17001",
|
||||
Health: 1,
|
||||
StateStr: "PRIMARY",
|
||||
Uptime: 113287,
|
||||
ConfigVersion: 1,
|
||||
Self: true,
|
||||
State: 1,
|
||||
ElectionTime: 6340960613392449537,
|
||||
ElectionDate: "",
|
||||
Set: ""},
|
||||
proto.Members{
|
||||
Optime: nil,
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 1,
|
||||
Name: "localhost:17002",
|
||||
Health: 1,
|
||||
StateStr: "SECONDARY",
|
||||
Uptime: 113031,
|
||||
ConfigVersion: 1,
|
||||
Self: false,
|
||||
State: 2,
|
||||
ElectionTime: 0,
|
||||
ElectionDate: "",
|
||||
Set: ""},
|
||||
proto.Members{
|
||||
Optime: nil,
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 2,
|
||||
Name: "localhost:17003",
|
||||
Health: 1,
|
||||
StateStr: "SECONDARY",
|
||||
Uptime: 113031,
|
||||
ConfigVersion: 1,
|
||||
Self: false,
|
||||
State: 2,
|
||||
ElectionTime: 0,
|
||||
ElectionDate: "",
|
||||
Set: ""}},
|
||||
Ok: 1,
|
||||
Set: "r1",
|
||||
}
|
||||
expect := []proto.Members{
|
||||
proto.Members{
|
||||
Optime: nil,
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 0,
|
||||
Name: "localhost:17001",
|
||||
Health: 1,
|
||||
StateStr: "PRIMARY",
|
||||
Uptime: 113287,
|
||||
ConfigVersion: 1,
|
||||
Self: true,
|
||||
State: 1,
|
||||
ElectionTime: 6340960613392449537,
|
||||
ElectionDate: "",
|
||||
Set: "r1"},
|
||||
proto.Members{Optime: (*proto.Optime)(nil),
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 1,
|
||||
Name: "localhost:17002",
|
||||
Health: 1,
|
||||
StateStr: "SECONDARY",
|
||||
Uptime: 113031,
|
||||
ConfigVersion: 1,
|
||||
Self: false,
|
||||
State: 2,
|
||||
ElectionTime: 0,
|
||||
ElectionDate: "",
|
||||
Set: "r1"},
|
||||
proto.Members{Optime: (*proto.Optime)(nil),
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 2,
|
||||
Name: "localhost:17003",
|
||||
Health: 1,
|
||||
StateStr: "SECONDARY",
|
||||
Uptime: 113031,
|
||||
ConfigVersion: 1,
|
||||
Self: false,
|
||||
State: 2,
|
||||
ElectionTime: 0,
|
||||
ElectionDate: "",
|
||||
Set: "r1",
|
||||
}}
|
||||
|
||||
database := pmgomock.NewMockDatabaseManager(ctrl)
|
||||
ss := proto.ServerStatus{}
|
||||
tutil.LoadJson("test/sample/serverstatus.json", &ss)
|
||||
|
||||
dialer.EXPECT().DialWithInfo(gomock.Any()).Return(session, nil)
|
||||
session.EXPECT().Run(bson.M{"replSetGetStatus": 1}, gomock.Any()).SetArg(1, mockrss)
|
||||
|
||||
dialer.EXPECT().DialWithInfo(gomock.Any()).Return(session, nil)
|
||||
session.EXPECT().DB("admin").Return(database)
|
||||
database.EXPECT().Run(bson.D{{"serverStatus", 1}, {"recordStats", 1}}, gomock.Any()).SetArg(1, ss)
|
||||
session.EXPECT().Close()
|
||||
|
||||
dialer.EXPECT().DialWithInfo(gomock.Any()).Return(session, nil)
|
||||
session.EXPECT().DB("admin").Return(database)
|
||||
database.EXPECT().Run(bson.D{{"serverStatus", 1}, {"recordStats", 1}}, gomock.Any()).SetArg(1, ss)
|
||||
session.EXPECT().Close()
|
||||
|
||||
dialer.EXPECT().DialWithInfo(gomock.Any()).Return(session, nil)
|
||||
session.EXPECT().DB("admin").Return(database)
|
||||
database.EXPECT().Run(bson.D{{"serverStatus", 1}, {"recordStats", 1}}, gomock.Any()).SetArg(1, ss)
|
||||
session.EXPECT().Close()
|
||||
|
||||
di := &pmgo.DialInfo{Addrs: []string{"localhost"}}
|
||||
rss, err := GetReplicasetMembers(dialer, di)
|
||||
if err != nil {
|
||||
t.Errorf("getReplicasetMembers: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(rss, expect) {
|
||||
t.Errorf("getReplicasetMembers:\ngot %#v\nwant: %#v\n", rss, expect)
|
||||
func TestGetHostnames(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
uri string
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "from_mongos",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBMongosPort),
|
||||
want: []string{"127.0.0.1:17001", "127.0.0.1:17002", "127.0.0.1:17004", "127.0.0.1:17005", "127.0.0.1:17007"},
|
||||
},
|
||||
{
|
||||
name: "from_mongod",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard1PrimaryPort),
|
||||
want: []string{"127.0.0.1:17001", "127.0.0.1:17002", "127.0.0.1:17003"},
|
||||
},
|
||||
{
|
||||
name: "from_non_sharded",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard3PrimaryPort),
|
||||
want: []string{"127.0.0.1:17021", "127.0.0.1:17022", "127.0.0.1:17023"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
client, err := mongo.NewClient(options.Client().ApplyURI(test.uri))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot get a new MongoDB client: %s", err)
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
err = client.Connect(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot connect to MongoDB: %s", err)
|
||||
}
|
||||
|
||||
hostnames, err := GetHostnames(ctx, client)
|
||||
if err != nil {
|
||||
t.Errorf("getHostnames: %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(hostnames, test.want) {
|
||||
t.Errorf("Invalid hostnames from mongos. Got: %+v, want %+v", hostnames, test.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//OK
|
||||
func TestGetHostnames(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
func TestGetServerStatus(t *testing.T) {
|
||||
client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://admin:admin123456@127.0.0.1:17001"))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot get a new MongoDB client: %s", err)
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dialer := pmgomock.NewMockDialer(ctrl)
|
||||
session := pmgomock.NewMockSessionManager(ctrl)
|
||||
|
||||
mockrss := proto.ReplicaSetStatus{
|
||||
Date: "",
|
||||
MyState: 1,
|
||||
Term: 0,
|
||||
HeartbeatIntervalMillis: 0,
|
||||
Members: []proto.Members{
|
||||
proto.Members{
|
||||
Optime: nil,
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 0,
|
||||
Name: "localhost:17001",
|
||||
Health: 1,
|
||||
StateStr: "PRIMARY",
|
||||
Uptime: 113287,
|
||||
ConfigVersion: 1,
|
||||
Self: true,
|
||||
State: 1,
|
||||
ElectionTime: 6340960613392449537,
|
||||
ElectionDate: "",
|
||||
Set: ""},
|
||||
proto.Members{
|
||||
Optime: nil,
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 1,
|
||||
Name: "localhost:17002",
|
||||
Health: 1,
|
||||
StateStr: "SECONDARY",
|
||||
Uptime: 113031,
|
||||
ConfigVersion: 1,
|
||||
Self: false,
|
||||
State: 2,
|
||||
ElectionTime: 0,
|
||||
ElectionDate: "",
|
||||
Set: ""},
|
||||
proto.Members{
|
||||
Optime: nil,
|
||||
OptimeDate: "",
|
||||
InfoMessage: "",
|
||||
ID: 2,
|
||||
Name: "localhost:17003",
|
||||
Health: 1,
|
||||
StateStr: "SECONDARY",
|
||||
Uptime: 113031,
|
||||
ConfigVersion: 1,
|
||||
Self: false,
|
||||
State: 2,
|
||||
ElectionTime: 0,
|
||||
ElectionDate: "",
|
||||
Set: ""}},
|
||||
Ok: 1,
|
||||
Set: "r1",
|
||||
err = client.Connect(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot connect to MongoDB: %s", err)
|
||||
}
|
||||
|
||||
dialer.EXPECT().DialWithInfo(gomock.Any()).Return(session, nil)
|
||||
session.EXPECT().SetMode(mgo.Monotonic, true)
|
||||
session.EXPECT().Run(bson.M{"replSetGetStatus": 1}, gomock.Any()).SetArg(1, mockrss)
|
||||
|
||||
expect := []string{"localhost:17001", "localhost:17002", "localhost:17003"}
|
||||
di := &pmgo.DialInfo{Addrs: []string{"localhost"}}
|
||||
rss, err := GetHostnames(dialer, di)
|
||||
_, err = GetServerStatus(ctx, client)
|
||||
if err != nil {
|
||||
t.Errorf("getHostnames: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(rss, expect) {
|
||||
t.Errorf("getHostnames: got %+v, expected: %+v\n", rss, expect)
|
||||
}
|
||||
|
||||
func TestGetReplicasetMembers(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
uri string
|
||||
want int
|
||||
}{
|
||||
{
|
||||
name: "from_mongos",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBMongosPort),
|
||||
want: 7,
|
||||
},
|
||||
{
|
||||
name: "from_mongod",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard1PrimaryPort),
|
||||
want: 3,
|
||||
},
|
||||
{
|
||||
name: "from_non_sharded",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard3PrimaryPort),
|
||||
want: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
clientOptions := options.Client().ApplyURI(test.uri)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
rsm, err := GetReplicasetMembers(ctx, clientOptions)
|
||||
if err != nil {
|
||||
t.Errorf("Got an error while getting replicaset members: %s", err)
|
||||
}
|
||||
if len(rsm) != test.want {
|
||||
t.Errorf("Invalid number of replicaset members. Want %d, got %d", test.want, len(rsm))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func addToCounters(ss proto.ServerStatus, increment int64) proto.ServerStatus {
|
||||
ss.Opcounters.Command += increment
|
||||
ss.Opcounters.Delete += increment
|
||||
ss.Opcounters.GetMore += increment
|
||||
ss.Opcounters.Insert += increment
|
||||
ss.Opcounters.Query += increment
|
||||
ss.Opcounters.Update += increment
|
||||
return ss
|
||||
func TestGetShardedHosts(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
uri string
|
||||
want int
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "from_mongos",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBMongosPort),
|
||||
want: 2,
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
name: "from_mongod",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard1PrimaryPort),
|
||||
want: 0,
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "from_non_sharded",
|
||||
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard3PrimaryPort),
|
||||
want: 0,
|
||||
err: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
clientOptions := options.Client().ApplyURI(test.uri)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
client, err := mongo.NewClient(clientOptions)
|
||||
if err != nil {
|
||||
t.Errorf("Cannot get a new client for host %s: %s", test.uri, err)
|
||||
}
|
||||
if err := client.Connect(ctx); err != nil {
|
||||
t.Errorf("Cannot connect to host %s: %s", test.uri, err)
|
||||
}
|
||||
|
||||
rsm, err := GetShardedHosts(ctx, client)
|
||||
if (err != nil) != test.err {
|
||||
t.Errorf("Invalid error response. Want %v, got %v", test.err, (err != nil))
|
||||
}
|
||||
if len(rsm) != test.want {
|
||||
t.Errorf("Invalid number of replicaset members. Want %d, got %d", test.want, len(rsm))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -1,49 +1,71 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/percona/percona-toolkit/src/go/mongolib/proto"
|
||||
"github.com/percona/pmgo"
|
||||
"github.com/pkg/errors"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"gopkg.in/mgo.v2" // TODO: Remove this dependency
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
)
|
||||
|
||||
var (
|
||||
CANNOT_GET_QUERY_ERROR = errors.New("cannot get query field from the profile document (it is not a map)")
|
||||
)
|
||||
|
||||
// TODO: Refactor to official mongo-driver.
|
||||
func GetReplicasetMembers(dialer pmgo.Dialer, di *pmgo.DialInfo) ([]proto.Members, error) {
|
||||
hostnames, err := GetHostnames(dialer, di)
|
||||
func GetReplicasetMembers(ctx context.Context, clientOptions *options.ClientOptions) ([]proto.Members, error) {
|
||||
client, err := mongo.NewClient(clientOptions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot get a new client for GetReplicasetMembers")
|
||||
}
|
||||
if err := client.Connect(ctx); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot connect to MongoDB")
|
||||
}
|
||||
|
||||
hostnames, err := GetHostnames(ctx, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := client.Disconnect(ctx); err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot disconnect from %v", clientOptions.Hosts)
|
||||
}
|
||||
|
||||
membersMap := make(map[string]proto.Members)
|
||||
members := []proto.Members{}
|
||||
|
||||
for _, hostname := range hostnames {
|
||||
session, err := dialer.DialWithInfo(getTmpDI(di, hostname))
|
||||
client, err = GetClientForHost(clientOptions, hostname)
|
||||
if err != nil {
|
||||
continue
|
||||
return nil, errors.Wrapf(err, "cannot get a new client to connect to %s", hostname)
|
||||
}
|
||||
|
||||
if err := client.Connect(ctx); err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot connect to %s", hostname)
|
||||
}
|
||||
defer session.Close()
|
||||
session.SetMode(mgo.Monotonic, true)
|
||||
|
||||
cmdOpts := proto.CommandLineOptions{}
|
||||
session.DB("admin").Run(bson.D{{"getCmdLineOpts", 1}, {"recordStats", 1}}, &cmdOpts)
|
||||
// We cannot always get this info; for example, it is not available for hidden hosts,
// so if there is an error, just ignore it
|
||||
res := client.Database("admin").RunCommand(ctx, primitive.D{{"getCmdLineOpts", 1}, {"recordStats", 1}})
|
||||
if res.Err() == nil {
|
||||
if err := res.Decode(&cmdOpts); err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot decode getCmdLineOpts response for host %s", hostname)
|
||||
}
|
||||
}
|
||||
|
||||
rss := proto.ReplicaSetStatus{}
|
||||
if err = session.Run(bson.M{"replSetGetStatus": 1}, &rss); err != nil {
|
||||
res = client.Database("admin").RunCommand(ctx, primitive.M{"replSetGetStatus": 1})
|
||||
if res.Err() != nil {
|
||||
m := proto.Members{
|
||||
Name: hostname,
|
||||
}
|
||||
m.StateStr = strings.ToUpper(cmdOpts.Parsed.Sharding.ClusterRole)
|
||||
|
||||
if serverStatus, err := GetServerStatus(dialer, di, m.Name); err == nil {
|
||||
if serverStatus, err := GetServerStatus(ctx, client); err == nil {
|
||||
m.ID = serverStatus.Pid
|
||||
m.StorageEngine = serverStatus.StorageEngine
|
||||
}
|
||||
@@ -51,12 +73,15 @@ func GetReplicasetMembers(dialer pmgo.Dialer, di *pmgo.DialInfo) ([]proto.Member
|
||||
continue // If a host is a mongos we cannot get info, but it is not a real error
|
||||
}
|
||||
|
||||
if err := res.Decode(&rss); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot decode replSetGetStatus response")
|
||||
}
|
||||
for _, m := range rss.Members {
|
||||
if _, ok := membersMap[m.Name]; ok {
|
||||
continue // already exists
|
||||
}
|
||||
m.Set = rss.Set
|
||||
if serverStatus, err := GetServerStatus(dialer, di, m.Name); err == nil {
|
||||
if serverStatus, err := GetServerStatus(ctx, client); err == nil {
|
||||
m.ID = serverStatus.Pid
|
||||
m.StorageEngine = serverStatus.StorageEngine
|
||||
if cmdOpts.Parsed.Sharding.ClusterRole != "" {
|
||||
@@ -67,7 +92,7 @@ func GetReplicasetMembers(dialer pmgo.Dialer, di *pmgo.DialInfo) ([]proto.Member
|
||||
membersMap[m.Name] = m
|
||||
}
|
||||
|
||||
session.Close()
|
||||
client.Disconnect(ctx)
|
||||
}
|
||||
|
||||
for _, member := range membersMap {
|
||||
@@ -78,41 +103,38 @@ func GetReplicasetMembers(dialer pmgo.Dialer, di *pmgo.DialInfo) ([]proto.Member
|
||||
return members, nil
|
||||
}
|
||||
|
||||
// TODO: Refactor to official mongo-driver.
|
||||
func GetHostnames(dialer pmgo.Dialer, di *pmgo.DialInfo) ([]string, error) {
|
||||
hostnames := []string{di.Addrs[0]}
|
||||
di.Direct = true
|
||||
di.Timeout = 2 * time.Second
|
||||
|
||||
session, err := dialer.DialWithInfo(di)
|
||||
if err != nil {
|
||||
return hostnames, err
|
||||
}
|
||||
session.SetMode(mgo.Monotonic, true)
|
||||
|
||||
func GetHostnames(ctx context.Context, client *mongo.Client) ([]string, error) {
|
||||
// Probably we are connected to an individual member of a replica set
|
||||
rss := proto.ReplicaSetStatus{}
|
||||
if err := session.Run(bson.M{"replSetGetStatus": 1}, &rss); err == nil {
|
||||
res := client.Database("admin").RunCommand(ctx, primitive.M{"replSetGetStatus": 1})
|
||||
if res.Err() == nil {
|
||||
if err := res.Decode(&rss); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot decode replSetGetStatus response for GetHostnames")
|
||||
}
|
||||
return buildHostsListFromReplStatus(rss), nil
|
||||
}
|
||||
|
||||
defer session.Close()
|
||||
|
||||
// Try getShardMap first. If we are connected to a mongos it will return
|
||||
// all hosts, including config hosts
|
||||
var shardsMap proto.ShardsMap
|
||||
err = session.Run("getShardMap", &shardsMap)
|
||||
if err == nil && len(shardsMap.Map) > 0 {
|
||||
smRes := client.Database("admin").RunCommand(ctx, primitive.M{"getShardMap": 1})
|
||||
if smRes.Err() != nil {
|
||||
return nil, errors.Wrap(smRes.Err(), "cannot getShardMap for GetHostnames")
|
||||
}
|
||||
if err := smRes.Decode(&shardsMap); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot decode getShardMap result for GetHostnames")
|
||||
}
|
||||
|
||||
if len(shardsMap.Map) > 0 {
|
||||
// if the only element getShardMap returns is the list of config servers,
|
||||
// it means we are connected to a replicaSet member and getShardMap is not
|
||||
// the answer we want.
|
||||
_, ok := shardsMap.Map["config"]
|
||||
if ok && len(shardsMap.Map) > 1 {
|
||||
if _, ok := shardsMap.Map["config"]; ok {
|
||||
return buildHostsListFromShardMap(shardsMap), nil
|
||||
}
|
||||
}
|
||||
|
||||
return hostnames, nil
|
||||
return nil, fmt.Errorf("cannot get shards map")
|
||||
}
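For context, an illustrative getShardMap reply from a mongos (hostnames, ports and replica set names below are invented to match the test environment; the real reply also carries per-host entries), which is why the presence of the "config" entry is used above to recognize a sharded topology:

// Illustrative only; values are made up.
// {
//     "map": {
//         "config": "csReplSet/127.0.0.1:17007,127.0.0.1:17008,127.0.0.1:17009",
//         "rs1":    "rs1/127.0.0.1:17001,127.0.0.1:17002,127.0.0.1:17003",
//         "rs2":    "rs2/127.0.0.1:17004,127.0.0.1:17005,127.0.0.1:17006"
//     },
//     "ok": 1
// }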
|
||||
|
||||
func buildHostsListFromReplStatus(replStatus proto.ReplicaSetStatus) []string {
|
||||
@@ -138,6 +160,8 @@ func buildHostsListFromReplStatus(replStatus proto.ReplicaSetStatus) []string {
|
||||
for _, member := range replStatus.Members {
|
||||
hostnames = append(hostnames, member.Name)
|
||||
}
|
||||
sort.Strings(hostnames) // to make testing easier
|
||||
|
||||
return hostnames
|
||||
}
|
||||
|
||||
@@ -181,26 +205,24 @@ func buildHostsListFromShardMap(shardsMap proto.ShardsMap) []string {
|
||||
hostnames = append(hostnames, host)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(hostnames)
|
||||
return hostnames
|
||||
}
|
||||
|
||||
// This function is like GetHostnames but it uses listShards instead of getShardMap
|
||||
// GetShardedHosts is like GetHostnames but it uses listShards instead of getShardMap
|
||||
// so it won't include config servers in the returned list
|
||||
// TODO: Refactor to official mongo-driver.
|
||||
func GetShardedHosts(dialer pmgo.Dialer, di *pmgo.DialInfo) ([]string, error) {
|
||||
hostnames := []string{di.Addrs[0]}
|
||||
session, err := dialer.DialWithInfo(di)
|
||||
if err != nil {
|
||||
return hostnames, err
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
func GetShardedHosts(ctx context.Context, client *mongo.Client) ([]string, error) {
|
||||
shardsInfo := &proto.ShardsInfo{}
|
||||
err = session.Run("listShards", shardsInfo)
|
||||
if err != nil {
|
||||
return hostnames, errors.Wrap(err, "cannot list shards")
|
||||
res := client.Database("admin").RunCommand(ctx, primitive.M{"listShards": 1})
|
||||
if res.Err() != nil {
|
||||
return nil, errors.Wrap(res.Err(), "cannot list shards")
|
||||
}
|
||||
if err := res.Decode(&shardsInfo); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot decode listShards response")
|
||||
}
|
||||
|
||||
hostnames := []string{}
|
||||
for _, shardInfo := range shardsInfo.Shards {
|
||||
m := strings.Split(shardInfo.Host, "/")
|
||||
h := strings.Split(m[1], ",")
|
||||
@@ -209,39 +231,27 @@ func GetShardedHosts(dialer pmgo.Dialer, di *pmgo.DialInfo) ([]string, error) {
|
||||
return hostnames, nil
|
||||
}
|
||||
|
||||
func getTmpDI(di *pmgo.DialInfo, hostname string) *pmgo.DialInfo {
|
||||
tmpdi := *di
|
||||
tmpdi.Addrs = []string{hostname}
|
||||
tmpdi.Direct = true
|
||||
tmpdi.Timeout = 2 * time.Second
|
||||
|
||||
return &tmpdi
|
||||
}
|
||||
|
||||
// TODO: Refactor to official mongo-driver.
|
||||
func GetServerStatus(dialer pmgo.Dialer, di *pmgo.DialInfo, hostname string) (proto.ServerStatus, error) {
|
||||
// GetServerStatus returns the server status by running serverStatus and recordStats
|
||||
func GetServerStatus(ctx context.Context, client *mongo.Client) (proto.ServerStatus, error) {
|
||||
ss := proto.ServerStatus{}
|
||||
|
||||
tmpdi := getTmpDI(di, hostname)
|
||||
session, err := dialer.DialWithInfo(tmpdi)
|
||||
if err != nil {
|
||||
return ss, errors.Wrapf(err, "getReplicasetMembers. cannot connect to %s", hostname)
|
||||
}
|
||||
defer session.Close()
|
||||
session.SetMode(mgo.Monotonic, true)
|
||||
|
||||
query := bson.D{
|
||||
query := primitive.D{
|
||||
{Key: "serverStatus", Value: 1},
|
||||
{Key: "recordStats", Value: 1},
|
||||
}
|
||||
if err := session.DB("admin").Run(query, &ss); err != nil {
|
||||
return ss, errors.Wrap(err, "GetHostInfo.serverStatus")
|
||||
res := client.Database("admin").RunCommand(ctx, query)
|
||||
if res.Err() != nil {
|
||||
return ss, errors.Wrap(res.Err(), "GetHostInfo.serverStatus")
|
||||
}
|
||||
|
||||
if err := res.Decode(&ss); err != nil {
|
||||
return ss, errors.Wrap(err, "cannot decode serverStatus/recordStats")
|
||||
}
|
||||
|
||||
return ss, nil
|
||||
}
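A hypothetical call site for the migrated helper, assuming an already-connected *mongo.Client and a caller that imports this util package (only Pid and Uptime are shown because they are the fields used elsewhere in the patch):

ss, err := util.GetServerStatus(ctx, client)
if err != nil {
	log.Fatalf("cannot run serverStatus: %s", err)
}
fmt.Printf("pid: %v, uptime: %d seconds\n", ss.Pid, ss.Uptime)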
|
||||
|
||||
func GetQueryField(doc proto.SystemProfile) (bson.M, error) {
|
||||
func GetQueryField(doc proto.SystemProfile) (primitive.M, error) {
|
||||
// Proper way to detect if protocol used is "op_msg" or "op_command"
|
||||
// would be to look at "doc.Protocol" field,
|
||||
// however MongoDB 3.0 doesn't have that field
|
||||
@@ -252,7 +262,7 @@ func GetQueryField(doc proto.SystemProfile) (bson.M, error) {
|
||||
if doc.Op == "update" || doc.Op == "remove" {
|
||||
if squery, ok := query.Map()["q"]; ok {
|
||||
// just an extra check to ensure this type assertion won't fail
|
||||
if ssquery, ok := squery.(bson.M); ok {
|
||||
if ssquery, ok := squery.(primitive.M); ok {
|
||||
return ssquery, nil
|
||||
}
|
||||
return nil, CANNOT_GET_QUERY_ERROR
|
||||
@@ -288,7 +298,7 @@ func GetQueryField(doc proto.SystemProfile) (bson.M, error) {
|
||||
//
|
||||
if squery, ok := query.Map()["query"]; ok {
|
||||
// just an extra check to ensure this type assertion won't fail
|
||||
if ssquery, ok := squery.(bson.M); ok {
|
||||
if ssquery, ok := squery.(primitive.M); ok {
|
||||
return ssquery, nil
|
||||
}
|
||||
return nil, CANNOT_GET_QUERY_ERROR
|
||||
@@ -296,7 +306,7 @@ func GetQueryField(doc proto.SystemProfile) (bson.M, error) {
|
||||
|
||||
// "query" in MongoDB 3.2+ is better structured and always has a "filter" subkey:
|
||||
if squery, ok := query.Map()["filter"]; ok {
|
||||
if ssquery, ok := squery.(bson.M); ok {
|
||||
if ssquery, ok := squery.(primitive.M); ok {
|
||||
return ssquery, nil
|
||||
}
|
||||
return nil, CANNOT_GET_QUERY_ERROR
|
||||
@@ -304,8 +314,16 @@ func GetQueryField(doc proto.SystemProfile) (bson.M, error) {
|
||||
|
||||
// {"ns":"test.system.js","op":"query","query":{"find":"system.js"}}
|
||||
if len(query) == 1 && query[0].Key == "find" {
|
||||
return bson.M{}, nil
|
||||
return primitive.M{}, nil
|
||||
}
|
||||
|
||||
return query.Map(), nil
|
||||
}

// GetClientForHost returns a new *mongo.Client using a copy of the original connection options where
// the host is being replaced by the newHost and the connection is set to be direct to the instance.
func GetClientForHost(co *options.ClientOptions, newHost string) (*mongo.Client, error) {
newOptions := options.MergeClientOptions(co, &options.ClientOptions{Hosts: []string{newHost}})
newOptions.SetDirect(true)
return mongo.NewClient(newOptions)
}
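A hedged usage sketch for GetClientForHost (ctx, clientOptions and hostname are assumed to come from the surrounding code, e.g. a loop over the result of GetHostnames):

memberClient, err := GetClientForHost(clientOptions, hostname)
if err != nil {
	return nil, errors.Wrapf(err, "cannot get a direct client for %s", hostname)
}
if err := memberClient.Connect(ctx); err != nil {
	return nil, errors.Wrapf(err, "cannot connect to %s", hostname)
}
defer memberClient.Disconnect(ctx)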
|
||||
|
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -20,10 +21,10 @@ import (
|
||||
"github.com/percona/percona-toolkit/src/go/mongolib/stats"
|
||||
"github.com/percona/percona-toolkit/src/go/mongolib/util"
|
||||
"github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter"
|
||||
"github.com/percona/pmgo"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/mgo.v2"
|
||||
"gopkg.in/mgo.v2/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -42,7 +43,7 @@ var (
|
||||
Version string = "3.0.1"
|
||||
)
|
||||
|
||||
type options struct {
|
||||
type cliOptions struct {
|
||||
AuthDB string
|
||||
Database string
|
||||
Debug bool
|
||||
@@ -106,35 +107,45 @@ func main() {
|
||||
|
||||
log.Debugf("Command line options:\n%+v\n", opts)
|
||||
|
||||
di := getDialInfo(opts)
|
||||
if di.Database == "" {
|
||||
log.Errorln("must indicate a database as host:[port]/database")
|
||||
clientOptions, err := getClientOptions(opts)
|
||||
if err != nil {
|
||||
log.Errorf("Cannot get a MongoDB client: %s", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
if opts.Database == "" {
|
||||
log.Errorln("must indicate a database to profile with the --database parameter")
|
||||
getopt.PrintUsage(os.Stderr)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
dialer := pmgo.NewDialer()
|
||||
session, err := dialer.DialWithInfo(di)
|
||||
log.Debugf("Dial Info: %+v\n", di)
|
||||
ctx := context.Background()
|
||||
|
||||
log.Debugf("Dial Info: %+v\n", clientOptions)
|
||||
|
||||
client, err := mongo.NewClient(clientOptions)
|
||||
if err != nil {
|
||||
log.Errorf("Error connecting to the db: %s while trying to connect to %s", err, di.Addrs[0])
|
||||
os.Exit(3)
|
||||
log.Fatalf("Cannot create a new MongoDB client: %s", err)
|
||||
}
|
||||
|
||||
isProfilerEnabled, err := isProfilerEnabled(dialer, di)
|
||||
if err := client.Connect(ctx); err != nil {
|
||||
log.Fatalf("Cannot connect to MongoDB: %s", err)
|
||||
}
|
||||
|
||||
isProfilerEnabled, err := isProfilerEnabled(ctx, clientOptions)
|
||||
if err != nil {
|
||||
log.Errorf("Cannot get profiler status: %s", err.Error())
|
||||
os.Exit(4)
|
||||
}
|
||||
|
||||
if !isProfilerEnabled {
|
||||
count, err := systemProfileDocsCount(session, di.Database)
|
||||
count, err := systemProfileDocsCount(ctx, client, opts.Database)
|
||||
if err != nil || count == 0 {
|
||||
log.Error("Profiler is not enabled")
|
||||
os.Exit(5)
|
||||
}
|
||||
fmt.Printf("Profiler is disabled for the %q database but there are %d documents in the system.profile collection.\n",
|
||||
di.Database, count)
|
||||
opts.Database, count)
|
||||
fmt.Println("Using those documents for the stats")
|
||||
}
|
||||
|
||||
@@ -145,15 +156,18 @@ func main() {
|
||||
filters = append(filters, filter.NewFilterByCollection(opts.SkipCollections))
|
||||
}
|
||||
|
||||
i := session.DB(di.Database).C("system.profile").Find(bson.M{}).Sort("-$natural").Iter()
|
||||
cursor, err := client.Database(opts.Database).Collection("system.profile").Find(ctx, primitive.M{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fp := fingerprinter.NewFingerprinter(fingerprinter.DEFAULT_KEY_FILTERS)
|
||||
s := stats.New(fp)
|
||||
prof := profiler.NewProfiler(i, filters, nil, s)
|
||||
prof.Start()
|
||||
prof := profiler.NewProfiler(cursor, filters, nil, s)
|
||||
prof.Start(ctx)
|
||||
queries := <-prof.QueriesChan()
|
||||
|
||||
uptime := uptime(session)
|
||||
uptime := uptime(ctx, client)
|
||||
|
||||
queriesStats := queries.CalcQueriesStats(uptime)
|
||||
sortedQueryStats := sortQueries(queriesStats, opts.OrderBy)
|
||||
@@ -163,7 +177,7 @@ func main() {
|
||||
}
|
||||
|
||||
if len(queries) == 0 {
|
||||
log.Errorf("No queries found in profiler information for database %q\n", di.Database)
|
||||
log.Errorf("No queries found in profiler information for database %q\n", opts.Database)
|
||||
return
|
||||
}
|
||||
rep := report{
|
||||
@@ -236,20 +250,20 @@ func format(val float64, size float64) string {
|
||||
return fmt.Sprintf("%s%s", fval, unit)
|
||||
}
|
||||
|
||||
func uptime(session pmgo.SessionManager) int64 {
|
||||
ss := proto.ServerStatus{}
|
||||
if err := session.Ping(); err != nil {
|
||||
func uptime(ctx context.Context, client *mongo.Client) int64 {
|
||||
res := client.Database("admin").RunCommand(ctx, primitive.D{{"serverStatus", 1}, {"recordStats", 1}})
|
||||
if res.Err() != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
if err := session.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 1}}, &ss); err != nil {
|
||||
ss := proto.ServerStatus{}
|
||||
if err := res.Decode(&ss); err != nil {
|
||||
return 0
|
||||
}
|
||||
return ss.Uptime
|
||||
}
|
||||
|
||||
func getOptions() (*options, error) {
|
||||
opts := &options{
|
||||
func getOptions() (*cliOptions, error) {
|
||||
opts := &cliOptions{
|
||||
Host: DEFAULT_HOST,
|
||||
LogLevel: DEFAULT_LOGLEVEL,
|
||||
OrderBy: strings.Split(DEFAULT_ORDERBY, ","),
|
||||
@@ -281,7 +295,7 @@ func getOptions() (*options, error) {
|
||||
gop.StringVarLong(&opts.SSLCAFile, "sslCAFile", 0, "SSL CA cert file used for authentication")
|
||||
gop.StringVarLong(&opts.SSLPEMKeyFile, "sslPEMKeyFile", 0, "SSL client PEM file used for authentication")
|
||||
|
||||
gop.SetParameters("host[:port]/database")
|
||||
gop.SetParameters("host[:port]")
|
||||
|
||||
gop.Parse(os.Args)
|
||||
if gop.NArgs() > 0 {
|
||||
@@ -322,40 +336,29 @@ func getOptions() (*options, error) {
|
||||
opts.Password = string(pass)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(opts.Host, "mongodb://") {
|
||||
opts.Host = "mongodb://" + opts.Host
|
||||
}
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
func getDialInfo(opts *options) *pmgo.DialInfo {
|
||||
di, _ := mgo.ParseURL(opts.Host)
|
||||
di.FailFast = true
|
||||
|
||||
if di.Username == "" {
|
||||
di.Username = opts.User
|
||||
func getClientOptions(opts *cliOptions) (*options.ClientOptions, error) {
|
||||
clientOptions := options.Client().ApplyURI(opts.Host)
|
||||
credential := options.Credential{}
|
||||
if opts.User != "" {
|
||||
credential.Username = opts.User
|
||||
clientOptions.SetAuth(credential)
|
||||
}
|
||||
if di.Password == "" {
|
||||
di.Password = opts.Password
|
||||
if opts.Password != "" {
|
||||
credential.Password = opts.Password
|
||||
credential.PasswordSet = true
|
||||
clientOptions.SetAuth(credential)
|
||||
}
|
||||
if opts.AuthDB != "" {
|
||||
di.Source = opts.AuthDB
|
||||
}
|
||||
if opts.Database != "" {
|
||||
di.Database = opts.Database
|
||||
}
|
||||
|
||||
pmgoDialInfo := pmgo.NewDialInfo(di)
|
||||
|
||||
if opts.SSLCAFile != "" {
|
||||
pmgoDialInfo.SSLCAFile = opts.SSLCAFile
|
||||
}
|
||||
|
||||
if opts.SSLPEMKeyFile != "" {
|
||||
pmgoDialInfo.SSLPEMKeyFile = opts.SSLPEMKeyFile
|
||||
}
|
||||
|
||||
return pmgoDialInfo
|
||||
return clientOptions, nil
|
||||
}
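A hypothetical invocation of getClientOptions, assuming the flags were already parsed into cliOptions and the host was normalized to a mongodb:// URI by getOptions() (the credentials below are the test defaults, used only for illustration):

opts := &cliOptions{
	Host:     "mongodb://127.0.0.1:27017",
	User:     "admin",
	Password: "admin123456",
}
clientOptions, err := getClientOptions(opts)
if err != nil {
	log.Fatalf("cannot build the MongoDB client options: %s", err)
}
client, err := mongo.NewClient(clientOptions)
if err != nil {
	log.Fatalf("cannot create a MongoDB client: %s", err)
}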
|
||||
|
||||
func getHeaders(opts *options) []string {
|
||||
func getHeaders(opts *cliOptions) []string {
|
||||
h := []string{
|
||||
fmt.Sprintf("%s - %s\n", TOOLNAME, time.Now().Format(time.RFC1123Z)),
|
||||
fmt.Sprintf("Host: %s\n", opts.Host),
|
||||
@@ -522,25 +525,21 @@ func sortQueries(queries []stats.QueryStats, orderby []string) []stats.QueryStat
|
||||
|
||||
}
|
||||
|
||||
func isProfilerEnabled(dialer pmgo.Dialer, di *pmgo.DialInfo) (bool, error) {
|
||||
func isProfilerEnabled(ctx context.Context, clientOptions *options.ClientOptions) (bool, error) {
|
||||
var ps proto.ProfilerStatus
|
||||
replicaMembers, err := util.GetReplicasetMembers(dialer, di)
|
||||
replicaMembers, err := util.GetReplicasetMembers(ctx, clientOptions)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, member := range replicaMembers {
|
||||
// Stand alone instances return state = REPLICA_SET_MEMBER_STARTUP
|
||||
di.Addrs = []string{member.Name}
|
||||
di.Direct = true
|
||||
session, err := dialer.DialWithInfo(di)
|
||||
client, err := util.GetClientForHost(clientOptions, member.Name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer session.Close()
|
||||
session.SetMode(mgo.Monotonic, true)
|
||||
|
||||
isReplicaEnabled := isReplicasetEnabled(session)
|
||||
isReplicaEnabled := isReplicasetEnabled(ctx, client)
|
||||
|
||||
if strings.ToLower(member.StateStr) == "configsvr" {
|
||||
continue
|
||||
@@ -549,7 +548,7 @@ func isProfilerEnabled(dialer pmgo.Dialer, di *pmgo.DialInfo) (bool, error) {
|
||||
if isReplicaEnabled && member.State != proto.REPLICA_SET_MEMBER_PRIMARY {
|
||||
continue
|
||||
}
|
||||
if err := session.DB(di.Database).Run(bson.M{"profile": -1}, &ps); err != nil {
|
||||
if err := client.Database("admin").RunCommand(ctx, primitive.M{"profile": -1}).Decode(&ps); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -560,13 +559,13 @@ func isProfilerEnabled(dialer pmgo.Dialer, di *pmgo.DialInfo) (bool, error) {
|
||||
return true, nil
|
||||
}
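The per-member check above boils down to reading the current profiling level with the profile command; a minimal sketch of that query on its own (assuming proto.ProfilerStatus maps the command's "was" field and that the database being profiled is the right target):

var ps proto.ProfilerStatus
res := client.Database(opts.Database).RunCommand(ctx, primitive.M{"profile": -1})
if err := res.Decode(&ps); err == nil && ps.Was > 0 {
	// profiling level 1 (slow operations) or 2 (all operations) is active on this member
}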
|
||||
|
||||
func systemProfileDocsCount(session pmgo.SessionManager, dbname string) (int, error) {
|
||||
return session.DB(dbname).C("system.profile").Count()
|
||||
func systemProfileDocsCount(ctx context.Context, client *mongo.Client, dbname string) (int64, error) {
|
||||
return client.Database(dbname).Collection("system.profile").CountDocuments(ctx, primitive.M{})
|
||||
}
|
||||
|
||||
func isReplicasetEnabled(session pmgo.SessionManager) bool {
|
||||
func isReplicasetEnabled(ctx context.Context, client *mongo.Client) bool {
|
||||
rss := proto.ReplicaSetStatus{}
|
||||
if err := session.Run(bson.M{"replSetGetStatus": 1}, &rss); err != nil {
|
||||
if err := client.Database("admin").RunCommand(ctx, primitive.M{"replSetGetStatus": 1}).Decode(&rss); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
BIN
src/go/pt-mongodb-query-digest/pt-mongodb-query-digest
Executable file
Binary file not shown.
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
@@ -19,12 +20,12 @@ import (
|
||||
"github.com/percona/percona-toolkit/src/go/mongolib/util"
|
||||
"github.com/percona/percona-toolkit/src/go/pt-mongodb-summary/oplog"
|
||||
"github.com/percona/percona-toolkit/src/go/pt-mongodb-summary/templates"
|
||||
"github.com/percona/pmgo"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/shirou/gopsutil/process"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/mgo.v2"
|
||||
"gopkg.in/mgo.v2/bson"
|
||||
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -89,8 +90,8 @@ type procInfo struct {
|
||||
}
|
||||
|
||||
type security struct {
|
||||
Users int
|
||||
Roles int
|
||||
Users int64
|
||||
Roles int64
|
||||
Auth string
|
||||
SSL string
|
||||
BindIP string
|
||||
@@ -105,9 +106,9 @@ type databases struct {
|
||||
// Empty bool `bson:"empty"`
|
||||
// Shards map[string]int64 `bson:"shards"`
|
||||
} `bson:"databases"`
|
||||
TotalSize int64 `bson:"totalSize"`
|
||||
TotalSizeMb int64 `bson:"totalSizeMb"`
|
||||
OK bool `bson:"ok"`
|
||||
TotalSize int64 `bson:"totalSize"`
|
||||
TotalSizeMb int64 `bson:"totalSizeMb"`
|
||||
OK float64 `bson:"ok"`
|
||||
}
|
||||
|
||||
type clusterwideInfo struct {
|
||||
@@ -124,7 +125,7 @@ type clusterwideInfo struct {
|
||||
Chunks []proto.ChunksByCollection
|
||||
}
|
||||
|
||||
type options struct {
|
||||
type cliOptions struct {
|
||||
Help bool
|
||||
Host string
|
||||
User string
|
||||
@@ -153,7 +154,6 @@ type collectedInfo struct {
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
opts, err := parseFlags()
|
||||
if err != nil {
|
||||
log.Errorf("cannot get parameters: %s", err.Error())
|
||||
@@ -194,48 +194,30 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
di := &pmgo.DialInfo{
|
||||
Username: opts.User,
|
||||
Password: opts.Password,
|
||||
Addrs: []string{opts.Host},
|
||||
FailFast: true,
|
||||
Source: opts.AuthDB,
|
||||
SSLCAFile: opts.SSLCAFile,
|
||||
SSLPEMKeyFile: opts.SSLPEMKeyFile,
|
||||
}
|
||||
|
||||
log.Debugf("Connecting to the db using:\n%+v", di)
|
||||
dialer := pmgo.NewDialer()
|
||||
|
||||
hostnames, err := util.GetHostnames(dialer, di)
|
||||
log.Debugf("hostnames: %v", hostnames)
|
||||
|
||||
session, err := dialer.DialWithInfo(di)
|
||||
ctx := context.Background()
|
||||
clientOptions := getClientOptions(opts)
|
||||
client, err := mongo.NewClient(clientOptions)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("Cannot connect to %q", di.Addrs[0])
|
||||
if di.Username != "" || di.Password != "" {
|
||||
message += fmt.Sprintf(" using user: %q", di.Username)
|
||||
if strings.HasPrefix(di.Password, "=") {
|
||||
message += " (probably you are using = with -p or -u instead of a blank space)"
|
||||
}
|
||||
}
|
||||
message += fmt.Sprintf(". %s", err.Error())
|
||||
log.Errorf(message)
|
||||
os.Exit(1)
|
||||
log.Fatalf("Cannot get a MongoDB client: %s", err)
|
||||
}
|
||||
defer session.Close()
|
||||
session.SetMode(mgo.Monotonic, true)
|
||||
if err := client.Connect(ctx); err != nil {
|
||||
log.Fatalf("Cannot connect to MongoDB: %s", err)
|
||||
}
|
||||
defer client.Disconnect(ctx)
|
||||
|
||||
hostnames, err := util.GetHostnames(ctx, client)
|
||||
log.Debugf("hostnames: %v", hostnames)
|
||||
|
||||
ci := &collectedInfo{}
|
||||
|
||||
ci.HostInfo, err = getHostinfo(session)
|
||||
ci.HostInfo, err = getHostInfo(ctx, client)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("Cannot get host info for %q: %s", di.Addrs[0], err.Error())
|
||||
message := fmt.Sprintf("Cannot get host info for %q: %s", opts.Host, err.Error())
|
||||
log.Errorf(message)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
if ci.ReplicaMembers, err = util.GetReplicasetMembers(dialer, di); err != nil {
|
||||
if ci.ReplicaMembers, err = util.GetReplicasetMembers(ctx, clientOptions); err != nil {
|
||||
log.Warnf("[Error] cannot get replicaset members: %v\n", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
@@ -243,8 +225,7 @@ func main() {
|
||||
|
||||
if opts.RunningOpsSamples > 0 && opts.RunningOpsInterval > 0 {
|
||||
ci.RunningOps, err = getOpCountersStats(
|
||||
session,
|
||||
opts.RunningOpsSamples,
|
||||
ctx, client, opts.RunningOpsSamples,
|
||||
time.Duration(opts.RunningOpsInterval)*time.Millisecond,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -253,14 +234,14 @@ func main() {
|
||||
}
|
||||
|
||||
if ci.HostInfo != nil {
|
||||
if ci.SecuritySettings, err = getSecuritySettings(session, ci.HostInfo.Version); err != nil {
|
||||
if ci.SecuritySettings, err = getSecuritySettings(ctx, client, ci.HostInfo.Version); err != nil {
|
||||
log.Errorf("[Error] cannot get security settings: %v\n", err)
|
||||
}
|
||||
} else {
|
||||
log.Warn("Cannot check security settings since host info is not available (permissions?)")
|
||||
}
|
||||
|
||||
if ci.OplogInfo, err = oplog.GetOplogInfo(hostnames, di); err != nil {
|
||||
if ci.OplogInfo, err = oplog.GetOplogInfo(ctx, hostnames, clientOptions); err != nil {
|
||||
log.Infof("Cannot get Oplog info: %s\n", err)
|
||||
} else {
|
||||
if len(ci.OplogInfo) == 0 {
|
||||
@@ -272,13 +253,13 @@ func main() {
|
||||
|
||||
// individual servers won't know about this info
|
||||
if ci.HostInfo.NodeType == typeMongos {
|
||||
if ci.ClusterWideInfo, err = getClusterwideInfo(session); err != nil {
|
||||
if ci.ClusterWideInfo, err = getClusterwideInfo(ctx, client); err != nil {
|
||||
log.Printf("[Error] cannot get cluster wide info: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if ci.HostInfo.NodeType == typeMongos {
|
||||
if ci.BalancerStats, err = GetBalancerStats(session); err != nil {
|
||||
if ci.BalancerStats, err = GetBalancerStats(ctx, client); err != nil {
|
||||
log.Printf("[Error] cannot get balancer stats: %v\n", err)
|
||||
}
|
||||
}
|
||||
@@ -306,48 +287,63 @@ func formatResults(ci *collectedInfo, format string) ([]byte, error) {
|
||||
buf = new(bytes.Buffer)
|
||||
|
||||
t := template.Must(template.New("replicas").Parse(templates.Replicas))
|
||||
t.Execute(buf, ci.ReplicaMembers)
|
||||
if err := t.Execute(buf, ci.ReplicaMembers); err != nil {
|
||||
return nil, errors.Wrap(err, "cannnot parse replicas section of the output template")
|
||||
}
|
||||
|
||||
t = template.Must(template.New("hosttemplateData").Parse(templates.HostInfo))
|
||||
t.Execute(buf, ci.HostInfo)
|
||||
if err := t.Execute(buf, ci.HostInfo); err != nil {
|
||||
return nil, errors.Wrap(err, "cannnot parse hosttemplateData section of the output template")
|
||||
}
|
||||
|
||||
t = template.Must(template.New("runningOps").Parse(templates.RunningOps))
|
||||
t.Execute(buf, ci.RunningOps)
|
||||
if err := t.Execute(buf, ci.RunningOps); err != nil {
|
||||
return nil, errors.Wrap(err, "cannnot parse runningOps section of the output template")
|
||||
}
|
||||
|
||||
t = template.Must(template.New("ssl").Parse(templates.Security))
|
||||
t.Execute(buf, ci.SecuritySettings)
|
||||
if err := t.Execute(buf, ci.SecuritySettings); err != nil {
|
||||
return nil, errors.Wrap(err, "cannnot parse ssl section of the output template")
|
||||
}
|
||||
|
||||
if ci.OplogInfo != nil && len(ci.OplogInfo) > 0 {
|
||||
t = template.Must(template.New("oplogInfo").Parse(templates.Oplog))
|
||||
t.Execute(buf, ci.OplogInfo[0])
|
||||
if err := t.Execute(buf, ci.OplogInfo[0]); err != nil {
|
||||
return nil, errors.Wrap(err, "cannnot parse oplogInfo section of the output template")
|
||||
}
|
||||
}
|
||||
|
||||
t = template.Must(template.New("clusterwide").Parse(templates.Clusterwide))
|
||||
t.Execute(buf, ci.ClusterWideInfo)
|
||||
if err := t.Execute(buf, ci.ClusterWideInfo); err != nil {
|
||||
return nil, errors.Wrap(err, "cannnot parse clusterwide section of the output template")
|
||||
}
|
||||
|
||||
t = template.Must(template.New("balancer").Parse(templates.BalancerStats))
|
||||
t.Execute(buf, ci.BalancerStats)
|
||||
if err := t.Execute(buf, ci.BalancerStats); err != nil {
|
||||
return nil, errors.Wrap(err, "cannnot parse balancer section of the output template")
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func getHostinfo(session pmgo.SessionManager) (*hostInfo, error) {
|
||||
|
||||
func getHostInfo(ctx context.Context, client *mongo.Client) (*hostInfo, error) {
|
||||
hi := proto.HostInfo{}
|
||||
if err := session.Run(bson.M{"hostInfo": 1}, &hi); err != nil {
|
||||
log.Debugf("run('hostInfo') error: %s", err.Error())
|
||||
if err := client.Database("admin").RunCommand(ctx, primitive.M{"hostInfo": 1}).Decode(&hi); err != nil {
|
||||
log.Debugf("run('hostInfo') error: %s", err)
|
||||
return nil, errors.Wrap(err, "GetHostInfo.hostInfo")
|
||||
}
|
||||
|
||||
cmdOpts := proto.CommandLineOptions{}
|
||||
err := session.DB("admin").Run(bson.D{{"getCmdLineOpts", 1}, {"recordStats", 1}}, &cmdOpts)
|
||||
query := primitive.D{{Key: "getCmdLineOpts", Value: 1}, {Key: "recordStats", Value: 1}}
|
||||
err := client.Database("admin").RunCommand(ctx, query).Decode(&cmdOpts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot get command line options")
|
||||
}
|
||||
|
||||
ss := proto.ServerStatus{}
|
||||
if err := session.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 1}}, &ss); err != nil {
|
||||
query = primitive.D{{Key: "serverStatus", Value: 1}, {Key: "recordStats", Value: 1}}
|
||||
if err := client.Database("admin").RunCommand(ctx, query).Decode(&ss); err != nil {
|
||||
return nil, errors.Wrap(err, "GetHostInfo.serverStatus")
|
||||
}
|
||||
|
||||
@@ -356,7 +352,7 @@ func getHostinfo(session pmgo.SessionManager) (*hostInfo, error) {
pi.Error = err
}

nodeType, _ := getNodeType(session)
nodeType, _ := getNodeType(ctx, client)
procCount, _ := countMongodProcesses()

i := &hostInfo{
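The conversion above is the pattern the whole migration follows: every mgo `session.Run(...)` / `session.DB("admin").Run(...)` call becomes `client.Database("admin").RunCommand(ctx, ...).Decode(...)` with the new driver. A minimal, self-contained sketch of that pattern, not part of the patch; the URI and the printed field are illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Assumed URI, for illustration only.
	client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		panic(err)
	}
	if err := client.Connect(ctx); err != nil {
		panic(err)
	}
	defer client.Disconnect(ctx)

	// RunCommand returns a SingleResult; Decode unmarshals the command reply
	// into a struct or a generic primitive.M map.
	var reply primitive.M
	cmd := primitive.D{{Key: "serverStatus", Value: 1}}
	if err := client.Database("admin").RunCommand(ctx, cmd).Decode(&reply); err != nil {
		panic(err)
	}
	fmt.Println("server version:", reply["version"])
}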
@@ -403,10 +399,10 @@ func countMongodProcesses() (int, error) {
return count, nil
}

func getClusterwideInfo(session pmgo.SessionManager) (*clusterwideInfo, error) {
func getClusterwideInfo(ctx context.Context, client *mongo.Client) (*clusterwideInfo, error) {
var databases databases

err := session.Run(bson.M{"listDatabases": 1}, &databases)
err := client.Database("admin").RunCommand(ctx, primitive.M{"listDatabases": 1}).Decode(&databases)
if err != nil {
return nil, errors.Wrap(err, "getClusterwideInfo.listDatabases ")
}
@@ -416,19 +412,24 @@ func getClusterwideInfo(session pmgo.SessionManager) (*clusterwideInfo, error) {
}

for _, db := range databases.Databases {
collections, err := session.DB(db.Name).CollectionNames()
cursor, err := client.Database(db.Name).ListCollections(ctx, primitive.M{})
if err != nil {
continue
}

cwi.TotalCollectionsCount += len(collections)
for _, collName := range collections {
var collStats proto.CollStats
err := session.DB(db.Name).Run(bson.M{"collStats": collName}, &collStats)
if err != nil {
continue
for cursor.Next(ctx) {
c := proto.CollectionEntry{}
if err := cursor.Decode(&c); err != nil {
return nil, errors.Wrap(err, "cannot decode ListCollections doc")
}

var collStats proto.CollStats
err := client.Database(db.Name).RunCommand(ctx, primitive.M{"collStats": c.Name}).Decode(&collStats)
if err != nil {
return nil, errors.Wrapf(err, "cannot get info for collection %s.%s", db.Name, c.Name)
}
cwi.TotalCollectionsCount++

if collStats.Sharded {
cwi.ShardedDataSize += collStats.Size
cwi.ShardedColsCount++
@@ -438,14 +439,16 @@ func getClusterwideInfo(session pmgo.SessionManager) (*clusterwideInfo, error) {
cwi.UnshardedDataSize += collStats.Size
cwi.UnshardedColsCount++
}

}

cwi.UnshardedColsCount = cwi.TotalCollectionsCount - cwi.ShardedColsCount
cwi.ShardedDataSizeScaled, cwi.ShardedDataSizeScale = sizeAndUnit(cwi.ShardedDataSize)
cwi.UnshardedDataSizeScaled, cwi.UnshardedDataSizeScale = sizeAndUnit(cwi.UnshardedDataSize)

cwi.Chunks, _ = getChunksCount(session)
cwi.Chunks, err = getChunksCount(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "cannot get chunks information")
}

return cwi, nil
}
@@ -462,7 +465,7 @@ func sizeAndUnit(size int64) (float64, string) {
return newSize, unit[idx]
}

func getSecuritySettings(session pmgo.SessionManager, ver string) (*security, error) {
func getSecuritySettings(ctx context.Context, client *mongo.Client, ver string) (*security, error) {
s := security{
Auth: "disabled",
SSL: "disabled",
@@ -476,7 +479,7 @@ func getSecuritySettings(session pmgo.SessionManager, ver string) (*security, er
}

cmdOpts := proto.CommandLineOptions{}
err = session.DB("admin").Run(bson.D{{"getCmdLineOpts", 1}, {"recordStats", 1}}, &cmdOpts)
err = client.Database("admin").RunCommand(ctx, primitive.D{{"getCmdLineOpts", 1}, {"recordStats", 1}}).Decode(&cmdOpts)
if err != nil {
return nil, errors.Wrap(err, "cannot get command line options")
}
@@ -525,17 +528,8 @@ func getSecuritySettings(session pmgo.SessionManager, ver string) (*security, er
}
}

// On some servers, like a mongos with config servers, this fails if session mode is Monotonic
// On some other servers like a secondary in a replica set, this fails if the session mode is Strong.
// Lets try both
newSession := session.Clone()
defer newSession.Close()

newSession.SetMode(mgo.Strong, true)

if s.Users, s.Roles, err = getUserRolesCount(newSession); err != nil {
newSession.SetMode(mgo.Monotonic, true)
if s.Users, s.Roles, err = getUserRolesCount(newSession); err != nil {
if s.Users, s.Roles, err = getUserRolesCount(ctx, client); err != nil {
if s.Users, s.Roles, err = getUserRolesCount(ctx, client); err != nil {
return nil, errors.Wrap(err, "cannot get security settings.")
}
}
@@ -543,23 +537,22 @@ func getSecuritySettings(session pmgo.SessionManager, ver string) (*security, er
return &s, nil
}

func getUserRolesCount(session pmgo.SessionManager) (int, int, error) {
users, err := session.DB("admin").C("system.users").Count()
func getUserRolesCount(ctx context.Context, client *mongo.Client) (int64, int64, error) {
users, err := client.Database("admin").Collection("system.users").CountDocuments(ctx, primitive.M{})
if err != nil {
return 0, 0, errors.Wrap(err, "cannot get users count")
}

roles, err := session.DB("admin").C("system.roles").Count()
roles, err := client.Database("admin").Collection("system.roles").CountDocuments(ctx, primitive.M{})
if err != nil {
return 0, 0, errors.Wrap(err, "cannot get roles count")
}
return users, roles, nil
}

func getNodeType(session pmgo.SessionManager) (string, error) {
func getNodeType(ctx context.Context, client *mongo.Client) (string, error) {
md := proto.MasterDoc{}
err := session.Run("isMaster", &md)
if err != nil {
if err := client.Database("admin").RunCommand(ctx, primitive.M{"isMaster": 1}).Decode(&md); err != nil {
return "", err
}
@@ -573,7 +566,7 @@ func getNodeType(session pmgo.SessionManager) (string, error) {
return "mongod", nil
}

func getOpCountersStats(session pmgo.SessionManager, count int, sleep time.Duration) (*opCounters, error) {
func getOpCountersStats(ctx context.Context, client *mongo.Client, count int, sleep time.Duration) (*opCounters, error) {
oc := &opCounters{}
prevOpCount := &opCounters{}
ss := proto.ServerStatus{}
@@ -585,7 +578,7 @@ func getOpCountersStats(session pmgo.SessionManager, count int, sleep time.Durat
// count + 1 because we need the 1st reading to establish a base to measure variation
for i := 0; i < count+1; i++ {
<-ticker.C
err := session.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 1}}, &ss)
err := client.Database("admin").RunCommand(ctx, primitive.D{{"serverStatus", 1}, {"recordStats", 1}}).Decode(&ss)
if err != nil {
return nil, err
}
@@ -729,38 +722,8 @@ func getProcInfo(pid int32, templateData *procInfo) error {
return nil
}

func getDbsAndCollectionsCount(hostnames []string) (int, int, error) {
dbnames := make(map[string]bool)
colnames := make(map[string]bool)

for _, hostname := range hostnames {
session, err := mgo.Dial(hostname)
if err != nil {
continue
}
dbs, err := session.DatabaseNames()
if err != nil {
continue
}

for _, dbname := range dbs {
dbnames[dbname] = true
cols, err := session.DB(dbname).CollectionNames()
if err != nil {
continue
}
for _, colname := range cols {
colnames[dbname+"."+colname] = true
}
}
}

return len(dbnames), len(colnames), nil
}

func GetBalancerStats(session pmgo.SessionManager) (*proto.BalancerStats, error) {

scs, err := GetShardingChangelogStatus(session)
func GetBalancerStats(ctx context.Context, client *mongo.Client) (*proto.BalancerStats, error) {
scs, err := GetShardingChangelogStatus(ctx, client)
if err != nil {
return nil, err
}
@@ -788,16 +751,25 @@ func GetBalancerStats(session pmgo.SessionManager) (*proto.BalancerStats, error)
return s, nil
}

func GetShardingChangelogStatus(session pmgo.SessionManager) (*proto.ShardingChangelogStats, error) {
func GetShardingChangelogStatus(ctx context.Context, client *mongo.Client) (*proto.ShardingChangelogStats, error) {
var qresults []proto.ShardingChangelogSummary
coll := session.DB("config").C("changelog")
match := bson.M{"time": bson.M{"$gt": time.Now().Add(-240 * time.Hour)}}
group := bson.M{"_id": bson.M{"event": "$what", "note": "$details.note"}, "count": bson.M{"$sum": 1}}
coll := client.Database("config").Collection("changelog")
match := primitive.M{"time": primitive.M{"$gt": time.Now().Add(-240 * time.Hour)}}
group := primitive.M{"_id": primitive.M{"event": "$what", "note": "$details.note"}, "count": primitive.M{"$sum": 1}}

err := coll.Pipe([]bson.M{{"$match": match}, {"$group": group}}).All(&qresults)
cursor, err := coll.Aggregate(ctx, []primitive.M{{"$match": match}, {"$group": group}})
if err != nil {
return nil, errors.Wrap(err, "GetShardingChangelogStatus.changelog.find")
}
defer cursor.Close(ctx)

for cursor.Next(ctx) {
res := proto.ShardingChangelogSummary{}
if err := cursor.Decode(&res); err != nil {
return nil, errors.Wrap(err, "cannot decode GetShardingChangelogStatus")
}
qresults = append(qresults, res)
}

return &proto.ShardingChangelogStats{
Items: &qresults,
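With mgo, coll.Pipe(pipeline).All(&out) ran an aggregation and collected every result in one call; the new driver returns a cursor that has to be iterated, decoded, and closed explicitly, as above. A hedged, self-contained sketch of that idiom using the same config.chunks grouping as the patch; the URI and the nsCount type name are assumptions for illustration:

package main

import (
	"context"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

type nsCount struct {
	ID    string `bson:"_id"`
	Count int    `bson:"count"`
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		panic(err)
	}
	if err := client.Connect(ctx); err != nil {
		panic(err)
	}
	defer client.Disconnect(ctx)

	// Group chunk documents by namespace, equivalent in spirit to the
	// chunks/changelog aggregations in the patch.
	pipeline := []primitive.M{
		{"$group": primitive.M{"_id": "$ns", "count": primitive.M{"$sum": 1}}},
	}
	cursor, err := client.Database("config").Collection("chunks").Aggregate(ctx, pipeline)
	if err != nil {
		panic(err)
	}
	defer cursor.Close(ctx)

	var results []nsCount
	for cursor.Next(ctx) {
		var row nsCount
		if err := cursor.Decode(&row); err != nil {
			panic(err)
		}
		results = append(results, row)
	}
	if err := cursor.Err(); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", results)
}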
@@ -863,8 +835,8 @@ func externalIP() (string, error) {
return "", errors.New("are you connected to the network?")
}

func parseFlags() (*options, error) {
opts := &options{
func parseFlags() (*cliOptions, error) {
opts := &cliOptions{
Host: DefaultHost,
LogLevel: DefaultLogLevel,
RunningOpsSamples: DefaultRunningOpsSamples,
@@ -906,6 +878,7 @@ func parseFlags() (*options, error) {
opts.Host = gop.Arg(0)
gop.Parse(gop.Args())
}

if gop.IsSet("password") && opts.Password == "" {
print("Password: ")
pass, err := gopass.GetPasswd()
@@ -914,6 +887,9 @@ func parseFlags() (*options, error) {
}
opts.Password = string(pass)
}
if !strings.HasPrefix(opts.Host, "mongodb://") {
opts.Host = "mongodb://" + opts.Host
}
if opts.Help {
gop.PrintUsage(os.Stdout)
return nil, nil
@@ -925,16 +901,38 @@ func parseFlags() (*options, error) {
return opts, nil
}

func getChunksCount(session pmgo.SessionManager) ([]proto.ChunksByCollection, error) {
func getChunksCount(ctx context.Context, client *mongo.Client) ([]proto.ChunksByCollection, error) {
var result []proto.ChunksByCollection

c := session.DB("config").C("chunks")
query := bson.M{"$group": bson.M{"_id": "$ns", "count": bson.M{"$sum": 1}}}
c := client.Database("config").Collection("chunks")
query := primitive.M{"$group": primitive.M{"_id": "$ns", "count": primitive.M{"$sum": 1}}}

// db.getSiblingDB('config').chunks.aggregate({$group:{_id:"$ns",count:{$sum:1}}})
err := c.Pipe([]bson.M{query}).All(&result)
cursor, err := c.Aggregate(ctx, []primitive.M{query})
if err != nil {
return nil, err
}
for cursor.Next(ctx) {
res := proto.ChunksByCollection{}
if err := cursor.Decode(&res); err != nil {
return nil, errors.Wrap(err, "cannot decode chunks aggregation")
}
result = append(result, res)
}
return result, nil
}

func getClientOptions(opts *cliOptions) *options.ClientOptions {
clientOptions := options.Client().ApplyURI(opts.Host)
credential := options.Credential{}
if opts.User != "" {
credential.Username = opts.User
clientOptions.SetAuth(credential)
}
if opts.Password != "" {
credential.Password = opts.Password
credential.PasswordSet = true
clientOptions.SetAuth(credential)
}
return clientOptions
}
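getClientOptions above builds the connection options from the CLI flags; because options.Credential is copied by value, SetAuth has to be called again with the updated struct after the password is filled in. A short usage sketch of the same options API under that assumption; the connect helper name, URI, and parameters are placeholders, not part of the patch:

package example

import (
	"context"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// connect builds client options from a URI plus optional credentials and
// returns a connected client.
func connect(uri, user, password string) (*mongo.Client, error) {
	clientOptions := options.Client().ApplyURI(uri)
	if user != "" {
		// Credential is a value, so pass the fully populated struct to SetAuth.
		clientOptions.SetAuth(options.Credential{
			Username:    user,
			Password:    password,
			PasswordSet: password != "",
		})
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client, err := mongo.NewClient(clientOptions)
	if err != nil {
		return nil, err
	}
	if err := client.Connect(ctx); err != nil {
		return nil, err
	}
	return client, nil
}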
@@ -1,411 +1,106 @@
package main

import (
"context"
"fmt"
"io/ioutil"
"os"
"reflect"
"testing"
"time"

mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/dbtest"

"github.com/golang/mock/gomock"
"github.com/pborman/getopt"
"github.com/percona/percona-toolkit/src/go/lib/tutil"
tu "github.com/percona/percona-toolkit/src/go/internal/testutils"
"github.com/percona/percona-toolkit/src/go/mongolib/proto"
"github.com/percona/pmgo"
"github.com/percona/pmgo/pmgomock"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
func TestGetOpCounterStats(t *testing.T) {

ctrl := gomock.NewController(t)
defer ctrl.Finish()

session := pmgomock.NewMockSessionManager(ctrl)
database := pmgomock.NewMockDatabaseManager(ctrl)

ss := proto.ServerStatus{}
if err := tutil.LoadJson("test/sample/serverstatus.json", &ss); err != nil {
t.Fatalf("Cannot load sample file: %s", err)
}

session.EXPECT().DB("admin").Return(database)
database.EXPECT().Run(bson.D{
{Name: "serverStatus", Value: 1},
{Name: "recordStats", Value: 1},
}, gomock.Any()).SetArg(1, ss)

session.EXPECT().DB("admin").Return(database)
database.EXPECT().Run(bson.D{
{Name: "serverStatus", Value: 1},
{Name: "recordStats", Value: 1},
}, gomock.Any()).SetArg(1, ss)

session.EXPECT().DB("admin").Return(database)
database.EXPECT().Run(bson.D{
{Name: "serverStatus", Value: 1},
{Name: "recordStats", Value: 1},
}, gomock.Any()).SetArg(1, ss)

session.EXPECT().DB("admin").Return(database)
database.EXPECT().Run(bson.D{
{Name: "serverStatus", Value: 1},
{Name: "recordStats", Value: 1},
}, gomock.Any()).SetArg(1, ss)

session.EXPECT().DB("admin").Return(database)
database.EXPECT().Run(bson.D{
{Name: "serverStatus", Value: 1},
{Name: "recordStats", Value: 1},
}, gomock.Any()).SetArg(1, ss)

ss = addToCounters(ss, 1)
session.EXPECT().DB("admin").Return(database)
database.EXPECT().Run(bson.D{
{Name: "serverStatus", Value: 1}, {Name: "recordStats", Value: 1},
}, gomock.Any()).SetArg(1, ss)

sampleCount := 5
sampleRate := 10 * time.Millisecond // in seconds
expect := TimedStats{Min: 0, Max: 0, Total: 0, Avg: 0}

os, err := getOpCountersStats(session, sampleCount, sampleRate)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(expect, os.Command) {
t.Errorf("getOpCountersStats. got: %+v\nexpect: %+v\n", os.Command, expect)
}

}
func TestSecurityOpts(t *testing.T) {
cmdopts := []proto.CommandLineOptions{
// 1
{
Parsed: proto.Parsed{
Net: proto.Net{
SSL: proto.SSL{
Mode: "",
},
},
},
Security: proto.Security{
KeyFile: "",
Authorization: "",
},
},
// 2
{
Parsed: proto.Parsed{
Net: proto.Net{
SSL: proto.SSL{
Mode: "",
},
},
},
Security: proto.Security{
KeyFile: "a file",
Authorization: "",
},
},
// 3
{
Parsed: proto.Parsed{
Net: proto.Net{
SSL: proto.SSL{
Mode: "",
},
},
},
Security: proto.Security{
KeyFile: "",
Authorization: "something here",
},
},
// 4
{
Parsed: proto.Parsed{
Net: proto.Net{
SSL: proto.SSL{
Mode: "super secure",
},
},
},
Security: proto.Security{
KeyFile: "",
Authorization: "",
},
},
// 5
{
Parsed: proto.Parsed{
Net: proto.Net{
SSL: proto.SSL{
Mode: "",
},
},
Security: proto.Security{
KeyFile: "/home/plavi/psmdb/percona-server-mongodb-3.4.0-1.0-beta-6320ac4/data/keyfile",
},
},
Security: proto.Security{
KeyFile: "",
Authorization: "",
},
},
}

expect := []*security{
// 1
{
Users: 1,
Roles: 2,
Auth: "disabled",
SSL: "disabled",
BindIP: "",
Port: 0,
WarningMsgs: nil,
},
// 2
{
Users: 1,
Roles: 2,
Auth: "enabled",
SSL: "disabled",
BindIP: "", Port: 0,
WarningMsgs: nil,
},
// 3
{
Users: 1,
Roles: 2,
Auth: "enabled",
SSL: "disabled",
BindIP: "",
Port: 0,
WarningMsgs: nil,
},
// 4
{
Users: 1,
Roles: 2,
Auth: "disabled",
SSL: "super secure",
BindIP: "",
Port: 0,
WarningMsgs: nil,
},
// 5
{
Users: 1,
Roles: 2,
Auth: "enabled",
SSL: "disabled",
BindIP: "",
Port: 0,
WarningMsgs: nil,
},
}

ctrl := gomock.NewController(t)
defer ctrl.Finish()

session := pmgomock.NewMockSessionManager(ctrl)
database := pmgomock.NewMockDatabaseManager(ctrl)

usersCol := pmgomock.NewMockCollectionManager(ctrl)
rolesCol := pmgomock.NewMockCollectionManager(ctrl)

for i, cmd := range cmdopts {
session.EXPECT().DB("admin").Return(database)
database.EXPECT().Run(bson.D{
{Name: "getCmdLineOpts", Value: 1}, {Name: "recordStats", Value: 1},
}, gomock.Any()).SetArg(1, cmd)

session.EXPECT().Clone().Return(session)
session.EXPECT().SetMode(mgo.Strong, true)

session.EXPECT().DB("admin").Return(database)
database.EXPECT().C("system.users").Return(usersCol)
usersCol.EXPECT().Count().Return(1, nil)

session.EXPECT().DB("admin").Return(database)
database.EXPECT().C("system.roles").Return(rolesCol)
rolesCol.EXPECT().Count().Return(2, nil)
session.EXPECT().Close().Return()

got, err := getSecuritySettings(session, "3.2")

if err != nil {
t.Errorf("cannot get sec settings: %v", err)
}
if !reflect.DeepEqual(got, expect[i]) {
t.Errorf("Test # %d,\ngot: %#v\nwant: %#v\n", i+1, got, expect[i])
}
}
}
func TestGetNodeType(t *testing.T) {
md := []struct {
in proto.MasterDoc
out string
func TestGetHostInfo(t *testing.T) {
testCases := []struct {
name string
uri string
want []string
}{
{proto.MasterDoc{SetName: "name"}, "replset"},
{proto.MasterDoc{Msg: "isdbgrid"}, "mongos"},
{proto.MasterDoc{Msg: "a msg"}, "mongod"},
}

ctrl := gomock.NewController(t)
defer ctrl.Finish()

session := pmgomock.NewMockSessionManager(ctrl)
for _, m := range md {
session.EXPECT().Run("isMaster", gomock.Any()).SetArg(1, m.in)
nodeType, err := getNodeType(session)
if err != nil {
t.Errorf("cannot get node type: %+v, error: %s\n", m.in, err)
}
if nodeType != m.out {
t.Errorf("invalid node type. got %s, expect: %s\n", nodeType, m.out)
}
}
session.EXPECT().Run("isMaster", gomock.Any()).Return(fmt.Errorf("some fake error"))
nodeType, err := getNodeType(session)
if err == nil {
t.Errorf("error expected, got nil")
}
if nodeType != "" {
t.Errorf("expected blank node type, got %s", nodeType)
}

}
func TestIsPrivateNetwork(t *testing.T) {
//privateCIDRs := []string{"10.0.0.0/24", "172.16.0.0/20", "192.168.0.0/16"}
testdata :=
[]struct {
ip string
want bool
err error
}{
{
ip: "127.0.0.1",
want: true,
err: nil,
},
{
ip: "10.0.0.1",
want: true,
err: nil,
},
{
ip: "10.0.1.1",
want: false,
err: nil,
},
{
ip: "172.16.1.2",
want: true,
err: nil,
},
{
ip: "192.168.1.2",
want: true,
err: nil,
},
{
ip: "8.8.8.8",
want: false,
err: nil,
},
}

for _, in := range testdata {
got, err := isPrivateNetwork(in.ip)
if err != in.err {
t.Errorf("ip %s. got err: %s, want err: %v", in.ip, err, in.err)
}
if got != in.want {
t.Errorf("ip %s. got: %v, want : %v", in.ip, got, in.want)
}
}

}
func TestGetChunks(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

session := pmgomock.NewMockSessionManager(ctrl)
database := pmgomock.NewMockDatabaseManager(ctrl)
pipe := pmgomock.NewMockPipeManager(ctrl)
col := pmgomock.NewMockCollectionManager(ctrl)

var res []proto.ChunksByCollection
if err := tutil.LoadJson("test/sample/chunks.json", &res); err != nil {
t.Errorf("Cannot load samples file: %s", err)
}

pipe.EXPECT().All(gomock.Any()).SetArg(0, res)

col.EXPECT().Pipe(gomock.Any()).Return(pipe)

database.EXPECT().C("chunks").Return(col)

session.EXPECT().DB("config").Return(database)

want := []proto.ChunksByCollection{
{ID: "samples.col2", Count: 5},
}

got, err := getChunksCount(session)
if err != nil {
t.Errorf("Cannot get chunks: %s", err.Error())
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Invalid getChunksCount response.\ngot: %+v\nwant: %+v\n", got, want)
}
}
func TestIntegrationGetChunks(t *testing.T) {
var server dbtest.DBServer
os.Setenv("CHECK_SESSIONS", "0")
tempDir, _ := ioutil.TempDir("", "testing")
server.SetPath(tempDir)

session := pmgo.NewSessionManager(server.Session())
if err := session.DB("config").C("chunks").Insert(bson.M{"ns": "samples.col1", "count": 2}); err != nil {
t.Errorf("Cannot insert sample data: %s", err)
}

want := []proto.ChunksByCollection{
{
ID: "samples.col1",
Count: 1,
name: "from_mongos",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBMongosPort),
want: []string{"127.0.0.1:17001", "127.0.0.1:17002", "127.0.0.1:17004", "127.0.0.1:17005", "127.0.0.1:17007"},
},
{
name: "from_mongod",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard1PrimaryPort),
want: []string{"127.0.0.1:17001", "127.0.0.1:17002", "127.0.0.1:17003"},
},
{
name: "from_non_sharded",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard3PrimaryPort),
want: []string{"127.0.0.1:17021", "127.0.0.1:17022", "127.0.0.1:17023"},
},
}
got, err := getChunksCount(session)
if err != nil {
t.Errorf("Error in integration chunks count: %s", err.Error())
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
client, err := mongo.NewClient(options.Client().ApplyURI(test.uri))
if err != nil {
t.Fatalf("cannot get a new MongoDB client: %s", err)
}
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
err = client.Connect(ctx)
if err != nil {
t.Fatalf("Cannot connect to MongoDB: %s", err)
}

if !reflect.DeepEqual(got, want) {
t.Errorf("Invalid integration chunks count.\ngot: %+v\nwant: %+v", got, want)
_, err = getHostInfo(ctx, client)
if err != nil {
t.Errorf("getHostnames: %v", err)
}
})
}

if err := server.Session().DB("config").DropDatabase(); err != nil {
t.Logf("Cannot drop config database (cleanup): %s", err)
}
server.Session().Close()
server.Stop()

}
func TestClusterWideInfo(t *testing.T) {
testCases := []struct {
name string
uri string
want []string
}{
{
name: "from_mongos",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBMongosPort),
want: []string{"127.0.0.1:17001", "127.0.0.1:17002", "127.0.0.1:17004", "127.0.0.1:17005", "127.0.0.1:17007"},
},
{
name: "from_mongod",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard1PrimaryPort),
want: []string{"127.0.0.1:17001", "127.0.0.1:17002", "127.0.0.1:17003"},
},
{
name: "from_non_sharded",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard3PrimaryPort),
want: []string{"127.0.0.1:17021", "127.0.0.1:17022", "127.0.0.1:17023"},
},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
client, err := mongo.NewClient(options.Client().ApplyURI(test.uri))
if err != nil {
t.Fatalf("cannot get a new MongoDB client: %s", err)
}
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
err = client.Connect(ctx)
if err != nil {
t.Fatalf("Cannot connect to MongoDB: %s", err)
}

_, err = getClusterwideInfo(ctx, client)
if err != nil {
t.Errorf("getClusterwideInfo error: %v", err)
}
})
}

}
func addToCounters(ss proto.ServerStatus, increment int64) proto.ServerStatus {
ss.Opcounters.Command += increment
ss.Opcounters.Delete += increment
@@ -419,11 +114,11 @@ func addToCounters(ss proto.ServerStatus, increment int64) proto.ServerStatus {
func TestParseArgs(t *testing.T) {
tests := []struct {
args []string
want *options
want *cliOptions
}{
{
args: []string{TOOLNAME}, // arg[0] is the command itself
want: &options{
want: &cliOptions{
Host: DefaultHost,
LogLevel: DefaultLogLevel,
AuthDB: DefaultAuthDB,
@@ -456,5 +151,4 @@ func TestParseArgs(t *testing.T) {
}

os.Stdout = old

}
@@ -1,70 +1,66 @@
package oplog

import (
"context"
"fmt"
"sort"
"time"

"github.com/percona/percona-toolkit/src/go/mongolib/proto"
"github.com/percona/pmgo"
"github.com/percona/percona-toolkit/src/go/mongolib/util"
"github.com/pkg/errors"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"gopkg.in/mgo.v2/bson"
)

func GetOplogInfo(hostnames []string, di *pmgo.DialInfo) ([]proto.OplogInfo, error) {

func GetOplogInfo(ctx context.Context, hostnames []string, co *options.ClientOptions) ([]proto.OplogInfo, error) {
results := proto.OpLogs{}

for _, hostname := range hostnames {
result := proto.OplogInfo{
Hostname: hostname,
}
di.Addrs = []string{hostname}
dialer := pmgo.NewDialer()
session, err := dialer.DialWithInfo(di)
client, err := util.GetClientForHost(co, hostname)
if err != nil {
continue
return nil, errors.Wrap(err, "cannot get a client (GetOplogInfo)")
}
defer session.Close()

oplogCol, err := getOplogCollection(session)
if err != nil {
continue
if err := client.Connect(ctx); err != nil {
return nil, errors.Wrapf(err, "cannot connect to %s", hostname)
}

olEntry, err := getOplogEntry(session, oplogCol)
oplogCol, err := getOplogCollection(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "getOplogInfo -> GetOplogEntry")
return nil, errors.Wrap(err, "cannot determine the oplog collection")
}
result.Size = olEntry.Options.Size / (1024 * 1024)

var colStats proto.OplogColStats
err = session.DB("local").Run(bson.M{"collStats": oplogCol}, &colStats)
err = client.Database("local").RunCommand(ctx, bson.M{"collStats": oplogCol}).Decode(&colStats)
if err != nil {
return nil, errors.Wrapf(err, "cannot get collStats for collection %s", oplogCol)
}

result.Size = colStats.Size
result.UsedMB = colStats.Size / (1024 * 1024)

var firstRow, lastRow proto.OplogRow
err = session.DB("local").C(oplogCol).Find(nil).Sort("$natural").One(&firstRow)
options := options.FindOne()
options.SetSort(bson.M{"$natural": 1})
err = client.Database("local").Collection(oplogCol).FindOne(ctx, bson.M{}, options).Decode(&firstRow)
if err != nil {
return nil, errors.Wrap(err, "cannot read first oplog row")
}

err = session.DB("local").C(oplogCol).Find(nil).Sort("-$natural").One(&lastRow)
options.SetSort(bson.M{"$natural": -1})
err = client.Database("local").Collection(oplogCol).FindOne(ctx, bson.M{}, options).Decode(&lastRow)
if err != nil {
return nil, errors.Wrap(err, "cannot read last oplog row")
}

// https://docs.mongodb.com/manual/reference/bson-types/#timestamps
tfirst := firstRow.Timestamp >> 32
tlast := lastRow.Timestamp >> 32
result.TimeDiff = tlast - tfirst
result.TimeDiffHours = float64(result.TimeDiff) / 3600

result.TFirst = time.Unix(tfirst, 0)
result.TLast = time.Unix(tlast, 0)
result.TFirst = time.Unix(int64(firstRow.Timestamp.T), int64(firstRow.Timestamp.I))
result.TLast = time.Unix(int64(lastRow.Timestamp.T), int64(lastRow.Timestamp.I))
result.TimeDiff = result.TLast.Sub(result.TFirst)
result.TimeDiffHours = result.TimeDiff.Hours()
result.Now = time.Now().UTC()
if result.TimeDiffHours > 24 {
result.Running = fmt.Sprintf("%0.2f days", result.TimeDiffHours/24)
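The old code recovered the oplog window by shifting the raw int64 BSON timestamp right by 32 bits to get seconds (see the linked BSON timestamp docs); with the new driver the value arrives as a primitive.Timestamp whose T field already holds the seconds and whose I field is an ordinal within that second rather than nanoseconds, so the sketch below drops I when building a time.Time. Function names and example values here are made up for illustration:

package example

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

// oplogWindow converts the first and last oplog timestamps into a time range.
// T is seconds since the Unix epoch; I only disambiguates events within a second.
func oplogWindow(first, last primitive.Timestamp) (time.Time, time.Time, time.Duration) {
	tFirst := time.Unix(int64(first.T), 0)
	tLast := time.Unix(int64(last.T), 0)
	return tFirst, tLast, tLast.Sub(tFirst)
}

func Example() {
	first := primitive.Timestamp{T: 1560000000, I: 1}
	last := primitive.Timestamp{T: 1560086400, I: 7}
	tFirst, tLast, window := oplogWindow(first, last)
	fmt.Printf("oplog spans %s -> %s (%.2f hours)\n", tFirst, tLast, window.Hours())
}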
@@ -73,14 +69,14 @@ func GetOplogInfo(hostnames []string, di *pmgo.DialInfo) ([]proto.OplogInfo, err
}

replSetStatus := proto.ReplicaSetStatus{}
err = session.Run(bson.M{"replSetGetStatus": 1}, &replSetStatus)
err = client.Database("admin").RunCommand(ctx, bson.M{"replSetGetStatus": 1}).Decode(&replSetStatus)
if err != nil {
continue
}

for _, member := range replSetStatus.Members {
if member.State == 1 {
result.ElectionTime = time.Unix(member.ElectionTime>>32, 0)
result.ElectionTime = time.Unix(int64(member.ElectionTime.T), 0)
break
}
}
@@ -92,29 +88,32 @@ func GetOplogInfo(hostnames []string, di *pmgo.DialInfo) ([]proto.OplogInfo, err

}

func getOplogCollection(session pmgo.SessionManager) (string, error) {
func getOplogCollection(ctx context.Context, client *mongo.Client) (string, error) {
oplog := "oplog.rs"

db := session.DB("local")
nsCol := db.C("system.namespaces")
filter := bson.M{"name": bson.M{"$eq": oplog}}
cursor, err := client.Database("local").ListCollections(ctx, filter)
if err != nil {
return "", errors.Wrap(err, "cannot getOplogCollection")
}

var res interface{}
if err := nsCol.Find(bson.M{"name": "local." + oplog}).One(&res); err == nil {
defer cursor.Close(ctx)
for cursor.Next(ctx) {
n := bson.M{}
if err := cursor.Decode(&n); err != nil {
continue
}
return oplog, nil
}

oplog = "oplog.$main"
if err := nsCol.Find(bson.M{"name": "local." + oplog}).One(&res); err != nil {
return "", fmt.Errorf("neither master/slave nor replica set replication detected")
}

return oplog, nil
return "", fmt.Errorf("cannot find the oplog collection")
}

func getOplogEntry(session pmgo.SessionManager, oplogCol string) (*proto.OplogEntry, error) {
func getOplogEntry(ctx context.Context, client *mongo.Client, oplogCol string) (*proto.OplogEntry, error) {
olEntry := &proto.OplogEntry{}

err := session.DB("local").C("system.namespaces").Find(bson.M{"name": "local." + oplogCol}).One(&olEntry)
err := client.Database("local").Collection("system.namespaces").
FindOne(ctx, bson.M{"name": "local." + oplogCol}).Decode(&olEntry)
if err != nil {
return nil, fmt.Errorf("local.%s, or its options, not found in system.namespaces collection", oplogCol)
}
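getOplogCollection now asks the server directly whether local.oplog.rs exists by filtering ListCollections on the collection name, instead of scanning system.namespaces. A compact sketch of that check under the same assumptions; the hasCollection helper name is made up:

package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// hasCollection reports whether the named collection exists in the database,
// using the same ListCollections filter as getOplogCollection.
func hasCollection(ctx context.Context, client *mongo.Client, db, name string) (bool, error) {
	filter := bson.M{"name": bson.M{"$eq": name}}
	cursor, err := client.Database(db).ListCollections(ctx, filter)
	if err != nil {
		return false, err
	}
	defer cursor.Close(ctx)

	// Any document in the cursor means the collection was found.
	for cursor.Next(ctx) {
		return true, nil
	}
	return false, cursor.Err()
}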
@@ -1 +1,107 @@
package oplog

import (
"context"
"fmt"
"testing"
"time"

tu "github.com/percona/percona-toolkit/src/go/internal/testutils"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)

func TestGetOplogCollection(t *testing.T) {
testCases := []struct {
name string
uri string
want string
err bool
}{
{
name: "from_mongos",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBMongosPort),
want: "",
err: true,
},
{
name: "from_mongod",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard1PrimaryPort),
want: "oplog.rs",
err: false,
},
{
name: "from_non_sharded",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard3PrimaryPort),
want: "oplog.rs",
err: false,
},
}

for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
client, err := mongo.NewClient(options.Client().ApplyURI(test.uri))
if err != nil {
t.Fatalf("cannot get a new MongoDB client: %s", err)
}
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
err = client.Connect(ctx)
if err != nil {
t.Fatalf("Cannot connect to MongoDB: %s", err)
}

oplogCol, err := getOplogCollection(ctx, client)
if (err != nil) != test.err {
t.Errorf("Expected error=%v, got %v", test.err, err)
}
if oplogCol != test.want {
t.Errorf("Want oplog collection to be %q, got %q", test.want, oplogCol)
}
})
}
}

func TestGetOplogInfo(t *testing.T) {
testCases := []struct {
name string
uri string
wantHost bool
err bool
}{
{
name: "from_mongos",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBMongosPort),
wantHost: false,
err: true,
},
{
name: "from_mongod",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard1PrimaryPort),
wantHost: true,
err: false,
},
{
name: "from_non_sharded",
uri: fmt.Sprintf("mongodb://%s:%s@%s:%s", tu.MongoDBUser, tu.MongoDBPassword, tu.MongoDBHost, tu.MongoDBShard3PrimaryPort),
wantHost: true,
err: false,
},
}

for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
clientOptions := options.Client().ApplyURI(test.uri)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

oplogInfo, err := GetOplogInfo(ctx, clientOptions.Hosts, clientOptions)
if (err != nil) != test.err {
t.Errorf("Expected error=%v, got %v", test.err, err)
}
if test.wantHost && (len(oplogInfo) == 0 || oplogInfo[0].Hostname == "") {
t.Error("Expected structure with data. Hostname is empty")
}
})
}
}
@@ -1,9 +1,11 @@
package templates

const BalancerStats = `
{{ if . -}}
# Balancer (per day)
Success: {{.Success}}
Failed: {{.Failed}}
Splits: {{.Splits}}
Drops: {{.Drops}}
{{- end -}}
`
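The {{ if . -}} / {{- end -}} guards added to these templates skip the whole section when the data passed to Execute is nil, so a mongos-only or permission-limited run no longer prints empty headers. A tiny stand-in illustration (the stats struct and values are made up, the template mirrors a subset of BalancerStats):

package main

import (
	"os"
	"text/template"
)

const balancer = `
{{ if . -}}
# Balancer (per day)
  Success: {{.Success}}
  Failed:  {{.Failed}}
{{- end -}}
`

type stats struct {
	Success int
	Failed  int
}

func main() {
	t := template.Must(template.New("balancer").Parse(balancer))

	// With data, the section is rendered.
	_ = t.Execute(os.Stdout, &stats{Success: 10, Failed: 2})

	// With a nil pointer, the if-guard is false and the section body is skipped.
	var empty *stats
	_ = t.Execute(os.Stdout, empty)
}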
@@ -1,6 +1,7 @@
package templates

const Clusterwide = `
{{ if . -}}
# Cluster wide ###########################################################################################
Databases: {{.TotalDBsCount}}
Collections: {{.TotalCollectionsCount}}
@@ -16,4 +17,5 @@ Unsharded Collections: {{.UnshardedColsCount}}
{{- end }}
{{- end }}
{{- end -}}
{{- end -}}
`
@@ -19,7 +19,7 @@ const HostInfo = `# This host
{{- end }}
Processes | {{.ProcProcessCount}}
Process Type | {{.NodeType}}
{{ if .ReplicaSetName -}}
{{ if .ReplicasetName -}}
ReplSet | {{.ReplicasetName}}
Repl Status |
{{- end -}}
@@ -2,7 +2,6 @@ package templates

const RunningOps = `
# Running Ops ############################################################################################

Type Min Max Avg
Insert {{printf "% 8d" .Insert.Min}} {{printf "% 8d" .Insert.Max}} {{printf "% 8d" .Insert.Avg}}/{{.SampleRate}}
Query {{printf "% 8d" .Query.Min}} {{printf "% 8d" .Query.Max}} {{printf "% 8d" .Query.Avg}}/{{.SampleRate}}
32
src/go/setenv.sh
Executable file
32
src/go/setenv.sh
Executable file
@@ -0,0 +1,32 @@
#!/bin/bash
export DEBUG=1
export GOLANG_DOCKERHUB_TAG=1.10-stretch
export AWS_REGION=us-west-2
#export TEST_MONGODB_FLAVOR=percona/percona-server-mongodb
export TEST_MONGODB_FLAVOR=mongo
export TEST_PSMDB_VERSION=4.0
export TEST_MONGODB_ADMIN_USERNAME=admin
export TEST_MONGODB_ADMIN_PASSWORD=admin123456
export TEST_MONGODB_USERNAME=test
export TEST_MONGODB_PASSWORD=123456
export TEST_MONGODB_STANDALONE_PORT=27017
export TEST_MONGODB_MONGOS_PORT=17000
export TEST_MONGODB_S1_RS=rs1
export TEST_MONGODB_S1_PRIMARY_PORT=17001
export TEST_MONGODB_S1_SECONDARY1_PORT=17002
export TEST_MONGODB_S1_SECONDARY2_PORT=17003
export TEST_MONGODB_S2_RS=rs2
export TEST_MONGODB_S2_PRIMARY_PORT=17004
export TEST_MONGODB_S2_SECONDARY1_PORT=17005
export TEST_MONGODB_S2_SECONDARY2_PORT=17006
export TEST_MONGODB_CONFIGSVR_RS=csReplSet
export TEST_MONGODB_CONFIGSVR1_PORT=17007
export TEST_MONGODB_CONFIGSVR2_PORT=17008
export TEST_MONGODB_CONFIGSVR3_PORT=17009
export TEST_MONGODB_S3_RS=rs3
export TEST_MONGODB_S3_PRIMARY_PORT=17021
export TEST_MONGODB_S3_SECONDARY1_PORT=17022
export TEST_MONGODB_S3_SECONDARY2_PORT=17023
export MINIO_ENDPOINT=http://localhost:9000/
export MINIO_ACCESS_KEY_ID=example00000
export MINIO_SECRET_ACCESS_KEY=secret00000