Merge pull request #413 from percona/release-3.1

Release 3.1
This commit is contained in:
Carlos Salguero
2019-09-13 11:17:01 -03:00
committed by GitHub
91 changed files with 2518 additions and 204 deletions

2
.gitignore vendored
View File

@@ -10,6 +10,8 @@ snapshot
build
Makefile.old
bin/pt-mongodb-*
bin/pt-secure-*
bin/pt-pg-*
!src/go/pt-mongodb-query-digest/vendor/vendor.json
!src/go/pt-mongodb-summary/vendor/vendor.json
src/go/pt-mongodb-query-digest/vendor/

View File

@@ -1,6 +1,23 @@
Changelog for Percona Toolkit
* Fixed bug PT-1114: pt-table-checksum fails when table is empty
* Fixed bug PT-1344: pt-online-schema-change: Use of uninitialized value $host in string
* Fixed bug PT-1575: pt-mysql-summary does not print PXC section for PXC 5.6 and 5.7
* Fixed bug PT-1630: pt-table-checksum not working with galera cluster anymore since 3.0.11
* Fixed bug PT-1633: Fix incorrect parsing of a variable with number + K,M,G,T (Thanks Dieter Adriaenssens)
* Fixed bug PT-1637: pt-config-diff doesn't handle innodb_temp_data_file_path correctly
* Improvement PT-1663: Implement retention by bytes for pt-stalk
* Fixed bug PT-1670: pt-table-checksum fails when table is empty
* New-tool PT-1696: pt-postgresql-summary
* Improvement PT-1705: Make pt-online-schema-change exit with different codes depending on the status
* Fixed bug PT-1709: Error: Use of uninitialized value in concatenation (.) or string at pt-upgrade
* Fixed bug PT-1715: pt-upgrade documentation doesn't have the type tcpdump
* Fixed bug PT-1720: pt-pmp parses configuration files that lead to errors
* Fixed bug PT-1728: pt-table-checksum failing to scan small tables that get wiped out often
* Fixed bug PT-1734: Tailing log_error in pt-stalk doesn't work
* Improvement PT-1741: Migration to new MongoDB driver
* Improvement PT-1746: diskstats not working for kernel 4.18+
* Improvement PT-1761: Prevent pt-osc from running under MySQL 8.0.14 ~ 8.0.17
v3.0.13 released 2018-12-28

90
Gopkg.lock generated
View File

@@ -9,6 +9,14 @@
revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
version = "v1.4.2"
[[projects]]
digest = "1:c39fbf3b3e138accc03357c72417c0153c54cc1ae8c9f40e8f120a550d876a76"
name = "github.com/Percona-Lab/pt-pg-summary"
packages = ["models"]
pruneopts = ""
revision = "f06beea959eb00acfe44ce39342c27582ad84caa"
version = "v0.1.9"
[[projects]]
digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e"
name = "github.com/StackExchange/wmi"
@@ -81,11 +89,11 @@
[[projects]]
branch = "master"
digest = "1:6a6322a15aa8e99bd156fbba0aae4e5d67b4bb05251d860b348a45dfdcba9cce"
digest = "1:c823f556d11763bb805a60a77226ae321843c9bd1454ee5b808ca169ac515762"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = ""
revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a"
revision = "ff6b7dc882cf4cfba7ee0b9f7dcc1ac096c554aa"
[[projects]]
branch = "master"
@@ -128,12 +136,24 @@
version = "v0.1.0"
[[projects]]
digest = "1:0093a7c66d5b9e0cdaf4be5c20e0a9b889d1d839148eeed1d587e99b4cfd90ff"
digest = "1:f4216047c24ab66fb757045febd7dac4edc6f4ad9f6c0063d0755d654d04f25e"
name = "github.com/lib/pq"
packages = [
".",
"oid",
"scram",
]
pruneopts = ""
revision = "3427c32cb71afc948325f299f040e53c1dd78979"
version = "v1.2.0"
[[projects]]
digest = "1:d5d962b7a8d95b9e6226f6b831a7e50237acf1730dcace6e8cb87c6dd628ef54"
name = "github.com/mattn/go-shellwords"
packages = ["."]
pruneopts = ""
revision = "a72fbe27a1b0ed0df2f02754945044ce1456608b"
version = "v1.0.5"
revision = "36a9b3c57cb5caa559ff63fb7e9b585f1c00df75"
version = "v1.0.6"
[[projects]]
digest = "1:a067513044dc491395a58f56f39cedddb5ad35789b832b570c283a64d712f81b"
@@ -153,19 +173,11 @@
[[projects]]
branch = "master"
digest = "1:1adb91baf59317a1614c3b465b2734066c256d1dca99d5526baa43274542a737"
digest = "1:457024f04029bb321d759cc3b2c46b7e0e43572e3a663ce7d006aeb41efa2b17"
name = "github.com/percona/go-mysql"
packages = ["query"]
pruneopts = ""
revision = "c5d0b4a3add9c9bf5bb26b2ab823289e395a3f98"
[[projects]]
digest = "1:16b4510ba61ab0bb7a4e694ea6396a7b2879f5fabb21e93066e182691f790173"
name = "github.com/percona/pmgo"
packages = ["."]
pruneopts = ""
revision = "497d06e28f910fbe26d5d60f59d36284a6901c6f"
version = "0.5.2"
revision = "197f4ad8db8d1b04ff408042119176907c971f0a"
[[projects]]
digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d"
@@ -184,7 +196,7 @@
version = "v1.2.0"
[[projects]]
digest = "1:2226ffdae873216a5bc8a0bab7a51ac670b27a4aed852007d77600f809aa04e3"
digest = "1:55dcddb2ba6ab25098ee6b96f176f39305f1fde7ea3d138e7e10bb64a5bf45be"
name = "github.com/shirou/gopsutil"
packages = [
"cpu",
@@ -194,8 +206,8 @@
"process",
]
pruneopts = ""
revision = "d80c43f9c984a48783daf22f4bd9278006ae483a"
version = "v2.19.7"
revision = "e4ec7b275ada47ca32799106c2dba142d96aaf93"
version = "v2.19.8"
[[projects]]
branch = "master"
@@ -270,22 +282,14 @@
[[projects]]
branch = "master"
digest = "1:086760278d762dbb0e9a26e09b57f04c89178c86467d8d94fae47d64c222f328"
digest = "1:2f8d339c3b89d5abf9a78aafe1e9fbe548f3b1fb9be5c3117036940904d39527"
name = "golang.org/x/crypto"
packages = [
"pbkdf2",
"ssh/terminal",
]
pruneopts = ""
revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285"
[[projects]]
branch = "master"
digest = "1:955694a7c42527d7fb188505a22f10b3e158c6c2cf31fe64b1e62c9ab7b18401"
name = "golang.org/x/net"
packages = ["context"]
pruneopts = ""
revision = "ca1201d0de80cfde86cb01aea620983605dfe99b"
revision = "71b5226ff73902d121cd9dbbdfdb67045a805845"
[[projects]]
branch = "master"
@@ -297,14 +301,14 @@
[[projects]]
branch = "master"
digest = "1:0b5c2207c72f2d13995040f176feb6e3f453d6b01af2b9d57df76b05ded2e926"
digest = "1:ffaa20332022643a821848aa2322787bbfbf06bceb4b4e84cde3b05d07fa51ac"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = ""
revision = "51ab0e2deafac1f46c46ad59cf0921be2f180c3d"
revision = "749cb33beabd9aa6d3178e3de05bcc914f70b2bf"
[[projects]]
digest = "1:740b51a55815493a8d0f2b1e0d0ae48fe48953bf7eaf3fcc4198823bf67768c0"
@@ -321,45 +325,23 @@
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "v2"
digest = "1:f54ba71a035aac92ced3e902d2bff3734a15d1891daff73ec0f90ef236750139"
name = "gopkg.in/mgo.v2"
packages = [
".",
"bson",
"dbtest",
"internal/json",
"internal/sasl",
"internal/scram",
]
pruneopts = ""
revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5"
[[projects]]
branch = "v2"
digest = "1:61a650a53e5e865a91ae9581f02990a4b6e3afcb8d280f19b1e67a3c284944e6"
name = "gopkg.in/tomb.v2"
packages = ["."]
pruneopts = ""
revision = "d5d1b5820637886def9eef33e03a27a9f166942c"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/Masterminds/semver",
"github.com/Percona-Lab/pt-pg-summary/models",
"github.com/alecthomas/kingpin",
"github.com/go-ini/ini",
"github.com/golang/mock/gomock",
"github.com/hashicorp/go-version",
"github.com/howeyc/gopass",
"github.com/kr/pretty",
"github.com/lib/pq",
"github.com/mattn/go-shellwords",
"github.com/montanaflynn/stats",
"github.com/pborman/getopt",
"github.com/percona/go-mysql/query",
"github.com/percona/pmgo",
"github.com/pkg/errors",
"github.com/satori/go.uuid",
"github.com/shirou/gopsutil/process",

View File

@@ -44,10 +44,6 @@
branch = "master"
name = "github.com/pborman/getopt"
[[constraint]]
name = "github.com/percona/pmgo"
version = "0.5.1"
[[constraint]]
name = "github.com/pkg/errors"
version = "0.8.0"
@@ -63,9 +59,5 @@
[[constraint]]
name = "github.com/sirupsen/logrus"
[[constraint]]
branch = "v2"
name = "gopkg.in/mgo.v2"
[[constraint]]
name = "go.mongodb.org/mongo-driver"

View File

@@ -2,7 +2,7 @@ use ExtUtils::MakeMaker;
WriteMakefile(
NAME => 'percona-toolkit',
VERSION => '3.0.13',
VERSION => '3.1.0',
EXE_FILES => [ <bin/*> ],
MAN1PODS => {
'docs/percona-toolkit.pod' => 'blib/man1/percona-toolkit.1p',

View File

@@ -1359,6 +1359,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-align 3.0.13
pt-align 3.1.0
=cut

View File

@@ -45,7 +45,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -8607,6 +8607,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-archiver 3.0.13
pt-archiver 3.1.0
=cut

View File

@@ -43,7 +43,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -5890,6 +5890,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-config-diff 3.0.13
pt-config-diff 3.1.0
=cut

View File

@@ -42,7 +42,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -5680,6 +5680,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-deadlock-logger 3.0.13
pt-deadlock-logger 3.1.0
=cut

View File

@@ -38,7 +38,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -5677,6 +5677,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-diskstats 3.0.13
pt-diskstats 3.1.0
=cut

View File

@@ -39,7 +39,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -5743,6 +5743,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-duplicate-key-checker 3.0.13
pt-duplicate-key-checker 3.1.0
=cut

View File

@@ -1648,6 +1648,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-fifo-split 3.0.13
pt-fifo-split 3.1.0
=cut

View File

@@ -35,7 +35,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -5104,6 +5104,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-find 3.0.13
pt-find 3.1.0
=cut

View File

@@ -2239,6 +2239,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-fingerprint 3.0.13
pt-fingerprint 3.1.0
=cut

View File

@@ -37,7 +37,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -4666,6 +4666,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-fk-error-logger 3.0.13
pt-fk-error-logger 3.1.0
=cut

View File

@@ -44,7 +44,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -7346,6 +7346,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-heartbeat 3.0.13
pt-heartbeat 3.1.0
=cut

View File

@@ -45,7 +45,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -7673,6 +7673,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-index-usage 3.0.13
pt-index-usage 3.1.0
=cut

View File

@@ -1127,7 +1127,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-ioprofile 3.0.13
pt-ioprofile 3.1.0
=cut

View File

@@ -47,7 +47,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -8514,6 +8514,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-kill 3.0.13
pt-kill 3.1.0
=cut

View File

@@ -804,7 +804,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-mext 3.0.13
pt-mext 3.1.0
=cut

View File

@@ -3289,7 +3289,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-mysql-summary 3.0.13
pt-mysql-summary 3.1.0
=cut

View File

@@ -56,7 +56,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -8323,8 +8323,6 @@ use constant {
sub _die {
my ($msg, $exit_status) = @_;
warn "msg: $msg",;
warn "exit_status: $exit_status";
$exit_status ||= 255;
chomp ($msg);
print "$msg\n";
@@ -8943,6 +8941,22 @@ sub main {
# Get child tables of the original table, if necessary.
# ########################################################################
my $child_tables;
my $have_child_tables = find_child_tables(
tbl => $orig_tbl,
Cxn => $cxn,
Quoter => $q,
);
my $vp = VersionParser->new($cxn->dbh());
if ($vp->cmp('8.0.14') > -1 && $vp->flavor() !~ m/maria/i) {
my $msg = "There is an error in MySQL that makes the server to die when trying to ".
"rename a table with FKs. See https://bugs.mysql.com/bug.php?id=96145\n".
"Since pt-online-schema change needs to rename the old <-> new tables as the final " .
"step, and the requested table has FKs, it cannot be executed under the current MySQL version";
_die($msg, NO_MINIMUM_REQUIREMENTS);
}
if ( ($alter_fk_method || '') eq 'none' ) {
print "Not updating foreign keys because "
. "--alter-foreign-keys-method=none. Foreign keys "
@@ -9858,7 +9872,6 @@ sub main {
}
}
my $vp = VersionParser->new($cxn->dbh());
if ($vp->cmp('8.0') > -1 && $vp->flavor() !~ m/maria/i && $alter_fk_method eq 'drop_swap') {
my $msg = "--alter-foreign-keys-method=drop_swap doesn't work with MySQL 8.0+\n".
"See https://bugs.mysql.com/bug.php?id=89441";
@@ -13172,6 +13185,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-online-schema-change 3.0.13
pt-online-schema-change 3.1.0
=cut

View File

@@ -896,7 +896,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-pmp 3.0.13
pt-pmp 3.1.0
=cut

View File

@@ -64,7 +64,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -16916,6 +16916,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-query-digest 3.0.13
pt-query-digest 3.1.0
=cut

View File

@@ -2591,6 +2591,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-show-grants 3.0.13
pt-show-grants 3.1.0
=cut

View File

@@ -1245,7 +1245,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-sift 3.0.13
pt-sift 3.1.0
=cut

View File

@@ -40,7 +40,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -4966,6 +4966,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-slave-delay 3.0.13
pt-slave-delay 3.1.0
=cut

View File

@@ -4483,6 +4483,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-slave-find 3.0.13
pt-slave-find 3.1.0
=cut

View File

@@ -41,7 +41,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -6119,6 +6119,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-slave-restart 3.0.13
pt-slave-restart 3.1.0
=cut

View File

@@ -2419,7 +2419,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-stalk 3.0.13
pt-stalk 3.1.0
=cut

View File

@@ -2723,7 +2723,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-summary 3.0.13
pt-summary 3.1.0
=cut

View File

@@ -58,7 +58,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -14116,6 +14116,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-table-checksum 3.0.13
pt-table-checksum 3.1.0
=cut

View File

@@ -55,7 +55,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -13051,6 +13051,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-table-sync 3.0.13
pt-table-sync 3.1.0
=cut

View File

@@ -8487,6 +8487,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-table-usage 3.0.13
pt-table-usage 3.1.0
=cut

View File

@@ -61,7 +61,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -11421,6 +11421,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-upgrade 3.0.13
pt-upgrade 3.1.0
=cut

View File

@@ -44,7 +44,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';
@@ -6235,6 +6235,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-variable-advisor 3.0.13
pt-variable-advisor 3.1.0
=cut

View File

@@ -3281,6 +3281,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-visual-explain 3.0.13
pt-visual-explain 3.1.0
=cut

View File

@@ -48,9 +48,9 @@ copyright = u'2017, Percona LLC and/or its affiliates'
# built documents.
#
# The short X.Y version.
version = '3.0'
version = '3.1'
# The full version, including alpha/beta/rc tags.
release = '3.0.13'
release = '3.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@@ -560,6 +560,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
Percona Toolkit v3.0.13 released 2018-12-28
Percona Toolkit v3.1.0 released 2019-09-07
=cut

View File

@@ -29,6 +29,7 @@ use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
use Data::Dumper;
use Carp;
$Data::Dumper::Indent = 1;
$Data::Dumper::Sortkeys = 1;
$Data::Dumper::Quotekeys = 0;
@@ -98,13 +99,21 @@ sub _get_first_values {
}
my ($cxn, $tbl, $index, $n_index_cols) = @args{@required_args};
my $q = $self->{Quoter};
my $q = $self->{quoter};
# Select just the index columns.
my $index_struct = $tbl->{tbl_struct}->{keys}->{$index};
my $index_cols = $index_struct->{cols};
my $index_columns = join (', ',
my $index_columns;
eval {
$index_columns = join (', ',
map { $q->quote($_) } @{$index_cols}[0..($n_index_cols - 1)]);
};
if ($EVAL_ERROR) {
confess "$EVAL_ERROR";
}
# Where no index column is null, because we can't > NULL.
my @where;

View File

@@ -18,7 +18,7 @@
# ###########################################################################
package Percona::Toolkit;
our $VERSION = '3.0.13';
our $VERSION = '3.1.0';
use strict;
use warnings FATAL => 'all';

View File

@@ -1,3 +1,6 @@
AWS_ACCESS_KEY_ID=AKIAJQ2GZPAJ3JZS52HQ
AWS_SECRET_ACCESS_KEY=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
GOCACHE=
GOLANG_DOCKERHUB_TAG=1.10-stretch
TEST_MONGODB_ADMIN_USERNAME=admin
TEST_MONGODB_ADMIN_PASSWORD=admin123456

View File

@@ -1,15 +1,16 @@
GO := go
pkgs = $(shell find . -type d -name "pt-*" -exec basename {} \;)
VERSION="3.0.13"
VERSION=$(shell git describe --abbrev=0 --tags)
BUILD=$(shell date +%FT%T%z)
GOVERSION=$(shell go version | cut --delimiter=" " -f3)
COMMIT=$(shell git rev-list -1 HEAD)
GOUTILSDIR ?= $(GOPATH)/bin
PREFIX=$(shell pwd)
TOP_DIR=$(shell git rev-parse --show-toplevel)
BIN_DIR=$(shell git rev-parse --show-toplevel)/bin
SRC_DIR=$(shell git rev-parse --show-toplevel)/src/go
LDFLAGS="-X main.Version=${VERSION} -X main.Build=${BUILD} -X main.GoVersion=${GOVERSION} -s -w"
LDFLAGS="-X main.Version=${VERSION} -X main.Build=${BUILD} -X main.GoVersion=${GOVERSION} -X main.Commit=${COMMIT} -s -w"
TEST_PSMDB_VERSION?=3.6
TEST_MONGODB_FLAVOR?=percona/percona-server-mongodb

324
src/go/lib/pginfo/pginfo.go Normal file
View File

@@ -0,0 +1,324 @@
package pginfo
import (
"fmt"
"regexp"
"time"
"github.com/Percona-Lab/pt-pg-summary/models"
"github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/process"
"github.com/sirupsen/logrus"
)
// Process contains PostgreSQL process information collected from the
// local OS process table (see collectProcesses).
type Process struct {
PID int32 // OS process ID
CmdLine string // full command line of the process
}
// PGInfo has exported fields containing the data collected.
// Fields are exported to be able to use them when printing the templates
type PGInfo struct {
ClusterInfo []*models.ClusterInfo
ConnectedClients []*models.ConnectedClients
DatabaseWaitEvents []*models.DatabaseWaitEvents
AllDatabases []*models.Databases
GlobalWaitEvents []*models.GlobalWaitEvents
PortAndDatadir *models.PortAndDatadir
SlaveHosts96 []*models.SlaveHosts96
SlaveHosts10 []*models.SlaveHosts10
Tablespaces []*models.Tablespaces
Settings []*models.Setting
Counters map[models.Name][]*models.Counters // Counters per database
IndexCacheHitRatio map[string]*models.IndexCacheHitRatio // Indexes cache hit ratio per database
TableCacheHitRatio map[string]*models.TableCacheHitRatio // Tables cache hit ratio per database
TableAccess map[string][]*models.TableAccess // Table access per database
ServerVersion *version.Version // parsed by parseServerVersion
Sleep int // seconds between the two counters samples in CollectGlobalInfo
Processes []Process // postgres processes found by collectProcesses
// This is the list of databases from where we should get Table Cache Hit, Index Cache Hits, etc.
// This field is being populated in the new() constructor depending on the cli parameters.
// If --databases was not specified, this array will have the list of ALL databases from the GetDatabases
// method in the models pkg
databases []string
logger *logrus.Logger
}
// New returns a new PGInfo instance that logs through a freshly created
// local logrus logger.
func New(db models.XODB, databases []string, sleep int) (*PGInfo, error) {
	localLogger := logrus.New()
	return new(db, databases, sleep, localLogger)
}
// NewWithLogger returns a new PGInfo instance that logs through the
// caller-provided logrus logger instead of creating its own.
func NewWithLogger(db models.XODB, databases []string, sleep int, l *logrus.Logger) (*PGInfo, error) {
	info, err := new(db, databases, sleep, l)
	return info, err
}
// new builds a PGInfo shared by New and NewWithLogger: it initializes the
// per-database maps, resolves the list of databases to inspect (all server
// databases when none were requested explicitly) and detects the server
// version.
func new(db models.XODB, databases []string, sleep int, logger *logrus.Logger) (*PGInfo, error) {
	var err error
	info := &PGInfo{
		databases:          databases,
		Counters:           make(map[models.Name][]*models.Counters),
		TableAccess:        make(map[string][]*models.TableAccess),
		TableCacheHitRatio: make(map[string]*models.TableCacheHitRatio),
		IndexCacheHitRatio: make(map[string]*models.IndexCacheHitRatio),
		Sleep:              sleep,
		logger:             logger,
	}
	if info.AllDatabases, err = models.GetDatabases(db); err != nil {
		return nil, errors.Wrap(err, "Cannot get databases list")
	}
	info.logger.Debug("All databases list")
	for i, db := range info.AllDatabases {
		logger.Debugf("% 5d: %s", i, db.Datname)
	}
	if len(databases) < 1 {
		// No explicit database list: collect for every database on the server.
		info.databases = make([]string, 0, len(info.AllDatabases))
		allDatabases, err := models.GetAllDatabases(db)
		if err != nil {
			return nil, errors.Wrap(err, "cannot get the list of all databases")
		}
		for _, database := range allDatabases {
			info.databases = append(info.databases, string(database.Datname))
		}
	} else {
		info.databases = make([]string, len(databases))
		copy(info.databases, databases)
	}
	info.logger.Debugf("Will collect info for these databases: %v", info.databases)
	serverVersion, err := models.GetServerVersion(db)
	if err != nil {
		// Bug fix: this error was wrapped with "Cannot get the connected
		// clients list", copy-pasted from an unrelated call site.
		return nil, errors.Wrap(err, "Cannot get the server version")
	}
	if info.ServerVersion, err = parseServerVersion(serverVersion.Version); err != nil {
		return nil, fmt.Errorf("cannot get server version: %s", err.Error())
	}
	info.logger.Infof("Detected PostgreSQL version: %v", info.ServerVersion)
	return info, nil
}
// DatabaseNames returns the list of the database names for which
// information will be collected.
func (i *PGInfo) DatabaseNames() []string {
	names := i.databases
	return names
}
// CollectPerDatabaseInfo collects the per-database statistics (table
// accesses, table cache hit ratio and index cache hit ratio) for dbName
// and stores the results in the corresponding maps keyed by that name.
// db must be a connection to the dbName database.
func (i *PGInfo) CollectPerDatabaseInfo(db models.XODB, dbName string) (err error) {
	i.logger.Info("Collecting Table Access information")
	// Bug fix: the original messages said "ibase" instead of "database".
	if i.TableAccess[dbName], err = models.GetTableAccesses(db); err != nil {
		return errors.Wrapf(err, "cannot get Table Accesses for the %s database", dbName)
	}
	i.logger.Info("Collecting Table Cache Hit Ratio information")
	if i.TableCacheHitRatio[dbName], err = models.GetTableCacheHitRatio(db); err != nil {
		return errors.Wrapf(err, "cannot get Table Cache Hit Ratios for the %s database", dbName)
	}
	i.logger.Info("Collecting Index Cache Hit Ratio information")
	if i.IndexCacheHitRatio[dbName], err = models.GetIndexCacheHitRatio(db); err != nil {
		return errors.Wrapf(err, "cannot get Index Cache Hit Ratio for the %s database", dbName)
	}
	return nil
}
// CollectGlobalInfo collects cluster-wide information: database counters
// (sampled twice, i.Sleep seconds apart, so calcCountersDiff can compute a
// delta), cluster info, connected clients, wait events, port/datadir,
// tablespaces, settings, slave hosts (version-dependent query) and the
// local postgres processes. Collection continues past individual
// failures; every error encountered is accumulated and returned.
func (i *PGInfo) CollectGlobalInfo(db models.XODB) []error {
	errs := make([]error, 0)
	var err error
	version10, _ := version.NewVersion("10.0.0")
	ch := make(chan interface{}, 2)
	i.logger.Info("Collecting global counters (1st pass)")
	getCounters(db, ch)
	c1, err := waitForCounters(ch)
	if err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot get counters (1st run)"))
	} else {
		for _, counters := range c1 {
			i.Counters[counters.Datname] = append(i.Counters[counters.Datname], counters)
		}
	}
	// Take the second counters sample in the background while the rest of
	// the global information is collected; the result is read from ch below.
	go func() {
		i.logger.Infof("Waiting %d seconds to read counters", i.Sleep)
		time.Sleep(time.Duration(i.Sleep) * time.Second)
		i.logger.Info("Collecting global counters (2nd pass)")
		getCounters(db, ch)
	}()
	i.logger.Info("Collecting Cluster information")
	if i.ClusterInfo, err = models.GetClusterInfos(db); err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot get cluster info"))
	}
	i.logger.Info("Collecting Connected Clients information")
	if i.ConnectedClients, err = models.GetConnectedClients(db); err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot get the connected clients list"))
	}
	i.logger.Info("Collecting Database Wait Events information")
	if i.DatabaseWaitEvents, err = models.GetDatabaseWaitEvents(db); err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot get databases wait events"))
	}
	i.logger.Info("Collecting Global Wait Events information")
	if i.GlobalWaitEvents, err = models.GetGlobalWaitEvents(db); err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot get Global Wait Events"))
	}
	i.logger.Info("Collecting Port and Data Dir information")
	if i.PortAndDatadir, err = models.GetPortAndDatadir(db); err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot get Port and Dir"))
	}
	i.logger.Info("Collecting Tablespaces information")
	if i.Tablespaces, err = models.GetTablespaces(db); err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot get Tablespaces"))
	}
	i.logger.Info("Collecting Instance Settings information")
	if i.Settings, err = models.GetSettings(db); err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot get instance settings"))
	}
	// The replication-hosts query differs before and after PostgreSQL 10.
	if i.ServerVersion.LessThan(version10) {
		i.logger.Info("Collecting Slave Hosts (PostgreSQL < 10)")
		if i.SlaveHosts96, err = models.GetSlaveHosts96s(db); err != nil {
			// Typo fix: "Postgre" -> "PostgreSQL".
			errs = append(errs, errors.Wrap(err, "Cannot get slave hosts on PostgreSQL < 10"))
		}
	}
	if !i.ServerVersion.LessThan(version10) {
		i.logger.Info("Collecting Slave Hosts (PostgreSQL 10+)")
		if i.SlaveHosts10, err = models.GetSlaveHosts10s(db); err != nil {
			errs = append(errs, errors.Wrap(err, "Cannot get slave hosts in PostgreSQL 10+"))
		}
	}
	i.logger.Info("Waiting for counters information")
	c2, err := waitForCounters(ch)
	if err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot read counters (2nd run)"))
	} else {
		for _, counters := range c2 {
			i.Counters[counters.Datname] = append(i.Counters[counters.Datname], counters)
		}
		i.calcCountersDiff(i.Counters)
	}
	i.logger.Info("Collecting processes command line information")
	if err := i.collectProcesses(); err != nil {
		errs = append(errs, errors.Wrap(err, "Cannot collect processes information"))
	}
	i.logger.Info("Finished collecting global information")
	return errs
}
// SetLogger replaces the instance's logger with the external logrus
// logger l.
func (i *PGInfo) SetLogger(l *logrus.Logger) {
	i.logger = l
}
// SetLogLevel changes the log level of the instance's current logger.
func (i *PGInfo) SetLogLevel(level logrus.Level) {
	i.logger.SetLevel(level)
}
// getCounters queries the per-database counters and publishes either the
// resulting slice or the query error on ch (exactly one value is sent).
func getCounters(db models.XODB, ch chan interface{}) {
	counters, err := models.GetCounters(db)
	if err != nil {
		ch <- err
		return
	}
	ch <- counters
}
// waitForCounters blocks until getCounters publishes on ch and returns
// either the counters slice or the error that was sent.
func waitForCounters(ch chan interface{}) ([]*models.Counters, error) {
	received := <-ch
	switch v := received.(type) {
	case error:
		return nil, v
	default:
		return v.([]*models.Counters), nil
	}
}
// parseServerVersion extracts three numeric groups (1-2, 2 and 2 digits)
// from v and builds a dotted version from them — presumably v is a
// PostgreSQL numeric version string; confirm against GetServerVersion.
func parseServerVersion(v string) (*version.Version, error) {
	groups := regexp.MustCompile(`(\d?\d)(\d\d)(\d\d)`).FindStringSubmatch(v)
	if len(groups) != 4 {
		return nil, fmt.Errorf("cannot parse version %s", v)
	}
	dotted := fmt.Sprintf("%s.%s.%s", groups[1], groups[2], groups[3])
	return version.NewVersion(dotted)
}
// calcCountersDiff appends, for every database, an extra Counters entry
// holding the field-by-field difference between the second and first
// samples (c[1] - c[0]).
// NOTE(review): assumes each slice already holds at least two samples;
// fewer would panic on the c[1] index — confirm against the caller
// (CollectGlobalInfo only calls this after both passes succeed).
func (i *PGInfo) calcCountersDiff(counters map[models.Name][]*models.Counters) {
for dbName, c := range counters {
i.logger.Debugf("Calculating counters diff for %s database", dbName)
diff := &models.Counters{
Datname: dbName,
Numbackends: c[1].Numbackends - c[0].Numbackends,
XactCommit: c[1].XactCommit - c[0].XactCommit,
XactRollback: c[1].XactRollback - c[0].XactRollback,
BlksRead: c[1].BlksRead - c[0].BlksRead,
BlksHit: c[1].BlksHit - c[0].BlksHit,
TupReturned: c[1].TupReturned - c[0].TupReturned,
TupFetched: c[1].TupFetched - c[0].TupFetched,
TupInserted: c[1].TupInserted - c[0].TupInserted,
TupUpdated: c[1].TupUpdated - c[0].TupUpdated,
TupDeleted: c[1].TupDeleted - c[0].TupDeleted,
Conflicts: c[1].Conflicts - c[0].Conflicts,
TempFiles: c[1].TempFiles - c[0].TempFiles,
TempBytes: c[1].TempBytes - c[0].TempBytes,
Deadlocks: c[1].Deadlocks - c[0].Deadlocks,
}
counters[dbName] = append(counters[dbName], diff)
// Debug trace of every minuend/subtrahend pair used above.
i.logger.Debugf("Numbackends : %v - %v", c[1].Numbackends, c[0].Numbackends)
i.logger.Debugf("XactCommit : %v - %v", c[1].XactCommit, c[0].XactCommit)
i.logger.Debugf("XactRollback: %v - %v", c[1].XactRollback, c[0].XactRollback)
i.logger.Debugf("BlksRead : %v - %v", c[1].BlksRead, c[0].BlksRead)
i.logger.Debugf("BlksHit : %v - %v", c[1].BlksHit, c[0].BlksHit)
i.logger.Debugf("TupReturned : %v - %v", c[1].TupReturned, c[0].TupReturned)
i.logger.Debugf("TupFetched : %v - %v", c[1].TupFetched, c[0].TupFetched)
i.logger.Debugf("TupInserted : %v - %v", c[1].TupInserted, c[0].TupInserted)
i.logger.Debugf("TupUpdated : %v - %v", c[1].TupUpdated, c[0].TupUpdated)
i.logger.Debugf("TupDeleted : %v - %v", c[1].TupDeleted, c[0].TupDeleted)
i.logger.Debugf("Conflicts : %v - %v", c[1].Conflicts, c[0].Conflicts)
i.logger.Debugf("TempFiles : %v - %v", c[1].TempFiles, c[0].TempFiles)
i.logger.Debugf("TempBytes : %v - %v", c[1].TempBytes, c[0].TempBytes)
i.logger.Debugf("Deadlocks : %v - %v", c[1].Deadlocks, c[0].Deadlocks)
i.logger.Debugf("---")
}
}
// collectProcesses scans the local OS process table and records, in
// i.Processes, the PID and command line of every process whose command
// line looks like a postgres server binary invocation.
func (i *PGInfo) collectProcesses() error {
	procs, err := process.Processes()
	if err != nil {
		return err
	}
	// Perf/correctness fix: the original recompiled the pattern via
	// regexp.MatchString on every iteration and silently discarded the
	// compile error; compile the constant pattern once instead.
	postgresRe := regexp.MustCompile(`^.*?/postgres\s.*$`)
	i.Processes = make([]Process, 0)
	for _, proc := range procs {
		cmdLine, err := proc.Cmdline()
		if err != nil {
			// The process may have exited or be inaccessible; skip it.
			continue
		}
		if postgresRe.MatchString(cmdLine) {
			i.Processes = append(i.Processes, Process{PID: proc.Pid, CmdLine: cmdLine})
		}
	}
	return nil
}

View File

@@ -11,6 +11,7 @@ import (
"time"
"github.com/Masterminds/semver"
"github.com/kr/pretty"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
@@ -157,6 +158,8 @@ func TestExplain(t *testing.T) {
if err != nil {
t.Fatalf("cannot load sample %s: %s", dir+file.Name(), err)
}
pretty.Println(eq)
query, err := bson.MarshalExtJSON(eq, true, true)
if err != nil {
t.Fatalf("cannot marshal json %s: %s", dir+file.Name(), err)

View File

@@ -38,9 +38,10 @@ const (
)
var (
Build string = "01-01-1980"
GoVersion string = "1.8"
Version string = "3.0.1"
Build string = "01-01-1980" //nolint
GoVersion string = "1.8" //nolint
Version string = "3.0.1" //nolint
Commit string //nolint
)
type cliOptions struct {
@@ -90,6 +91,7 @@ func main() {
fmt.Println(TOOLNAME)
fmt.Printf("Version %s\n", Version)
fmt.Printf("Build: %s using %s\n", Build, GoVersion)
fmt.Printf("Commit: %s\n", Commit)
return
}
@@ -98,12 +100,10 @@ func main() {
advice, err := versioncheck.CheckUpdates(TOOLNAME, Version)
if err != nil {
log.Infof("cannot check version updates: %s", err.Error())
} else {
if advice != "" {
} else if advice != "" {
log.Warn(advice)
}
}
}
log.Debugf("Command line options:\n%+v\n", opts)

View File

@@ -41,9 +41,10 @@ const (
)
var (
Build string = "01-01-1980"
GoVersion string = "1.8"
Version string = "3.0.1"
Build string = "01-01-1980" // nolint
GoVersion string = "1.8" // nolint
Version string = "3.0.1" // nolint
Commit string // nolint
)
type TimedStats struct {
@@ -126,20 +127,20 @@ type clusterwideInfo struct {
}
type cliOptions struct {
Help bool
Host string
User string
Password string
AuthDB string
LogLevel string
OutputFormat string
SSLCAFile string
SSLPEMKeyFile string
RunningOpsSamples int
RunningOpsInterval int
Help bool
Version bool
NoVersionCheck bool
NoRunningOps bool
OutputFormat string
RunningOpsSamples int
RunningOpsInterval int
SSLCAFile string
SSLPEMKeyFile string
}
type collectedInfo struct {
@@ -179,6 +180,7 @@ func main() {
fmt.Println(TOOLNAME)
fmt.Printf("Version %s\n", Version)
fmt.Printf("Build: %s using %s\n", Build, GoVersion)
fmt.Printf("Commit: %s\n", Commit)
return
}
@@ -187,12 +189,10 @@ func main() {
advice, err := versioncheck.CheckUpdates(TOOLNAME, Version)
if err != nil {
log.Infof("cannot check version updates: %s", err.Error())
} else {
if advice != "" {
} else if advice != "" {
log.Infof(advice)
}
}
}
ctx := context.Background()
clientOptions := getClientOptions(opts)
@@ -203,9 +203,12 @@ func main() {
if err := client.Connect(ctx); err != nil {
log.Fatalf("Cannot connect to MongoDB: %s", err)
}
defer client.Disconnect(ctx)
defer client.Disconnect(ctx) // nolint
hostnames, err := util.GetHostnames(ctx, client)
if err != nil {
log.Errorf("Cannot get hostnames: %s", err)
}
log.Debugf("hostnames: %v", hostnames)
ci := &collectedInfo{}
@@ -288,39 +291,39 @@ func formatResults(ci *collectedInfo, format string) ([]byte, error) {
t := template.Must(template.New("replicas").Parse(templates.Replicas))
if err := t.Execute(buf, ci.ReplicaMembers); err != nil {
return nil, errors.Wrap(err, "cannnot parse replicas section of the output template")
return nil, errors.Wrap(err, "cannot parse replicas section of the output template")
}
t = template.Must(template.New("hosttemplateData").Parse(templates.HostInfo))
if err := t.Execute(buf, ci.HostInfo); err != nil {
return nil, errors.Wrap(err, "cannnot parse hosttemplateData section of the output template")
return nil, errors.Wrap(err, "cannot parse hosttemplateData section of the output template")
}
t = template.Must(template.New("runningOps").Parse(templates.RunningOps))
if err := t.Execute(buf, ci.RunningOps); err != nil {
return nil, errors.Wrap(err, "cannnot parse runningOps section of the output template")
return nil, errors.Wrap(err, "cannot parse runningOps section of the output template")
}
t = template.Must(template.New("ssl").Parse(templates.Security))
if err := t.Execute(buf, ci.SecuritySettings); err != nil {
return nil, errors.Wrap(err, "cannnot parse ssl section of the output template")
return nil, errors.Wrap(err, "cannot parse ssl section of the output template")
}
if ci.OplogInfo != nil && len(ci.OplogInfo) > 0 {
t = template.Must(template.New("oplogInfo").Parse(templates.Oplog))
if err := t.Execute(buf, ci.OplogInfo[0]); err != nil {
return nil, errors.Wrap(err, "cannnot parse oplogInfo section of the output template")
return nil, errors.Wrap(err, "cannot parse oplogInfo section of the output template")
}
}
t = template.Must(template.New("clusterwide").Parse(templates.Clusterwide))
if err := t.Execute(buf, ci.ClusterWideInfo); err != nil {
return nil, errors.Wrap(err, "cannnot parse clusterwide section of the output template")
return nil, errors.Wrap(err, "cannot parse clusterwide section of the output template")
}
t = template.Must(template.New("balancer").Parse(templates.BalancerStats))
if err := t.Execute(buf, ci.BalancerStats); err != nil {
return nil, errors.Wrap(err, "cannnot parse balancer section of the output template")
return nil, errors.Wrap(err, "cannot parse balancer section of the output template")
}
}
@@ -566,7 +569,8 @@ func getNodeType(ctx context.Context, client *mongo.Client) (string, error) {
return "mongod", nil
}
func getOpCountersStats(ctx context.Context, client *mongo.Client, count int, sleep time.Duration) (*opCounters, error) {
func getOpCountersStats(ctx context.Context, client *mongo.Client, count int,
sleep time.Duration) (*opCounters, error) {
oc := &opCounters{}
prevOpCount := &opCounters{}
ss := proto.ServerStatus{}

1
src/go/pt-pg-summary/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
dist

View File

@@ -0,0 +1,37 @@
# This is an example goreleaser.yaml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
before:
hooks:
# you may remove this if you don't use vgo
# - go mod download
# you may remove this if you don't need go generate
- go generate ./...
builds:
-
binary: pt-pg-summary
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- amd64
ignore:
- goos: darwin
- goarch: 386
archive:
replacements:
darwin: Darwin
linux: Linux
windows: Windows
386: i386
amd64: x86_64
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'

View File

@@ -0,0 +1,26 @@
version: '2.2'
services:
  postgres9:
    # FIX: the override variable was MYSQL_IMAGE (copy/paste leftover);
    # every service in this file uses POSTGRE_IMAGE.
    image: ${POSTGRE_IMAGE:-postgres:9.6}
    ports:
      - ${POSTGRE_HOST:-127.0.0.1}:${POSTGRE_96_PORT:-6432}:5432
    environment:
      - POSTGRES_PASSWORD=root
  postgres10:
    image: ${POSTGRE_IMAGE:-postgres:10.7}
    ports:
      - ${POSTGRE_HOST:-127.0.0.1}:${POSTGRE_10_PORT:-6433}:5432
    environment:
      - POSTGRES_PASSWORD=root
  postgres11:
    image: ${POSTGRE_IMAGE:-postgres:11}
    ports:
      - ${POSTGRE_HOST:-127.0.0.1}:${POSTGRE_11_PORT:-6434}:5432
    environment:
      - POSTGRES_PASSWORD=root
  postgres12:
    image: ${POSTGRE_IMAGE:-postgres:12}
    ports:
      - ${POSTGRE_HOST:-127.0.0.1}:${POSTGRE_12_PORT:-6435}:5432
    environment:
      - POSTGRES_PASSWORD=root

View File

@@ -0,0 +1,79 @@
package tu // test utils
import (
"log"
"os"
"os/exec"
"strings"
)
const (
ipv4Host = "127.0.0.1"
ipv6Host = "::1"
username = "postgres"
password = "root"
ipv4PG9Port = "6432"
ipv4PG10Port = "6433"
ipv4PG11Port = "6434"
ipv4PG12Port = "6435"
ipv6PG9Port = "6432"
ipv6PG10Port = "6432"
ipv6PG11Port = "6432"
ipv6PG12Port = "6432"
pg9Container = "pt-pg-summary_postgres9_1"
pg10Container = "pt-pg-summary_postgres10_1"
pg11Container = "pt-pg-summary_postgres11_1"
pg12Container = "pt-pg-summary_postgres12_1"
)
var (
	// IPv4Host env(PG_IPV4_HOST) or 127.0.0.1
	IPv4Host = getVar("PG_IPV4_HOST", ipv4Host)
	// IPv6Host env(PG_IPV6_HOST) or ::1
	IPv6Host = getVar("PG_IPV6_HOST", ipv6Host)
	// Password env(PG_PASSWORD) or root
	Password = getVar("PG_PASSWORD", password)
	// Username env(PG_USERNAME) or PG
	Username = getVar("PG_USERNAME", username)
	// Per-version host ports, overridable through the environment.
	IPv4PG9Port  = getVar("PG_IPV4_9_PORT", ipv4PG9Port)
	IPv4PG10Port = getVar("PG_IPV4_10_PORT", ipv4PG10Port)
	IPv4PG11Port = getVar("PG_IPV4_11_PORT", ipv4PG11Port)
	IPv4PG12Port = getVar("PG_IPV4_12_PORT", ipv4PG12Port)
	IPv6PG9Port  = getVar("PG_IPV6_9_PORT", ipv6PG9Port)
	IPv6PG10Port = getVar("PG_IPV6_10_PORT", ipv6PG10Port)
	IPv6PG11Port = getVar("PG_IPV6_11_PORT", ipv6PG11Port)
	IPv6PG12Port = getVar("PG_IPV6_12_PORT", ipv6PG12Port)
	// Container IPs used by tests to simulate a "remote" host.
	// FIX: these previously all inspected the postgres9 container;
	// each entry must resolve the IP of its own version's container.
	PG9DockerIP  = getContainerIP(pg9Container)
	PG10DockerIP = getContainerIP(pg10Container)
	PG11DockerIP = getContainerIP(pg11Container)
	PG12DockerIP = getContainerIP(pg12Container)
	// DefaultPGPort is the port PostgreSQL listens on inside the containers.
	DefaultPGPort = "5432"
)
func getVar(varname, defaultValue string) string {
if v := os.Getenv(varname); v != "" {
return v
}
return defaultValue
}
// getContainerIP resolves the IP address of a running docker container by
// inspecting its network settings. It aborts the process (log.Fatalf) when
// the container cannot be inspected or reports no address, since the test
// environment is unusable without it.
func getContainerIP(container string) string {
	args := []string{"inspect", "-f", "'{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'", container}
	out, err := exec.Command("docker", args...).Output()
	if err != nil {
		log.Fatalf("error getting IP address of %q container: %s", container, err)
	}
	addr := strings.TrimSpace(string(out))
	if addr == "" {
		log.Fatalf("error getting IP address of %q container (empty)", container)
	}
	return addr
}

View File

@@ -0,0 +1,241 @@
package main
import (
"database/sql"
"fmt"
"os"
"strings"
"text/template"
"github.com/alecthomas/kingpin"
"github.com/percona/percona-toolkit/src/go/lib/pginfo"
"github.com/percona/percona-toolkit/src/go/pt-pg-summary/templates"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
_ "github.com/lib/pq"
)
var (
Build string = "01-01-1980" //nolint
Commit string //nolint
GoVersion string = "1.8" //nolint
Version string = "3.0.1" //nolint
)
// connOpts holds the PostgreSQL connection parameters taken from the
// command line; buildConnString/safeConnString turn them into a libpq DSN.
type connOpts struct {
	Host       string // --host (-h)
	Port       int    // --port (-p)
	User       string // --username (-U)
	Password   string // --password (-W)
	DisableSSL bool   // --disable-ssl; adds sslmode=disable to the DSN
}

// cliOptions groups every command line option understood by pt-pg-summary.
// Fields marked "hidden" correspond to flags registered with Hidden()
// because they are not implemented yet.
type cliOptions struct {
	app                 *kingpin.Application // kept so main() can print usage on connection errors
	connOpts            connOpts
	Config              string   // hidden: not implemented yet
	DefaultsFile        string   // hidden: not implemented yet
	ReadSamples         string   // hidden: not implemented yet
	SaveSamples         string   // hidden: not implemented yet
	Databases           []string // --databases; comma-separated values are split in parseCommandLineOpts
	Seconds             int      // --sleep: seconds to sleep while gathering status counters
	AllDatabases        bool
	AskPass             bool // hidden: not implemented yet
	ListEncryptedTables bool // hidden: not implemented yet
	Verbose             bool // info-level logging
	Debug               bool // debug-level logging
}
// main drives pt-pg-summary: parse flags, connect to the server, collect
// global and per-database information, then render the report template to
// stdout.
func main() {
	opts, err := parseCommandLineOpts(os.Args[1:])
	if err != nil {
		fmt.Printf("Cannot parse command line arguments: %s", err)
		os.Exit(1)
	}
	// Default log level is whatever logrus ships with; --verbose and
	// --debug progressively lower the threshold.
	logger := logrus.New()
	if opts.Verbose {
		logger.SetLevel(logrus.InfoLevel)
	}
	if opts.Debug {
		logger.SetLevel(logrus.DebugLevel)
	}
	// First connection goes to the maintenance "postgres" database; the
	// logged DSN comes from safeConnString so the password is masked.
	dsn := buildConnString(opts.connOpts, "postgres")
	logger.Infof("Connecting to the database server using: %s", safeConnString(opts.connOpts, "postgres"))
	db, err := connect(dsn)
	if err != nil {
		logger.Errorf("Cannot connect to the database: %s\n", err)
		opts.app.Usage(os.Args[1:])
		os.Exit(1)
	}
	logger.Infof("Connection OK")
	info, err := pginfo.NewWithLogger(db, opts.Databases, opts.Seconds, logger)
	if err != nil {
		log.Fatalf("Cannot create a data collector instance: %s", err)
	}
	logger.Info("Getting global information")
	// Collection errors are reported but do not abort: a partial summary
	// is still useful.
	errs := info.CollectGlobalInfo(db)
	if len(errs) > 0 {
		logger.Errorf("Cannot collect info")
		for _, err := range errs {
			logger.Error(err)
		}
	}
	logger.Info("Collecting per database information")
	logger.Debugf("Will collect information for these databases: (%T), %v", info.DatabaseNames(), info.DatabaseNames())
	// One short-lived connection per database; failures skip that database
	// rather than aborting the whole run.
	for _, dbName := range info.DatabaseNames() {
		dsn := buildConnString(opts.connOpts, dbName)
		logger.Infof("Connecting to the %q database", dbName)
		conn, err := connect(dsn)
		if err != nil {
			logger.Errorf("Cannot connect to the %s database: %s", dbName, err)
			continue
		}
		if err := info.CollectPerDatabaseInfo(conn, dbName); err != nil {
			logger.Errorf("Cannot collect information for the %s database: %s", dbName, err)
		}
		conn.Close()
	}
	// Render the report; template errors are fatal since there is no
	// other output path.
	masterTmpl, err := template.New("master").Funcs(funcsMap()).Parse(templates.TPL)
	if err != nil {
		log.Fatal(err)
	}
	if err := masterTmpl.ExecuteTemplate(os.Stdout, "report", info); err != nil {
		log.Fatal(err)
	}
}
// connect opens a PostgreSQL connection for the given DSN and verifies it
// with a ping before handing it back, so callers get an early, explicit
// failure instead of a lazy one on first use.
func connect(dsn string) (*sql.DB, error) {
	conn, err := sql.Open("postgres", dsn)
	if err != nil {
		return nil, errors.Wrap(err, "cannot connect to the database")
	}
	if pingErr := conn.Ping(); pingErr != nil {
		return nil, errors.Wrap(pingErr, "cannot connect to the database")
	}
	return conn, nil
}
// funcsMap returns the helper functions made available to the report
// templates. "trim" truncates a string to at most size bytes.
func funcsMap() template.FuncMap {
	trim := func(s string, size int) string {
		if len(s) >= size {
			return s[:size]
		}
		return s
	}
	return template.FuncMap{"trim": trim}
}
// buildConnString assembles a libpq key=value connection string from the
// command line options. Empty/zero options are omitted; dbName defaults to
// "postgres" when empty and is always the last key emitted.
func buildConnString(opts connOpts, dbName string) string {
	if dbName == "" {
		dbName = "postgres"
	}
	var kv []string
	if opts.Host != "" {
		kv = append(kv, fmt.Sprintf("host=%s", opts.Host))
	}
	if opts.Port != 0 {
		kv = append(kv, fmt.Sprintf("port=%d", opts.Port))
	}
	if opts.User != "" {
		kv = append(kv, fmt.Sprintf("user=%s", opts.User))
	}
	if opts.Password != "" {
		kv = append(kv, fmt.Sprintf("password=%s", opts.Password))
	}
	if opts.DisableSSL {
		kv = append(kv, "sslmode=disable")
	}
	kv = append(kv, fmt.Sprintf("dbname=%s", dbName))
	return strings.Join(kv, " ")
}
// safeConnString builds the same connection string as buildConnString but
// with the password masked, so the result is safe to write to the logs.
func safeConnString(opts connOpts, dbName string) string {
	if dbName == "" {
		dbName = "postgres"
	}
	var kv []string
	if opts.Host != "" {
		kv = append(kv, fmt.Sprintf("host=%s", opts.Host))
	}
	if opts.Port != 0 {
		kv = append(kv, fmt.Sprintf("port=%d", opts.Port))
	}
	if opts.User != "" {
		kv = append(kv, fmt.Sprintf("user=%s", opts.User))
	}
	if opts.Password != "" {
		kv = append(kv, "password=******")
	}
	if opts.DisableSSL {
		kv = append(kv, "sslmode=disable")
	}
	kv = append(kv, fmt.Sprintf("dbname=%s", dbName))
	return strings.Join(kv, " ")
}
// parseCommandLineOpts defines and parses the pt-pg-summary command line.
// Comma-separated values passed to --databases are split so the rest of
// the program always sees one database name per slice element. The parse
// error (if any) is returned alongside the partially-filled options so
// main can report it.
func parseCommandLineOpts(args []string) (cliOptions, error) {
	app := kingpin.New("pt-pg-summary", "Percona Toolkit - PostgreSQL Summary")
	// version, commit and date will be set at build time by the compiler -ldflags param
	app.Version(fmt.Sprintf("%s version %s\nGIT commit %s\nDate: %s\nGo version: %s",
		app.Name, Version, Commit, Build, GoVersion))
	opts := cliOptions{app: app}
	app.Flag("ask-pass", "Prompt for a password when connecting to PostgreSQL").
		Hidden().BoolVar(&opts.AskPass) // hidden because it is not implemented yet
	app.Flag("config", "Config file").
		Hidden().StringVar(&opts.Config) // hidden because it is not implemented yet
	app.Flag("databases", "Summarize this comma-separated list of databases. All if not specified").
		StringsVar(&opts.Databases)
	app.Flag("defaults-file", "Only read PostgreSQL options from the given file").
		Hidden().StringVar(&opts.DefaultsFile) // hidden because it is not implemented yet
	app.Flag("host", "Host to connect to").
		Short('h').
		StringVar(&opts.connOpts.Host)
	app.Flag("list-encrypted-tables", "Include a list of the encrypted tables in all databases").
		Hidden().BoolVar(&opts.ListEncryptedTables)
	app.Flag("password", "Password to use when connecting").
		Short('W').
		StringVar(&opts.connOpts.Password)
	app.Flag("port", "Port number to use for connection").
		Short('p').
		IntVar(&opts.connOpts.Port)
	app.Flag("read-samples", "Create a report from the files found in this directory").
		Hidden().StringVar(&opts.ReadSamples) // hidden because it is not implemented yet
	app.Flag("save-samples", "Save the data files used to generate the summary in this directory").
		Hidden().StringVar(&opts.SaveSamples) // hidden because it is not implemented yet
	app.Flag("sleep", "Seconds to sleep when gathering status counters").
		Default("10").IntVar(&opts.Seconds)
	app.Flag("username", "User for login if not current user").
		Short('U').
		StringVar(&opts.connOpts.User)
	// FIX: help text previously read "Diable SSL".
	app.Flag("disable-ssl", "Disable SSL for the connection").
		Default("true").BoolVar(&opts.connOpts.DisableSSL)
	app.Flag("verbose", "Show verbose log").
		Default("false").BoolVar(&opts.Verbose)
	app.Flag("debug", "Show debug information in the logs").
		Default("false").BoolVar(&opts.Debug)
	_, err := app.Parse(args)
	// Flatten "a,b" style values into individual database names.
	dbs := []string{}
	for _, databases := range opts.Databases {
		ds := strings.Split(databases, ",")
		dbs = append(dbs, ds...)
	}
	opts.Databases = dbs
	return opts, err
}

View File

@@ -0,0 +1,44 @@
package main
import (
"fmt"
"os"
"testing"
"github.com/percona/percona-toolkit/src/go/pt-pg-summary/internal/tu"
)
// TestMain is the test entry point. No global setup/teardown is needed;
// it simply runs the suite and propagates its exit code.
func TestMain(m *testing.M) {
	os.Exit(m.Run())
}
// TestConnection verifies that connect() can reach each PostgreSQL version
// started by the docker-compose test environment (see internal/tu for the
// host/port/credential defaults and their env overrides).
func TestConnection(t *testing.T) {
	tests := []struct {
		name     string
		host     string
		port     string
		username string
		password string
	}{
		{"IPv4PG9", tu.IPv4Host, tu.IPv4PG9Port, tu.Username, tu.Password},
		{"IPv4PG10", tu.IPv4Host, tu.IPv4PG10Port, tu.Username, tu.Password},
		{"IPv4PG11", tu.IPv4Host, tu.IPv4PG11Port, tu.Username, tu.Password},
		{"IPv4PG12", tu.IPv4Host, tu.IPv4PG12Port, tu.Username, tu.Password},
		// use IPV6 for PostgreSQL 9
		//{"IPV6", tu.IPv6Host, tu.IPv6PG9Port, tu.Username, tu.Password},
		// use an "external" IP to simulate a remote host
		{"remote_host", tu.PG9DockerIP, tu.DefaultPGPort, tu.Username, tu.Password},
	}
	for _, test := range tests {
		test := test // capture range variable for the subtest closure
		t.Run(test.name, func(t *testing.T) {
			// Always connect to the maintenance "postgres" database.
			dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s sslmode=disable dbname=%s",
				test.host, test.port, test.username, test.password, "postgres")
			if _, err := connect(dsn); err != nil {
				t.Errorf("Cannot connect to the db using %q: %s", dsn, err)
			}
		})
	}
}

View File

@@ -0,0 +1,43 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// AllDatabases represents a row from '[custom all_databases]'.
type AllDatabases struct {
Datname Name // datname
}
// GetAllDatabases runs a custom query, returning results as AllDatabases.
// It lists every non-template database on the server.
func GetAllDatabases(db XODB) ([]*AllDatabases, error) {
	var err error
	// sql query
	var sqlstr = `SELECT datname ` +
		`FROM pg_database ` +
		`WHERE datistemplate = false`
	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()
	// load results
	res := []*AllDatabases{}
	for q.Next() {
		ad := AllDatabases{}
		// scan
		err = q.Scan(&ad.Datname)
		if err != nil {
			return nil, err
		}
		res = append(res, &ad)
	}
	// FIX: surface any error hit during row iteration; without this a
	// partial result set could be returned silently.
	if err = q.Err(); err != nil {
		return nil, err
	}
	return res, nil
}

View File

@@ -0,0 +1,59 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
"time"
)
// Cluster info
type ClusterInfo struct {
Usename string // usename
Time time.Time // time
ClientAddr string // client_addr
ClientHostname sql.NullString // client_hostname
Version string // version
Started time.Time // started
IsSlave bool // is_slave
}
// GetClusterInfos runs a custom query, returning results as ClusterInfo.
// It describes the current backend's session: user, server version,
// postmaster start time and whether the server is in recovery (a slave).
func GetClusterInfos(db XODB) ([]*ClusterInfo, error) {
	var err error
	// sql query
	var sqlstr = `SELECT usename, now() AS "Time", ` +
		`client_addr, ` +
		`client_hostname, ` +
		`version() AS version, ` +
		`pg_postmaster_start_time() AS Started, ` +
		`pg_is_in_recovery() AS "Is_Slave" ` +
		`FROM pg_stat_activity ` +
		`WHERE pid = pg_backend_pid()`
	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()
	// load results
	res := []*ClusterInfo{}
	for q.Next() {
		ci := ClusterInfo{}
		// scan
		err = q.Scan(&ci.Usename, &ci.Time, &ci.ClientAddr, &ci.ClientHostname, &ci.Version, &ci.Started, &ci.IsSlave)
		if err != nil {
			return nil, err
		}
		res = append(res, &ci)
	}
	// FIX: surface any error hit during row iteration; without this a
	// partial result set could be returned silently.
	if err = q.Err(); err != nil {
		return nil, err
	}
	return res, nil
}

View File

@@ -0,0 +1,54 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
)
// Connected clients list
type ConnectedClients struct {
Usename Name // usename
Client sql.NullString // client
State sql.NullString // state
Count sql.NullInt64 // count
}
// GetConnectedClients runs a custom query, returning results as ConnectedClients.
// It aggregates pg_stat_activity by user, client and state, ordered by the
// connection count.
func GetConnectedClients(db XODB) ([]*ConnectedClients, error) {
	var err error
	// sql query
	var sqlstr = `SELECT usename, ` +
		`CASE WHEN client_hostname IS NULL THEN client_addr::text ELSE client_hostname END AS client, ` +
		`state, count(*) ` +
		`FROM pg_stat_activity ` +
		`WHERE state IS NOT NULL ` +
		`GROUP BY 1,2,3 ` +
		`ORDER BY 4 desc,3`
	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()
	// load results
	res := []*ConnectedClients{}
	for q.Next() {
		cc := ConnectedClients{}
		// scan
		err = q.Scan(&cc.Usename, &cc.Client, &cc.State, &cc.Count)
		if err != nil {
			return nil, err
		}
		res = append(res, &cc)
	}
	// FIX: surface any error hit during row iteration; without this a
	// partial result set could be returned silently.
	if err = q.Err(); err != nil {
		return nil, err
	}
	return res, nil
}

View File

@@ -0,0 +1,44 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// Connections represents a row from '[custom connections]'.
type Connections struct {
State string // state
Count int64 // count
}
// GetConnections runs a custom query, returning results as Connections.
// It counts backends grouped by their state (active, idle, ...).
func GetConnections(db XODB) ([]*Connections, error) {
	var err error
	// sql query
	var sqlstr = `SELECT state, count(*) ` +
		`FROM pg_stat_activity ` +
		`GROUP BY 1`
	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()
	// load results
	res := []*Connections{}
	for q.Next() {
		c := Connections{}
		// scan
		err = q.Scan(&c.State, &c.Count)
		if err != nil {
			return nil, err
		}
		res = append(res, &c)
	}
	// FIX: surface any error hit during row iteration; without this a
	// partial result set could be returned silently.
	if err = q.Err(); err != nil {
		return nil, err
	}
	return res, nil
}

View File

@@ -0,0 +1,60 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// Counters represents a row from '[custom counters]'.
type Counters struct {
Datname Name // datname
Numbackends int // numbackends
XactCommit int64 // xact_commit
XactRollback int64 // xact_rollback
BlksRead int64 // blks_read
BlksHit int64 // blks_hit
TupReturned int64 // tup_returned
TupFetched int64 // tup_fetched
TupInserted int64 // tup_inserted
TupUpdated int64 // tup_updated
TupDeleted int64 // tup_deleted
Conflicts int64 // conflicts
TempFiles int64 // temp_files
TempBytes int64 // temp_bytes
Deadlocks int64 // deadlocks
}
// GetCounters runs a custom query, returning results as Counters.
// It reads the per-database activity counters from pg_stat_database.
func GetCounters(db XODB) ([]*Counters, error) {
	var err error
	// sql query
	var sqlstr = `SELECT datname, numbackends, xact_commit, xact_rollback, ` +
		`blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, ` +
		`tup_updated, tup_deleted, conflicts, temp_files, ` +
		`temp_bytes, deadlocks ` +
		`FROM pg_stat_database ` +
		`ORDER BY datname`
	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()
	// load results
	res := []*Counters{}
	for q.Next() {
		c := Counters{}
		// scan
		err = q.Scan(&c.Datname, &c.Numbackends, &c.XactCommit, &c.XactRollback, &c.BlksRead, &c.BlksHit, &c.TupReturned, &c.TupFetched, &c.TupInserted, &c.TupUpdated, &c.TupDeleted, &c.Conflicts, &c.TempFiles, &c.TempBytes, &c.Deadlocks)
		if err != nil {
			return nil, err
		}
		res = append(res, &c)
	}
	// FIX: surface any error hit during row iteration; without this a
	// partial result set could be returned silently.
	if err = q.Err(); err != nil {
		return nil, err
	}
	return res, nil
}

View File

@@ -0,0 +1,43 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// Databases
type Databases struct {
Datname Name // datname
PgSizePretty string // pg_size_pretty
}
// GetDatabases runs a custom query, returning results as Databases.
// It lists each database together with its human-readable size.
func GetDatabases(db XODB) ([]*Databases, error) {
	var err error
	// sql query
	var sqlstr = `SELECT datname, pg_size_pretty(pg_database_size(datname)) ` +
		`FROM pg_stat_database`
	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()
	// load results
	res := []*Databases{}
	for q.Next() {
		d := Databases{}
		// scan
		err = q.Scan(&d.Datname, &d.PgSizePretty)
		if err != nil {
			return nil, err
		}
		res = append(res, &d)
	}
	// FIX: surface any error hit during row iteration; without this a
	// partial result set could be returned silently.
	if err = q.Err(); err != nil {
		return nil, err
	}
	return res, nil
}

View File

@@ -0,0 +1,58 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
)
// DatabaseWaitEvents represents a row from '[custom database_wait_events]'.
type DatabaseWaitEvents struct {
Relname Name // relname
Relkind uint8 // relkind
WaitEventType sql.NullString // wait_event_type
WaitEvent sql.NullString // wait_event
Datname Name // datname
Count sql.NullInt64 // count
}
// GetDatabaseWaitEvents runs a custom query, returning results as DatabaseWaitEvents.
// It joins pg_locks with pg_stat_database/pg_class/pg_stat_activity to
// report wait events per relation and database.
func GetDatabaseWaitEvents(db XODB) ([]*DatabaseWaitEvents, error) {
	var err error
	// sql query
	var sqlstr = `SELECT c.relname, c.relkind, d.wait_event_type, d.wait_event, b.datname, count(*) ` +
		`FROM pg_locks a ` +
		`JOIN pg_stat_database b ON a.database=b.datid ` +
		`JOIN pg_class c ON a.relation=c.oid ` +
		`JOIN pg_stat_activity d ON a.pid = d.pid ` +
		`WHERE a.relation IS NOT NULL ` +
		`AND a.database IS NOT NULL ` +
		`AND (d.wait_event_type IS NOT NULL OR d.wait_event IS NOT NULL) ` +
		`GROUP BY 1,2,3,4,5`
	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()
	// load results
	res := []*DatabaseWaitEvents{}
	for q.Next() {
		dwe := DatabaseWaitEvents{}
		// scan
		err = q.Scan(&dwe.Relname, &dwe.Relkind, &dwe.WaitEventType, &dwe.WaitEvent, &dwe.Datname, &dwe.Count)
		if err != nil {
			return nil, err
		}
		res = append(res, &dwe)
	}
	// FIX: surface any error hit during row iteration; without this a
	// partial result set could be returned silently.
	if err = q.Err(); err != nil {
		return nil, err
	}
	return res, nil
}

View File

@@ -0,0 +1,38 @@
version: '2.2'
services:
  postgres9:
    # FIX: the override variable was MYSQL_IMAGE (copy/paste leftover);
    # every service in this file uses POSTGRE_IMAGE.
    image: ${POSTGRE_IMAGE:-postgres:9.6}
    ports:
      - ${POSTGRE_HOST:-127.0.0.1}:${POSTGRE_96_PORT:-6432}:5432
    environment:
      - POSTGRES_PASSWORD=root
    networks:
      app_net:
        ipv6_address: 2001:3200:3200::20
  postgres10:
    image: ${POSTGRE_IMAGE:-postgres:10.7}
    ports:
      - ${POSTGRE_HOST:-127.0.0.1}:${POSTGRE_10_PORT:-6433}:5432
    environment:
      - POSTGRES_PASSWORD=root
  postgres11:
    image: ${POSTGRE_IMAGE:-postgres:11}
    ports:
      - ${POSTGRE_HOST:-127.0.0.1}:${POSTGRE_11_PORT:-6434}:5432
    environment:
      - POSTGRES_PASSWORD=root
  postgres12:
    image: ${POSTGRE_IMAGE:-postgres:12}
    ports:
      - ${POSTGRE_HOST:-127.0.0.1}:${POSTGRE_12_PORT:-6435}:5432
    environment:
      - POSTGRES_PASSWORD=root
networks:
  app_net:
    enable_ipv6: true
    driver: bridge
    ipam:
      config:
        - subnet: "2001:3200:3200::/64"
          # gateway: 2001:3200:3200::1

View File

@@ -0,0 +1,296 @@
#!/bin/bash
# Regenerates the xo model files in ./models by introspecting live
# PostgreSQL servers started from the docker-compose file next to this
# script. Requires the `xo` code generator and docker-compose.
USERNAME=postgres
PASSWORD=root
PORT9=6432   # host port of the postgres 9.6 container
PORT10=6433  # host port of the postgres 10 container
DO_CLEANUP=0
# Start the containers only when they are not already running; remember
# that we did, so the cleanup at the end only tears down what we started.
if [ ! "$(docker ps -q -f name=pt-pg-summary_postgres9_1)" ]; then
DO_CLEANUP=1
docker-compose up -d --force-recreate
sleep 20
fi
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-interpolate \
--query-type AllDatabases \
--package models \
--out ./ << ENDSQL
SELECT datname
FROM pg_database
WHERE datistemplate = false
ENDSQL
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-interpolate \
--query-only-one \
--query-type PortAndDatadir \
--package models \
--out ./ << ENDSQL
SELECT name,
setting
FROM pg_settings
WHERE name IN ('port','data_directory')
ENDSQL
COMMENT="Tablespaces"
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-interpolate \
--query-type Tablespaces \
--query-type-comment "$COMMENT" \
--package models \
--out ./ << ENDSQL
SELECT spcname AS Name,
pg_catalog.pg_get_userbyid(spcowner) AS Owner,
pg_catalog.pg_tablespace_location(oid) AS Location
FROM pg_catalog.pg_tablespace
ORDER BY 1
ENDSQL
FIELDS='Usename string,Time time.Time,ClientAddr string,ClientHostname sql.NullString,Version string,Started time.Time,IsSlave bool'
COMMENT='Cluster info'
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
-k smart \
--query-type ClusterInfo \
--query-fields "$FIELDS" \
--query-interpolate \
--query-type-comment "$COMMENT" \
--query-allow-nulls \
--package models \
--out ./ << ENDSQL
SELECT usename, now() AS "Time",
client_addr,
client_hostname,
version() AS version,
pg_postmaster_start_time() AS Started,
pg_is_in_recovery() AS "Is_Slave"
FROM pg_stat_activity
WHERE pid = pg_backend_pid()
ENDSQL
COMMENT="Databases"
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-interpolate \
--query-type-comment "$COMMENT" \
--query-type Databases \
--package models \
--out ./ << ENDSQL
SELECT datname, pg_size_pretty(pg_database_size(datname))
FROM pg_stat_database
ENDSQL
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-interpolate \
--query-type Connections \
--package models \
--out ./ << ENDSQL
SELECT state, count(*)
FROM pg_stat_activity
GROUP BY 1
ENDSQL
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-interpolate \
--query-trim \
--query-type Counters \
--package models \
--out ./ << ENDSQL
SELECT datname, numbackends, xact_commit, xact_rollback,
blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted,
tup_updated, tup_deleted, conflicts, temp_files,
temp_bytes, deadlocks
FROM pg_stat_database
ORDER BY datname
ENDSQL
FIELDS='Relname string, Relkind string,Datname string,Count sql.NullInt64'
COMMENT='Table Access'
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-type TableAccess \
--query-fields "$FIELDS" \
--query-type-comment "$COMMENT" \
--query-interpolate \
--query-allow-nulls \
--package models \
--out ./ << ENDSQL
SELECT c.relname, c.relkind, b.datname, count(*) FROM pg_locks a
JOIN pg_stat_database b
ON a.database=b.datid
JOIN pg_class c
ON a.relation=c.oid
WHERE a.relation IS NOT NULL
AND a.database IS NOT NULL
GROUP BY 1,2,3
ENDSQL
FIELDS='Name string,Ratio sql.NullFloat64'
COMMENT='Table cache hit ratio'
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable --query-mode --query-trim \
--query-type TableCacheHitRatio \
--query-fields "$FIELDS" \
--query-interpolate \
--query-only-one \
--query-type-comment "$COMMENT" \
--package models \
--out ./ << ENDSQL
SELECT 'cache hit rate' AS name,
CASE WHEN (sum(heap_blks_read) + sum(idx_blks_hit)) > 0
THEN
sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read))
ELSE 0
END AS ratio
FROM pg_statio_user_tables
ENDSQL
FIELDS='Name string,Ratio sql.NullFloat64'
COMMENT='Table cache hit ratio'
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-fields "$FIELDS" \
--query-trim \
--query-allow-nulls \
--query-only-one \
--query-type IndexCacheHitRatio \
--query-type-comment "$COMMENT" \
--package models \
--out ./ << ENDSQL
SELECT 'index hit rate' AS name,
CASE WHEN sum(idx_blks_hit) IS NULL
THEN 0
ELSE (sum(idx_blks_hit)) / sum(idx_blks_hit + idx_blks_read)
END AS ratio
FROM pg_statio_user_indexes
WHERE (idx_blks_hit + idx_blks_read) > 0
ENDSQL
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-type GlobalWaitEvents \
--package models \
--out ./ << ENDSQL
SELECT wait_event_type, wait_event, count(*)
FROM pg_stat_activity
WHERE wait_event_type IS NOT NULL
OR wait_event IS NOT NULL
GROUP BY 1,2
ENDSQL
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-interpolate \
--query-allow-nulls \
--query-type DatabaseWaitEvents \
--package models \
--out ./ << ENDSQL
SELECT c.relname, c.relkind, d.wait_event_type, d.wait_event, b.datname, count(*)
FROM pg_locks a
JOIN pg_stat_database b ON a.database=b.datid
JOIN pg_class c ON a.relation=c.oid
JOIN pg_stat_activity d ON a.pid = d.pid
WHERE a.relation IS NOT NULL
AND a.database IS NOT NULL
AND (d.wait_event_type IS NOT NULL OR d.wait_event IS NOT NULL)
GROUP BY 1,2,3,4,5
ENDSQL
COMMENT="Connected clients list"
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-type ConnectedClients \
--query-type-comment "$COMMENT" \
--query-allow-nulls \
--query-interpolate \
--package models \
--out ./ << ENDSQL
SELECT usename,
CASE WHEN client_hostname IS NULL THEN client_addr::text ELSE client_hostname END AS client,
state, count(*)
FROM pg_stat_activity
WHERE state IS NOT NULL
GROUP BY 1,2,3
ORDER BY 4 desc,3
ENDSQL
# Postgre 9
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-type SlaveHosts96 \
--query-interpolate \
--query-allow-nulls \
--package models \
--out ./ << ENDSQL
SELECT application_name, client_addr, state, sent_offset - (replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
FROM ( SELECT application_name, client_addr, client_hostname, state,
('x' || lpad(split_part(sent_location::TEXT, '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
('x' || lpad(split_part(replay_location::TEXT, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
('x' || lpad(split_part(sent_location::TEXT, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
('x' || lpad(split_part(replay_location::TEXT, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
FROM pg_stat_replication ) AS s
ENDSQL
# Postgre 10
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT10}/?sslmode=disable \
--query-mode \
--query-trim \
--query-interpolate \
--query-allow-nulls \
--query-type SlaveHosts10 \
--package models \
--out ./ << ENDSQL
SELECT application_name, client_addr, state, sent_offset - (replay_offset - (sent_lsn - replay_lsn) * 255 * 16 ^ 6 ) AS byte_lag
FROM ( SELECT application_name, client_addr, client_hostname, state,
('x' || lpad(split_part(sent_lsn::TEXT, '/', 1), 8, '0'))::bit(32)::bigint AS sent_lsn,
('x' || lpad(split_part(replay_lsn::TEXT, '/', 1), 8, '0'))::bit(32)::bigint AS replay_lsn,
('x' || lpad(split_part(sent_lsn::TEXT, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
('x' || lpad(split_part(replay_lsn::TEXT, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
FROM pg_stat_replication ) AS s
ENDSQL
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT10}/?sslmode=disable \
--query-mode \
--query-trim \
--query-only-one \
--query-type ServerVersion \
--package models \
--out ./ << ENDSQL
SELECT current_setting('server_version_num') AS version
ENDSQL
FIELDS='Name string,Setting string'
COMMENT='Settings'
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-fields "$FIELDS" \
--query-trim \
--query-allow-nulls \
--query-type Setting \
--query-type-comment "$COMMENT" \
--package models \
--out ./ << ENDSQL
SELECT name, setting
FROM pg_settings
ENDSQL
# Tear down the containers only when this script started them.
# FIX: quote the variable and use the portable `-eq` comparison; the
# previous unquoted `[ $DO_CLEANUP == 1 ]` breaks if the variable is ever
# empty and `==` is not POSIX `test` syntax.
if [ "$DO_CLEANUP" -eq 1 ]; then
docker-compose down --volumes
fi

View File

@@ -0,0 +1,47 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// GlobalWaitEvents represents a row from '[custom global_wait_events]'.
type GlobalWaitEvents struct {
	WaitEventType string // wait_event_type
	WaitEvent     string // wait_event
	Count         int64  // count
}

// GetGlobalWaitEvents runs a custom query, returning results as
// GlobalWaitEvents: the number of sessions in pg_stat_activity grouped by
// (wait_event_type, wait_event), skipping rows where both columns are NULL.
// NOTE(review): the WHERE uses OR, so a row with exactly one NULL column can
// still be returned and would fail the Scan into a plain string — confirm
// pg_stat_activity cannot produce such rows on the supported versions.
func GetGlobalWaitEvents(db XODB) ([]*GlobalWaitEvents, error) {
	// sql query
	const sqlstr = `SELECT wait_event_type, wait_event, count(*) ` +
		`FROM pg_stat_activity ` +
		`WHERE wait_event_type IS NOT NULL ` +
		`OR wait_event IS NOT NULL ` +
		`GROUP BY 1,2`

	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	// load results
	res := []*GlobalWaitEvents{}
	for q.Next() {
		gwe := GlobalWaitEvents{}
		if err := q.Scan(&gwe.WaitEventType, &gwe.WaitEvent, &gwe.Count); err != nil {
			return nil, err
		}
		res = append(res, &gwe)
	}
	// Fix: surface iteration errors instead of silently returning a
	// truncated result set (the generated code never checked q.Err()).
	if err := q.Err(); err != nil {
		return nil, err
	}

	return res, nil
}

View File

@@ -0,0 +1,38 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
)
// IndexCacheHitRatio holds the aggregate index-block cache hit ratio.
type IndexCacheHitRatio struct {
	Name  string          // name
	Ratio sql.NullFloat64 // ratio
}

// GetIndexCacheHitRatio runs a custom query, returning results as
// IndexCacheHitRatio: the index-block cache hit ratio aggregated over all
// user indexes that saw at least one block read or hit.
func GetIndexCacheHitRatio(db XODB) (*IndexCacheHitRatio, error) {
	const sqlstr = `SELECT 'index hit rate' AS name, ` +
		`CASE WHEN sum(idx_blks_hit) IS NULL ` +
		`THEN 0 ` +
		`ELSE (sum(idx_blks_hit)) / sum(idx_blks_hit + idx_blks_read) ` +
		`END AS ratio ` +
		`FROM pg_statio_user_indexes ` +
		`WHERE (idx_blks_hit + idx_blks_read) > 0`

	XOLog(sqlstr)

	result := &IndexCacheHitRatio{}
	if scanErr := db.QueryRow(sqlstr).Scan(&result.Name, &result.Ratio); scanErr != nil {
		return nil, scanErr
	}
	return result, nil
}

View File

@@ -0,0 +1,31 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// PortAndDatadir represents a row from '[custom port_and_datadir]'.
type PortAndDatadir struct {
	Name    string // name
	Setting string // setting
}

// GetPortAndDatadir runs a custom query, returning results as PortAndDatadir.
// NOTE(review): the query matches two rows ('port' and 'data_directory') but
// QueryRow keeps only the first one returned, so the caller receives a
// single, order-dependent setting — confirm this is intentional.
func GetPortAndDatadir(db XODB) (*PortAndDatadir, error) {
	// sql query (const, for consistency with the other generated queries)
	const sqlstr = `SELECT name, ` +
		`setting ` +
		`FROM pg_settings ` +
		`WHERE name IN ('port','data_directory')`

	// run query
	XOLog(sqlstr)
	var pad PortAndDatadir
	if err := db.QueryRow(sqlstr).Scan(&pad.Name, &pad.Setting); err != nil {
		return nil, err
	}

	return &pad, nil
}

View File

@@ -0,0 +1,27 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// ServerVersion represents a row from '[custom server_version]'.
type ServerVersion struct {
	Version string // version
}

// GetServerVersion runs a custom query, returning results as ServerVersion:
// PostgreSQL's numeric server version (current_setting('server_version_num'))
// as a string.
func GetServerVersion(db XODB) (*ServerVersion, error) {
	const sqlstr = `SELECT current_setting('server_version_num') AS version`

	XOLog(sqlstr)

	version := &ServerVersion{}
	if scanErr := db.QueryRow(sqlstr).Scan(&version.Version); scanErr != nil {
		return nil, scanErr
	}
	return version, nil
}

View File

@@ -0,0 +1,43 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// Setting is one name/value pair from pg_settings.
type Setting struct {
	Name    string // name
	Setting string // setting
}

// GetSettings runs a custom query, returning results as Setting: every row
// of pg_settings as a name/value pair.
func GetSettings(db XODB) ([]*Setting, error) {
	// sql query
	const sqlstr = `SELECT name, setting ` +
		`FROM pg_settings`

	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	// load results
	res := []*Setting{}
	for q.Next() {
		s := Setting{}
		if err := q.Scan(&s.Name, &s.Setting); err != nil {
			return nil, err
		}
		res = append(res, &s)
	}
	// Fix: surface iteration errors instead of silently returning a
	// truncated result set (the generated code never checked q.Err()).
	if err := q.Err(); err != nil {
		return nil, err
	}

	return res, nil
}

View File

@@ -0,0 +1,54 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
)
// SlaveHosts10 represents a row from '[custom slave_hosts10]'. Fields are
// declared nullable (sql.Null*) because the replication view can report
// NULLs for these columns.
type SlaveHosts10 struct {
	ApplicationName sql.NullString  // application_name
	ClientAddr      sql.NullString  // client_addr
	State           sql.NullString  // state
	ByteLag         sql.NullFloat64 // byte_lag
}

// GetSlaveHosts10s runs a custom query, returning results as SlaveHosts10.
// PostgreSQL 10+ variant: pg_stat_replication renamed
// sent_location/replay_location to sent_lsn/replay_lsn in v10. byte_lag
// converts the textual LSN ("hi/lo" hex pair) into a byte distance.
func GetSlaveHosts10s(db XODB) ([]*SlaveHosts10, error) {
	// sql query (const, for consistency with the other generated queries)
	const sqlstr = `SELECT application_name, client_addr, state, sent_offset - (replay_offset - (sent_lsn - replay_lsn) * 255 * 16 ^ 6 ) AS byte_lag ` +
		`FROM ( SELECT application_name, client_addr, client_hostname, state, ` +
		`('x' || lpad(split_part(sent_lsn::TEXT, '/', 1), 8, '0'))::bit(32)::bigint AS sent_lsn, ` +
		`('x' || lpad(split_part(replay_lsn::TEXT, '/', 1), 8, '0'))::bit(32)::bigint AS replay_lsn, ` +
		`('x' || lpad(split_part(sent_lsn::TEXT, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset, ` +
		`('x' || lpad(split_part(replay_lsn::TEXT, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset ` +
		`FROM pg_stat_replication ) AS s`

	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	// load results
	res := []*SlaveHosts10{}
	for q.Next() {
		sh := SlaveHosts10{}
		if err := q.Scan(&sh.ApplicationName, &sh.ClientAddr, &sh.State, &sh.ByteLag); err != nil {
			return nil, err
		}
		res = append(res, &sh)
	}
	// Fix: surface iteration errors instead of silently returning a
	// truncated result set (the generated code never checked q.Err()).
	if err := q.Err(); err != nil {
		return nil, err
	}

	return res, nil
}

View File

@@ -0,0 +1,54 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
)
// SlaveHosts96 represents a row from '[custom slave_hosts96]'. Fields are
// declared nullable (sql.Null*) because the replication view can report
// NULLs for these columns.
type SlaveHosts96 struct {
	ApplicationName sql.NullString  // application_name
	ClientAddr      sql.NullString  // client_addr
	State           sql.NullString  // state
	ByteLag         sql.NullFloat64 // byte_lag
}

// GetSlaveHosts96s runs a custom query, returning results as SlaveHosts96.
// PostgreSQL 9.6 variant: uses the pre-v10 sent_location/replay_location
// column names. byte_lag converts the textual LSN ("hi/lo" hex pair) into
// a byte distance.
func GetSlaveHosts96s(db XODB) ([]*SlaveHosts96, error) {
	// sql query (const, for consistency with the other generated queries)
	const sqlstr = `SELECT application_name, client_addr, state, sent_offset - (replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag ` +
		`FROM ( SELECT application_name, client_addr, client_hostname, state, ` +
		`('x' || lpad(split_part(sent_location::TEXT, '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog, ` +
		`('x' || lpad(split_part(replay_location::TEXT, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog, ` +
		`('x' || lpad(split_part(sent_location::TEXT, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset, ` +
		`('x' || lpad(split_part(replay_location::TEXT, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset ` +
		`FROM pg_stat_replication ) AS s`

	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	// load results
	res := []*SlaveHosts96{}
	for q.Next() {
		sh := SlaveHosts96{}
		if err := q.Scan(&sh.ApplicationName, &sh.ClientAddr, &sh.State, &sh.ByteLag); err != nil {
			return nil, err
		}
		res = append(res, &sh)
	}
	// Fix: surface iteration errors instead of silently returning a
	// truncated result set (the generated code never checked q.Err()).
	if err := q.Err(); err != nil {
		return nil, err
	}

	return res, nil
}

View File

@@ -0,0 +1,55 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
)
// TableAccess is one row of current lock counts from pg_locks, grouped by
// relation and database.
type TableAccess struct {
	Relname string        // relname
	Relkind string        // relkind
	Datname string        // datname
	Count   sql.NullInt64 // count
}

// GetTableAccesses runs a custom query, returning results as TableAccess:
// the number of locks currently held per (relation, kind, database),
// restricted to locks that reference both a relation and a database.
func GetTableAccesses(db XODB) ([]*TableAccess, error) {
	// sql query (const, for consistency with the other generated queries)
	const sqlstr = `SELECT c.relname, c.relkind, b.datname, count(*) FROM pg_locks a ` +
		`JOIN pg_stat_database b ` +
		`ON a.database=b.datid ` +
		`JOIN pg_class c ` +
		`ON a.relation=c.oid ` +
		`WHERE a.relation IS NOT NULL ` +
		`AND a.database IS NOT NULL ` +
		`GROUP BY 1,2,3`

	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	// load results
	res := []*TableAccess{}
	for q.Next() {
		ta := TableAccess{}
		if err := q.Scan(&ta.Relname, &ta.Relkind, &ta.Datname, &ta.Count); err != nil {
			return nil, err
		}
		res = append(res, &ta)
	}
	// Fix: surface iteration errors instead of silently returning a
	// truncated result set (the generated code never checked q.Err()).
	if err := q.Err(); err != nil {
		return nil, err
	}

	return res, nil
}

View File

@@ -0,0 +1,38 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
)
// TableCacheHitRatio holds the aggregate buffer-cache hit ratio for user
// tables, as reported by pg_statio_user_tables.
type TableCacheHitRatio struct {
	Name  string          // name (always the literal 'cache hit rate')
	Ratio sql.NullFloat64 // ratio; NULL when the server has no stats
}

// GetTableCacheHitRatio runs a custom query, returning results as
// TableCacheHitRatio: heap_blks_hit / (heap_blks_hit + heap_blks_read)
// summed over all user tables, or 0 when the guard below reports no
// activity.
func GetTableCacheHitRatio(db XODB) (*TableCacheHitRatio, error) {
	var err error

	// sql query
	// NOTE(review): the CASE guard sums heap_blks_read + idx_blks_hit —
	// mixing a heap counter with an index counter looks like a typo for
	// heap_blks_hit + heap_blks_read, and could let the ELSE branch divide
	// by zero. Confirm against the generator's source query before fixing.
	var sqlstr = `SELECT 'cache hit rate' AS name, ` +
		`CASE WHEN (sum(heap_blks_read) + sum(idx_blks_hit)) > 0 ` +
		`THEN ` +
		`sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) ` +
		`ELSE 0 ` +
		`END AS ratio ` +
		`FROM pg_statio_user_tables`

	// run query
	XOLog(sqlstr)
	var tchr TableCacheHitRatio
	err = db.QueryRow(sqlstr).Scan(&tchr.Name, &tchr.Ratio)
	if err != nil {
		return nil, err
	}

	return &tchr, nil
}

View File

@@ -0,0 +1,47 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
// Tablespaces is one row from pg_catalog.pg_tablespace: name, owner and
// on-disk location.
type Tablespaces struct {
	Name     Name   // name
	Owner    Name   // owner
	Location string // location
}

// GetTablespaces runs a custom query, returning results as Tablespaces:
// all tablespaces with their owner and filesystem location, ordered by name.
func GetTablespaces(db XODB) ([]*Tablespaces, error) {
	// sql query (const, for consistency with the other generated queries)
	const sqlstr = `SELECT spcname AS Name, ` +
		`pg_catalog.pg_get_userbyid(spcowner) AS Owner, ` +
		`pg_catalog.pg_tablespace_location(oid) AS Location ` +
		`FROM pg_catalog.pg_tablespace ` +
		`ORDER BY 1`

	// run query
	XOLog(sqlstr)
	q, err := db.Query(sqlstr)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	// load results
	res := []*Tablespaces{}
	for q.Next() {
		t := Tablespaces{}
		if err := q.Scan(&t.Name, &t.Owner, &t.Location); err != nil {
			return nil, err
		}
		res = append(res, &t)
	}
	// Fix: surface iteration errors instead of silently returning a
	// truncated result set (the generated code never checked q.Err()).
	if err := q.Err(); err != nil {
		return nil, err
	}

	return res, nil
}

View File

@@ -0,0 +1,11 @@
package models
//go:generate ./gen.sh

// Name is a string alias used for identifier columns (e.g. tablespace name
// and owner) — presumably mapping PostgreSQL's "name" type; confirm with
// gen.sh.
type Name string

// Unknown holds the raw bytes of a column whose type could not be mapped.
type Unknown []uint8

// String renders the raw bytes as text so Unknown values print cleanly.
func (n Unknown) String() string {
	return string(n)
}

View File

@@ -0,0 +1,85 @@
// Package models contains the types for schema 'public'.
package models
// Code generated by xo. DO NOT EDIT.
import (
"database/sql"
"database/sql/driver"
"encoding/csv"
"errors"
"fmt"
"regexp"
"strings"
)
// XODB is the common interface for database operations that can be used with
// types from schema 'public'.
//
// This should work with database/sql.DB and database/sql.Tx.
type XODB interface {
	Exec(string, ...interface{}) (sql.Result, error)
	Query(string, ...interface{}) (*sql.Rows, error)
	QueryRow(string, ...interface{}) *sql.Row
}

// XOLog provides the log func used by generated queries.
// It defaults to a no-op; assign a func(format, args...) to trace SQL.
var XOLog = func(string, ...interface{}) {}

// ScannerValuer is the common interface for types that implement both the
// database/sql.Scanner and sql/driver.Valuer interfaces.
type ScannerValuer interface {
	sql.Scanner
	driver.Valuer
}
// StringSlice is a slice of strings.
type StringSlice []string

// quoteEscapeRegex matches a backslash-escaped double quote that is not
// itself escaped (an even number of preceding backslashes).
var quoteEscapeRegex = regexp.MustCompile(`([^\\]([\\]{2})*)\\"`)

// Scan satisfies the sql.Scanner interface for StringSlice. It parses a
// PostgreSQL array literal such as {a,b,"c d"} by rewriting the quote
// escapes into CSV conventions and handing the brace-stripped body to
// encoding/csv.
func (ss *StringSlice) Scan(src interface{}) error {
	buf, ok := src.([]byte)
	if !ok {
		return errors.New("invalid StringSlice")
	}

	// change quote escapes for csv parser
	str := quoteEscapeRegex.ReplaceAllString(string(buf), `$1""`)
	str = strings.Replace(str, `\\`, `\`, -1)

	// Fix: guard before stripping the braces — the generated code sliced
	// str[1:len(str)-1] unconditionally and panicked on input shorter than
	// two bytes (e.g. an empty value).
	if len(str) < 2 {
		return errors.New("invalid StringSlice")
	}
	// remove braces
	str = str[1 : len(str)-1]

	// empty array literal "{}"
	if len(str) == 0 {
		*ss = StringSlice([]string{})
		return nil
	}

	// parse with csv reader
	slice, err := csv.NewReader(strings.NewReader(str)).Read()
	if err != nil {
		// Fix: the generated code printed the error to stdout here; wrap
		// it with context instead and let the caller decide how to report.
		return fmt.Errorf("parsing StringSlice: %v", err)
	}

	*ss = StringSlice(slice)
	return nil
}
// Value satisfies the driver.Valuer interface for StringSlice, encoding the
// slice as a PostgreSQL array literal with every element quoted and its
// backslashes and double quotes escaped.
func (ss StringSlice) Value() (driver.Value, error) {
	quoted := make([]string, 0, len(ss))
	for _, elem := range ss {
		escaped := strings.Replace(elem, `\`, `\\\`, -1)
		escaped = strings.Replace(escaped, `"`, `\"`, -1)
		quoted = append(quoted, `"`+escaped+`"`)
	}
	return "{" + strings.Join(quoted, ",") + "}", nil
}

// Slice is a slice of ScannerValuers.
type Slice []ScannerValuer

View File

@@ -0,0 +1,309 @@
package templates
// TPL is the PostgreSQL summary report template: a single text/template
// string assembled from concatenated literals. Each {{define}} block renders
// one ASCII-table report section and is invoked from the top-level "report"
// template. The whitespace inside the string literals IS the report layout —
// do not reformat. Comments may only appear between concatenated literals,
// never inside them.
var TPL = `{{define "report"}}
{{ template "port_and_datadir" .PortAndDatadir }}
{{ template "tablespaces" .Tablespaces }}
{{ if .SlaveHosts96 -}}
{{ template "slaves_and_lag" .SlaveHosts96 }}
{{ else if .SlaveHosts10 -}}
{{ template "slaves_and_lag" .SlaveHosts10 }}
{{- end }}
{{ template "cluster" .ClusterInfo }}
{{ template "databases" .AllDatabases }}
{{ template "index_cache_ratios" .IndexCacheHitRatio }}
{{ template "table_cache_ratios" .TableCacheHitRatio }}
{{ template "global_wait_events" .GlobalWaitEvents }}
{{ template "connected_clients" .ConnectedClients }}
{{ template "counters_header" .Sleep }}
{{ template "counters" .Counters }}
{{ template "table_access" .TableAccess }}
{{ template "settings" .Settings }}
{{ template "processes" .Processes }}
{{ end }} {{/* end "report" */}}` +
	// Section: database port and data directory.
	`
{{ define "port_and_datadir" -}}
##### --- Database Port and Data_Directory --- ####
+----------------------+----------------------------------------------------+
| Name | Setting |
+----------------------+----------------------------------------------------+
| {{ printf "%-20s" .Name }} | {{ printf "%-50s" .Setting }} |
+----------------------+----------------------------------------------------+
{{ end -}}
` +
	`{{ define "tablespaces" -}}
##### --- List of Tablespaces ---- ######
+----------------------+----------------------+----------------------------------------------------+
| Name | Owner | Location |
+----------------------+----------------------+----------------------------------------------------+
{{ range . -}}
| {{ printf "%-20s" .Name }} | {{ printf "%-20s" .Owner }} | {{ printf "%-50s" .Location }} |
{{ end -}}
+----------------------+----------------------+----------------------------------------------------+
{{ end -}} {{/* end define */}}
` +
	// Section: replication slaves and byte lag (9.6 or 10+ rows).
	`{{ define "slaves_and_lag" -}}
##### --- Slave and the lag with Master --- ####
{{ if . -}}
+----------------------+----------------------+----------------------------------------------------+
| Application Name | Client Address | State | Lag |
+----------------------+----------------------+----------------------------------------------------+
{{ range . -}}` +
	`| {{ printf "%-20s" .ApplicationName }} ` +
	`| {{ printf "%-20s" .ClientAddr }} ` +
	`| {{ printf "%-50s" .State }} ` +
	`| {{ printf "% 4.2f" .ByteLag }}` +
	`{{ end -}} {{/* end define */}}
+----------------------+----------------------+----------------------------------------------------+
{{- else -}}
There are no slave hosts
{{ end -}}
{{ end -}}
` +
	`{{ define "cluster" -}}
##### --- Cluster Information --- ####
{{ if . -}}
+------------------------------------------------------------------------------------------------------+
{{- range . }}
Usename : {{ printf "%-20s" .Usename }}
Time : {{ printf "%v" .Time }}
Client Address : {{ printf "%-20s" .ClientAddr }}
Client Hostname: {{ trim .ClientHostname.String 80 }}
Version : {{ trim .Version 80 }}
Started : {{ printf "%v" .Started }}
Is Slave : {{ .IsSlave }}
+------------------------------------------------------------------------------------------------------+
{{ end -}}
{{ else -}}
There is no Cluster info
{{ end -}}
{{- end -}} {{/* end define */}}
` +
	`{{ define "databases" -}}
##### --- Databases --- ####
+----------------------+------------+
| Dat Name | Size |
+----------------------+------------+
{{ range . -}}
| {{ printf "%-20s" .Datname }} | {{ printf "%10s" .PgSizePretty }} |
{{ end -}}
+----------------------+------------+
{{ end }} {{/* end define */}}
` +
	`{{ define "index_cache_ratios" -}}
##### --- Index Cache Hit Ratios --- ####
{{ if . -}}
{{ range $dbname, $value := . }}
Database: {{ $dbname }}
+----------------------+------------+
| Index Name | Ratio |
+----------------------+------------+
| {{ printf "%-20s" .Name }} | {{ printf "% 5.2f" .Ratio.Float64 }} |
+----------------------+------------+
{{ else -}}
No stats available
{{ end -}}
{{ end -}}
{{ end -}} {{/* end define */}}
` +
	`{{ define "table_cache_ratios" -}}
##### --- Table Cache Hit Ratios --- ####
{{ if . -}}
{{ range $dbname, $value := . -}}
Database: {{ $dbname }}
+----------------------+------------+
| Index Name | Ratio |
+----------------------+------------+
| {{ printf "%-20s" .Name }} | {{ printf "%5.2f" .Ratio.Float64 }} |
+----------------------+------------+
{{ else -}}
No stats available
{{ end -}}
{{ end }}
{{- end -}} {{/* end define */}}
` +
	`{{ define "global_wait_events" -}}
##### --- List of Wait_events for the entire Cluster - all-databases --- ####
{{ if . -}}
+----------------------+----------------------+---------+
| Wait Event Type | Event | Count |
+----------------------+----------------------+---------+
{{ range . -}}
| {{ printf "%-20s" .WaitEventType }} | {{ printf "%-20s" .WaitEvent }} | {{ printf "% 5d" .Count }} |
{{ end -}}
+----------------------+----------------------+---------+
{{ else -}}
No stats available
{{ end -}}
{{- end -}} {{/* end define */}}
` +
	// NOTE(review): this section's header says "Wait Event Type" but the
	// rows print .Usename — the first header label looks copy-pasted from
	// the section above and should probably read "Username". Left as-is to
	// keep output byte-stable; confirm and fix with a golden-output test.
	`{{ define "connected_clients" -}}
##### --- List of users and client_addr or client_hostname connected to --all-databases --- ####
{{ if . -}}
+----------------------+------------+---------+----------------------+---------+
| Wait Event Type | Client | State | Count |
+----------------------+------------+---------+----------------------+---------+
{{ range . -}}` +
	`| {{ printf "%-20s" .Usename }} | ` +
	`{{ printf "%-20s" .Client.String }} | ` +
	`{{ printf "%-20s" .State.String }} | ` +
	`{{ printf "% 7d" .Count.Int64 }} |` + "\n" +
	`{{ end -}}
+----------------------+------------+---------+----------------------+---------+
{{ else -}}
No stats available
{{ end -}}
{{- end -}} {{/* end define */}}
` +
	/*
	   Counters header
	*/
	`{{ define "counters_header" -}}` +
	"##### --- Counters diff after {{ . }} seconds --- ####\n" +
	`{{end}}` +
	/*
	   Counters
	*/
	`{{ define "counters" -}}` +
	"+----------------------" +
	"+-------------" +
	"+------------" +
	"+--------------" +
	"+-------------" +
	"+------------" +
	"+-------------" +
	"+------------" +
	"+-------------" +
	"+------------" +
	"+------------" +
	"+-----------" +
	"+-----------" +
	"+-----------" +
	"+------------+" + "\n" +
	"| Database " +
	"| Numbackends " +
	"| XactCommit " +
	"| XactRollback " +
	"| BlksRead " +
	"| BlksHit " +
	"| TupReturned " +
	"| TupFetched " +
	"| TupInserted " +
	"| TupUpdated " +
	"| TupDeleted " +
	"| Conflicts " +
	"| TempFiles " +
	"| TempBytes " +
	"| Deadlocks |" + "\n" +
	"+----------------------" +
	"+-------------" +
	"+------------" +
	"+--------------" +
	"+-------------" +
	"+------------" +
	"+-------------" +
	"+------------" +
	"+-------------" +
	"+------------" +
	"+------------" +
	"+-----------" +
	"+-----------" +
	"+-----------" +
	"+------------+" + "\n" +
	`{{ range $key, $value := . -}} ` +
	`| {{ printf "%-20s" (index $value 2).Datname }} ` +
	`| {{ printf "% 7d" (index $value 2).Numbackends }} ` +
	`| {{ printf "% 7d" (index $value 2).XactCommit }} ` +
	`| {{ printf "% 7d" (index $value 2).XactRollback }} ` +
	`| {{ printf "% 7d" (index $value 2).BlksRead }} ` +
	`| {{ printf "% 7d" (index $value 2).BlksHit }} ` +
	`| {{ printf "% 7d" (index $value 2).TupReturned }} ` +
	`| {{ printf "% 7d" (index $value 2).TupFetched }} ` +
	`| {{ printf "% 7d" (index $value 2).TupInserted }} ` +
	`| {{ printf "% 7d" (index $value 2).TupUpdated }} ` +
	`| {{ printf "% 7d" (index $value 2).TupDeleted }} ` +
	`| {{ printf "% 7d" (index $value 2).Conflicts }} ` +
	`| {{ printf "% 7d" (index $value 2).TempFiles }} ` +
	`| {{ printf "% 7d" (index $value 2).TempBytes }} ` +
	`| {{ printf "% 7d" (index $value 2).Deadlocks }} ` +
	"|\n" +
	`{{ end }}` +
	"+----------------------" +
	"+-------------" +
	"+------------" +
	"+--------------" +
	"+-------------" +
	"+------------" +
	"+-------------" +
	"+------------" +
	"+-------------" +
	"+------------" +
	"+------------" +
	"+-----------" +
	"+-----------" +
	"+-----------" +
	"+------------+" + "\n" +
	`{{ end }}` +
	`{{ define "table_access" -}}` +
	"##### --- Table access per database --- ####\n" +
	`{{ range $dbname, $values := . -}}` +
	"Database: {{ $dbname }}\n" +
	"+----------------------------------------------------" +
	"+------" +
	"+--------------------------------" +
	"+---------+\n" +
	"| Relname " +
	"| Kind " +
	"| Datname " +
	"| Count |\n" +
	"+----------------------------------------------------" +
	"+------" +
	"+--------------------------------" +
	"+---------+\n" +
	`{{ range . -}}
| {{ printf "%-50s" .Relname }} ` +
	`| {{ printf "%1s" .Relkind }} ` +
	`| {{ printf "%-30s" .Datname }} ` +
	`| {{ printf "% 7d" .Count.Int64 }} ` +
	"|\n" +
	"{{ end }}" +
	"+----------------------------------------------------" +
	"+------" +
	"+--------------------------------" +
	"+---------+\n" +
	"{{ end -}}" +
	"{{ end }}" +
	`{{ define "settings" -}}` +
	/*
	   Settings
	*/
	"##### --- Instance settings --- ####\n" +
	" Setting " +
	" Value \n" +
	`{{ range $name, $values := . -}}` +
	` {{ printf "%-45s" .Name }} ` +
	`: {{ printf "%-60s" .Setting }} ` +
	"\n" +
	"{{ end }}" +
	"{{ end }}" +
	/*
	   Processes
	*/
	`{{ define "processes" -}}` +
	"##### --- Processes start up command --- ####\n" +
	"{{ if . -}}" +
	" PID " +
	": Command line\n" +
	`{{ range $name, $values := . }}` +
	` {{ printf "% 5d" .PID }} ` +
	`: {{ printf "%-s" .CmdLine }} ` +
	"\n" +
	"{{ end }}" +
	"{{ else }}" +
	"No postgres process found\n" +
	"{{ end }}" +
	"{{ end }}"

View File

@@ -84,9 +84,10 @@ var (
"pt-mysql-summary --host=$mysql-host --port=$mysql-port --user=$mysql-user --password=$mysql-pass",
}
Build string = "01-01-1980"
GoVersion string = "1.8"
Version string = "3.0.1"
Build string = "01-01-1980" //nolint
GoVersion string = "1.8" //nolint
Version string = "3.0.1" //nolint
Commit string //nolint
)
func main() {
@@ -177,7 +178,8 @@ func processCliParams(baseTempPath string, usageWriter io.Writer) (*cliOptions,
}
// Add support for --version flag
app.Version(TOOLNAME + "\nVersion " + Version + "\nBuild: " + Build + " using " + GoVersion)
app.Version(TOOLNAME + "\nVersion " + Version + "\nBuild: " + Build + " using " + GoVersion +
" Go version: " + GoVersion)
opts := &cliOptions{
CollectCommand: app.Command(collectCmd, "Collect, sanitize, pack and encrypt data from pt-tools."),

View File

@@ -102,6 +102,7 @@ my $cxn = make_cxn(
dsn_string => 'h=127.1,P=12345,u=msandbox,p=msandbox',
set => sub {
my ($dbh) = @_;
warn "---------------";
$set_calls++;
$dbh->do("SET \@a := \@a + 1");
},

View File

@@ -36,6 +36,7 @@ my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('master');
plan skip_all => 'Cannot connect to sandbox master';
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
} else {

View File

@@ -37,6 +37,7 @@ my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('master');
my $output;
plan skip_all => 'Cannot connect to sandbox master';
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
}

View File

@@ -1267,14 +1267,6 @@ is_deeply(
'Column having the word "generated" as part of the comment is OK',
) or diag Data::Dumper::Dumper($tbl);
$tbl = $tp->parse( load_file('t/lib/samples/generated_cols_comments.sql') );
warn Data::Dumper::Dumper($tbl);
is_deeply(
$tbl,
{},
'pt-1728',
);
# #############################################################################
# Done.
# #############################################################################

View File

@@ -303,6 +303,7 @@ CREATE TABLE `user` (
`Drop_role_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N',
`Password_reuse_history` smallint(5) unsigned DEFAULT NULL,
`Password_reuse_time` smallint(5) unsigned DEFAULT NULL,
`Password_require_current` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
PRIMARY KEY (`Host`,`User`)
) /*!50100 TABLESPACE `mysql` */ ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0 COMMENT='Users and global privileges'

View File

@@ -49,6 +49,7 @@ CREATE TABLE `user` (
`Drop_role_priv` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL DEFAULT 'N',
`Password_reuse_history` smallint(5) unsigned DEFAULT NULL,
`Password_reuse_time` smallint(5) unsigned DEFAULT NULL,
`Password_require_current` enum('N','Y') CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
PRIMARY KEY (`Host`,`User`)
) /*!50100 TABLESPACE `mysql` */ ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0 COMMENT='Users and global privileges'

View File

@@ -201,19 +201,6 @@ wsrep_cluster_size 100
HandlerSocket NoSQL | Not Supported
Fast Hash UDFs | Unknown
# Percona XtraDB Cluster #####################################
Cluster Name | pt_sandbox_cluster
Cluster Address | gcomm://
Cluster Size | 3
Cluster Nodes | 192.168.0.100,192.168.0.100,192.168.0.100
Node Name | 12345
Node Status | Primary
SST Method | rsync
Slave Threads | 2
Ignore Split Brain | false
Ignore Quorum | false
gcache Size | 128M
gcache Directory | /tmp/12345/data/
gcache Name | /tmp/12345/data//galera.cache
# Plugins ####################################################
InnoDB compression | ACTIVE
# Query cache ################################################

View File

@@ -167,7 +167,6 @@ wsrep_local_index 4000000000000 45000000
HandlerSocket NoSQL | Not Supported
Fast Hash UDFs | Unknown
# Percona XtraDB Cluster #####################################
wsrep_on | OFF
# Plugins ####################################################
InnoDB compression | ACTIVE
# Query cache ################################################

View File

@@ -52,17 +52,23 @@ $sb->load_file('master', "$sample/long_fk_constraints.sql");
warn $output;
my $constraints = $master_dbh->selectall_arrayref("SELECT TABLE_NAME, CONSTRAINT_NAME FROM information_schema.KEY_COLUMN_USAGE WHERE table_schema='bug1215587' and (TABLE_NAME='Table1' OR TABLE_NAME='Table2') and CONSTRAINT_NAME LIKE '%fkey%' ORDER BY TABLE_NAME, CONSTRAINT_NAME");
warn Data::Dumper::Dumper($constraints);
my $query = <<_SQL;
SELECT TABLE_NAME, CONSTRAINT_NAME
FROM information_schema.KEY_COLUMN_USAGE
WHERE table_schema='bug1215587'
and (TABLE_NAME='Table1' OR TABLE_NAME='Table2')
and CONSTRAINT_NAME LIKE '%fkey%'
ORDER BY TABLE_NAME, CONSTRAINT_NAME
_SQL
my $constraints = $master_dbh->selectall_arrayref($query);
is_deeply(
$constraints,
[
[ 'Table1', '__fkey1a' ],
[ 'Table1', '__fkey_SALES_RECURRING_PROFILE_CUSTOMER_CUSTOMER_ENTITY_ENTITY_I' ],
[ 'Table2', '__fkey2b' ],
[ 'Table2', '_fkey2a' ],
[ 'Table2', '__fkey2b' ]
],
"First run adds or removes underscore from constraint names, accordingly"
);

View File

@@ -21,6 +21,13 @@ my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $master_dbh = $sb->get_dbh_for('master');
my $vp = VersionParser->new($master_dbh);
warn Data::Dumper::Dumper($vp);
if ($vp->cmp('8.0.14') > -1 && $vp->flavor() !~ m/maria/i) {
plan skip_all => 'Cannot run this test under the current MySQL version';
}
if ( !$master_dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
}
@@ -50,16 +57,24 @@ $sb->load_file('master', "$sample/bug-1215587.sql");
qw(--execute)) },
);
my $constraints = $master_dbh->selectall_arrayref("SELECT TABLE_NAME, CONSTRAINT_NAME FROM information_schema.KEY_COLUMN_USAGE WHERE table_schema='bug1215587' and (TABLE_NAME='Table1' OR TABLE_NAME='Table2') and CONSTRAINT_NAME LIKE '%fkey%' ORDER BY TABLE_NAME, CONSTRAINT_NAME");
my $query = <<__SQL;
SELECT TABLE_NAME, CONSTRAINT_NAME
FROM information_schema.KEY_COLUMN_USAGE
WHERE table_schema='bug1215587'
and (TABLE_NAME='Table1' OR TABLE_NAME='Table2')
and CONSTRAINT_NAME LIKE '%fkey%'
ORDER BY TABLE_NAME, CONSTRAINT_NAME
__SQL
my $constraints = $master_dbh->selectall_arrayref($query);
is_deeply(
$constraints,
[
['Table1', '_fkey1b'],
['Table1', '__fkey1a'],
['Table2', '_fkey2a'],
['Table1', '_fkey1b'],
['Table2', '__fkey2b'],
['Table2', '_fkey2a'],
],
"First run adds or removes underscore from constraint names, accordingly"
);
@@ -81,10 +96,10 @@ $constraints = $master_dbh->selectall_arrayref("SELECT TABLE_NAME, CONSTRAINT_NA
is_deeply(
$constraints,
[
['Table1', 'fkey1a'],
['Table1', '__fkey1b'],
['Table2', 'fkey2b'],
['Table1', 'fkey1a'],
['Table2', '__fkey2a'],
['Table2', 'fkey2b'],
],
"Second run adds or removes underscore from constraint names, accordingly"
);
@@ -103,10 +118,10 @@ $constraints = $master_dbh->selectall_arrayref("SELECT TABLE_NAME, CONSTRAINT_NA
is_deeply(
$constraints,
[
['Table1', 'fkey1b'],
['Table1', '_fkey1a'],
['Table2', 'fkey2a'],
['Table1', 'fkey1b'],
['Table2', '_fkey2b'],
['Table2', 'fkey2a'],
],
"Third run toggles constraint names back to how they were"
);

View File

@@ -39,7 +39,9 @@ my $sample = "t/pt-online-schema-change/samples/";
# pt-online-schema-change fails with duplicate key in table for self-referencing FK
# ############################################################################
diag("Before loading sql");
$sb->load_file('master', "$sample/bug-1632522.sql");
diag("after loading sql");
# run once: we expect the constraint name to be appended with one underscore
# but the self-referencing constraint will have 2 underscore

View File

@@ -1,2 +1,2 @@
{"classes":[{"attribute":"fingerprint","checksum":"C29D79D8CB57E235AA8E9FA785927259","distillate":"SELECT d.t","example":{"Query_time":"0.000286","as_select":"SELECT i FROM d.t WHERE i=?","query":"PREPARE SELECT i FROM d.t WHERE i=?","ts":"2009-12-08 09:23:49.637394"},"fingerprint":"prepare select i from d.t where i=?","histograms":{"Query_time":[0,0,1,0,0,0,0,0]},"metrics":{"No_good_index_used":{"yes":"0"},"No_index_used":{"yes":"0"},"Query_length":{"avg":"35","max":"35","median":"35","min":"35","pct":"0","pct_95":"35","stddev":"0","sum":"35"},"Query_time":{"avg":"0.000286","max":"0.000286","median":"0.000286","min":"0.000286","pct":"0.333333","pct_95":"0.000286","stddev":"0.000000","sum":"0.000286"},"Statement_id":{"value":2},"Warning_count":{"avg":"0","max":"0","median":"0","min":"0","pct":"0","pct_95":"0","stddev":"0","sum":"0"},"host":{"value":"127.0.0.1"}},"query_count":1,"tables":[{"create":"SHOW CREATE TABLE `d`.`t`\\G","status":"SHOW TABLE STATUS FROM `d` LIKE 't'\\G"}],"ts_max":"2009-12-08 09:23:49.637394","ts_min":"2009-12-08 09:23:49.637394"},{"attribute":"fingerprint","checksum":"53704700F9CECAAF3F79759E7FA2F117","distillate":"SELECT d.t","example":{"Query_time":"0.000281","as_select":"SELECT i FROM d.t WHERE i=\"3\"","query":"EXECUTE SELECT i FROM d.t WHERE i=\"3\"","ts":"2009-12-08 09:23:49.637892"},"fingerprint":"execute select i from d.t where i=?","histograms":{"Query_time":[0,0,1,0,0,0,0,0]},"metrics":{"No_good_index_used":{"yes":"0"},"No_index_used":{"yes":"1"},"Query_length":{"avg":"37","max":"37","median":"37","min":"37","pct":"0","pct_95":"37","stddev":"0","sum":"37"},"Query_time":{"avg":"0.000281","max":"0.000281","median":"0.000281","min":"0.000281","pct":"0.333333","pct_95":"0.000281","stddev":"0.000000","sum":"0.000281"},"Statement_id":{"value":2},"Warning_count":{"avg":"0","max":"0","median":"0","min":"0","pct":"0","pct_95":"0","stddev":"0","sum":"0"},"host":{"value":"127.0.0.1"}},"query_count":1,"tables":[{"create":"SHOW CREATE TABLE 
`d`.`t`\\G","status":"SHOW TABLE STATUS FROM `d` LIKE 't'\\G"}],"ts_max":"2009-12-08 09:23:49.637892","ts_min":"2009-12-08 09:23:49.637892"},{"attribute":"fingerprint","checksum":"EDBC971AEC392917AA353644DE4C4CB4","distillate":"ADMIN QUIT","example":{"Query_time":"0.000000","query":"administrator command: Quit","ts":"2009-12-08 09:23:49.638381"},"fingerprint":"administrator command: Quit","histograms":{"Query_time":[0,0,0,0,0,0,0,0]},"metrics":{"No_good_index_used":{"yes":"0"},"No_index_used":{"yes":"0"},"Query_length":{"avg":"27","max":"27","median":"27","min":"27","pct":"0","pct_95":"27","stddev":"0","sum":"27"},"Query_time":{"avg":"0.000000","max":"0.000000","median":"0.000000","min":"0.000000","pct":"0.333333","pct_95":"0.000000","stddev":"0.000000","sum":"0.000000"},"Warning_count":{"avg":"0","max":"0","median":"0","min":"0","pct":"0","pct_95":"0","stddev":"0","sum":"0"},"host":{"value":"127.0.0.1"}},"query_count":1,"ts_max":"2009-12-08 09:23:49.638381","ts_min":"2009-12-08 09:23:49.638381"}],"global":{"files":[{"name":"tcpdump021.txt","size":2827}],"metrics":{"No_good_index_used":{"cnt":"0"},"No_index_used":{"cnt":"1"},"Query_length":{"avg":"33","max":"37","median":"34","min":"27","pct_95":"36","stddev":"4","sum":"99"},"Query_time":{"avg":"0.000189","max":"0.000286","median":"0.000273","min":"0.000000","pct_95":"0.000273","stddev":"0.000129","sum":"0.000567"},"Rows_affected":{"avg":"0","max":"0","median":"0","min":"0","pct_95":"0","stddev":"0","sum":"0"},"Warning_count":{"avg":"0","max":"0","median":"0","min":"0","pct_95":"0","stddev":"0","sum":"0"}},"query_count":3,"unique_query_count":3}}
{"classes":[{"attribute":"fingerprint","checksum":"C29D79D8CB57E235AA8E9FA785927259","distillate":"SELECT d.t","example":{"Query_time":"0.000286","as_select":"SELECT i FROM d.t WHERE i=?","query":"PREPARE SELECT i FROM d.t WHERE i=?","ts":"2009-12-08 09:23:49.637394"},"fingerprint":"prepare select i from d.t where i=?","histograms":{"Query_time":[0,0,1,0,0,0,0,0]},"metrics":{"No_good_index_used":{"yes":"0"},"No_index_used":{"yes":"0"},"Query_length":{"avg":"35","max":"35","median":"35","min":"35","pct":"0","pct_95":"35","stddev":"0","sum":"35"},"Query_time":{"avg":"0.000286","max":"0.000286","median":"0.000286","min":"0.000286","pct":"0.333333","pct_95":"0.000286","stddev":"0.000000","sum":"0.000286"},"Statement_id":{"value":2},"Warning_count":{"avg":"0","max":"0","median":"0","min":"0","pct":"0","pct_95":"0","stddev":"0","sum":"0"},"host":{"value":"127.0.0.1"}},"query_count":1,"tables":[{"create":"SHOW CREATE TABLE `d`.`t`\\G","status":"SHOW TABLE STATUS FROM `d` LIKE 't'\\G"}],"ts_max":"2009-12-08 09:23:49.637394","ts_min":"2009-12-08 09:23:49.637394"},{"attribute":"fingerprint","checksum":"53704700F9CECAAF3F79759E7FA2F117","distillate":"SELECT d.t","example":{"Query_time":"0.000281","as_select":"SELECT i FROM d.t WHERE i=\"3\"","query":"EXECUTE SELECT i FROM d.t WHERE i=\"3\"","ts":"2009-12-08 09:23:49.637892"},"fingerprint":"execute select i from d.t where i=?","histograms":{"Query_time":[0,0,1,0,0,0,0,0]},"metrics":{"No_good_index_used":{"yes":"0"},"No_index_used":{"yes":"1"},"Query_length":{"avg":"37","max":"37","median":"37","min":"37","pct":"0","pct_95":"37","stddev":"0","sum":"37"},"Query_time":{"avg":"0.000281","max":"0.000281","median":"0.000281","min":"0.000281","pct":"0.333333","pct_95":"0.000281","stddev":"0.000000","sum":"0.000281"},"Statement_id":{"value":"2"},"Warning_count":{"avg":"0","max":"0","median":"0","min":"0","pct":"0","pct_95":"0","stddev":"0","sum":"0"},"host":{"value":"127.0.0.1"}},"query_count":1,"tables":[{"create":"SHOW CREATE TABLE 
`d`.`t`\\G","status":"SHOW TABLE STATUS FROM `d` LIKE 't'\\G"}],"ts_max":"2009-12-08 09:23:49.637892","ts_min":"2009-12-08 09:23:49.637892"},{"attribute":"fingerprint","checksum":"EDBC971AEC392917AA353644DE4C4CB4","distillate":"ADMIN QUIT","example":{"Query_time":"0.000000","query":"administrator command: Quit","ts":"2009-12-08 09:23:49.638381"},"fingerprint":"administrator command: Quit","histograms":{"Query_time":[0,0,0,0,0,0,0,0]},"metrics":{"No_good_index_used":{"yes":"0"},"No_index_used":{"yes":"0"},"Query_length":{"avg":"27","max":"27","median":"27","min":"27","pct":"0","pct_95":"27","stddev":"0","sum":"27"},"Query_time":{"avg":"0.000000","max":"0.000000","median":"0.000000","min":"0.000000","pct":"0.333333","pct_95":"0.000000","stddev":"0.000000","sum":"0.000000"},"Warning_count":{"avg":"0","max":"0","median":"0","min":"0","pct":"0","pct_95":"0","stddev":"0","sum":"0"},"host":{"value":"127.0.0.1"}},"query_count":1,"ts_max":"2009-12-08 09:23:49.638381","ts_min":"2009-12-08 09:23:49.638381"}],"global":{"files":[{"name":"tcpdump021.txt","size":2827}],"metrics":{"No_good_index_used":{"cnt":"0"},"No_index_used":{"cnt":"1"},"Query_length":{"avg":"33","max":"37","median":"34","min":"27","pct_95":"36","stddev":"4","sum":"99"},"Query_time":{"avg":"0.000189","max":"0.000286","median":"0.000273","min":"0.000000","pct_95":"0.000273","stddev":"0.000129","sum":"0.000567"},"Rows_affected":{"avg":"0","max":"0","median":"0","min":"0","pct_95":"0","stddev":"0","sum":"0"},"Warning_count":{"avg":"0","max":"0","median":"0","min":"0","pct_95":"0","stddev":"0","sum":"0"}},"query_count":3,"unique_query_count":3}}

View File

@@ -82,17 +82,6 @@ unlike(
"Truncating tables while checksum is running"
);
$sb->load_file('master', 't/pt-table-checksum/samples/pt-1728.sql');
@args = ($master_dsn, qw(--no-check-binlog-format));
my $new_rows_count = $num_rows * 5;
diag(`util/mysql_random_data_load --host=127.0.0.1 --port=12345 --user=msandbox --password=msandbox test $table $new_rows_count`);
$output = output(
sub { pt_table_checksum::main(@args) },
stderr => 1,
);
diag($output);
$thr->join();
# #############################################################################

View File

@@ -183,9 +183,14 @@ update_version() {
}
update_copyright_year() {
echo -n "Updating copyright year in tools... "
echo -n "Updating copyright year in tools to $YEAR ... "
cd $BRANCH/bin
for tool_file in *; do
# Skip checking binary files (golang binaries)
file bin/pt-pg-summary | grep -q "ELF"
if [ $? -eq 0 ]; then
continue
fi
local copyright="$(grep "[0-9] Percona LLC and/or its affiliates" $tool_file)"
local new_copyright="$(../util/new-copyright-year "$YEAR" "$copyright")"
if [ $? -ne 0 ]; then

Binary file not shown.