Compare commits

..

7 Commits

Author SHA1 Message Date
Carlos
df9dacc2ed PMM-4192 Removed unused deps 2020-07-06 09:03:41 -03:00
Carlos
c780bea836 PMM-4192 Changes for CR 2020-07-05 22:47:53 -03:00
Carlos
775070efe4 PMM-4192 Changes for CR 2020-06-30 09:25:52 -03:00
Carlos
30f3e5b5a9 PMM-4192 Code clean-up 2020-06-29 21:06:38 -03:00
Carlos
1e32dacac6 PMM-4192 Updated MongoDB explain tests
Updated test to use bson.D instead of BsonD
2020-06-29 20:59:27 -03:00
Carlos
1f6cea8f9c PMM-4192 Updated MongoDB fingerprint
SystemProfile has been changed to use the new bson.D from the official
MongoDB driver instead of the old BsonD. Updated the fingerprinter
module and all tests
2020-06-24 23:09:59 -03:00
Carlos Salguero
0e4a19d356 WIP 2020-06-20 19:27:07 -03:00
65 changed files with 271 additions and 736 deletions

1
.gitignore vendored
View File

@@ -24,4 +24,3 @@ src/go/.env
config/deb/control.bak
config/rpm/percona-toolkit.spec.bak
config/sphinx-build/percona-theme/*
coverage.out

View File

@@ -1,8 +1,7 @@
language: go
go:
- 1.14.x
- tip
- 1.13.x
services:
- docker
@@ -37,16 +36,13 @@ env:
# REVIEWDOG_GITHUB_API_TOKEN
- secure: "px8XYeNEAFTSTb1hYZuEOxqOXUxvp3EoU+KCtPck/KNozkoS95eBd9klgr3Os4wPKloLdMhrr0VE98lukogUxA/NmnYnos01kegjWgwwM6fkob8JxaN5KK4oUFF1wmirBlrjGlw8vUErPwINmrK4BywKpDbw6Yip6FzxdlWESHI="
matrix:
include:
matrix:
- MONGODB_IMAGE=mongo:3.0
- MONGODB_IMAGE=mongo:3.2
- MONGODB_IMAGE=mongo:3.4
- MONGODB_IMAGE=percona/percona-server-mongodb:3.0
- MONGODB_IMAGE=percona/percona-server-mongodb:3.2
- MONGODB_IMAGE=percona/percona-server-mongodb:3.4
allow_failures:
- go: tip
# skip non-trunk PMM-XXXX branch builds, but still build pull requests
branches:

View File

@@ -1,15 +1,5 @@
Changelog for Percona Toolkit
* Fixed bug PT-1859: pt-pg-summary fails for Postgres12 (Thanks Sergey Kuzmichev)
* Improvement PT-1853: Added --no-check-foreign-keys to pt-osc
* Improvement PT-1851: Backslashes missing from documentation
* Improvement PT-1836: Review and consider lintian reported issues
* Fixed bug PT-1829: pt-heartbeat doesn't reconnect for check-read-only
* Fixed bug PT-1822: pt-mongodb-summary fails on standalone mongodb instances
* Fixed bug PT-1518: pt-table-checksum gives error CRC32. (Thanks @ovidiustanila)
v3.2.0 release 2020-04-23
* Fixed bug PT-1824: Name of a constraint can exceed 64 chars (Thanks Iwo Panowicz)
* Fixed bug PT-1793: Protocol parser cannot handle year 2020 (Thanks Kei Tsuchiya)
* Fixed bug PT-1782: pt-online-schema-change: FK keys warning, but there are no foreign keys

9
Gopkg.lock generated
View File

@@ -9,6 +9,14 @@
revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
version = "v1.4.2"
[[projects]]
digest = "1:c39fbf3b3e138accc03357c72417c0153c54cc1ae8c9f40e8f120a550d876a76"
name = "github.com/Percona-Lab/pt-pg-summary"
packages = ["models"]
pruneopts = ""
revision = "f06beea959eb00acfe44ce39342c27582ad84caa"
version = "v0.1.9"
[[projects]]
digest = "1:f82b8ac36058904227087141017bb82f4b0fc58272990a4cdae3e2d6d222644e"
name = "github.com/StackExchange/wmi"
@@ -356,6 +364,7 @@
analyzer-version = 1
input-imports = [
"github.com/Masterminds/semver",
"github.com/Percona-Lab/pt-pg-summary/models",
"github.com/alecthomas/kingpin",
"github.com/go-ini/ini",
"github.com/golang/mock/gomock",

View File

@@ -2,7 +2,7 @@ use ExtUtils::MakeMaker;
WriteMakefile(
NAME => 'percona-toolkit',
VERSION => '3.2.1',
VERSION => '3.2.0',
EXE_FILES => [ <bin/*> ],
MAN1PODS => {
'docs/percona-toolkit.pod' => 'blib/man1/percona-toolkit.1p',

View File

@@ -1359,6 +1359,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-align 3.2.1
pt-align 3.2.0
=cut

View File

@@ -45,7 +45,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -8654,6 +8654,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-archiver 3.2.1
pt-archiver 3.2.0
=cut

View File

@@ -43,7 +43,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -5912,6 +5912,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-config-diff 3.2.1
pt-config-diff 3.2.0
=cut

View File

@@ -42,7 +42,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -5702,6 +5702,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-deadlock-logger 3.2.1
pt-deadlock-logger 3.2.0
=cut

View File

@@ -38,7 +38,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -5677,6 +5677,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-diskstats 3.2.1
pt-diskstats 3.2.0
=cut

View File

@@ -39,7 +39,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -5765,6 +5765,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-duplicate-key-checker 3.2.1
pt-duplicate-key-checker 3.2.0
=cut

View File

@@ -1648,6 +1648,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-fifo-split 3.2.1
pt-fifo-split 3.2.0
=cut

View File

@@ -35,7 +35,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -5126,6 +5126,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-find 3.2.1
pt-find 3.2.0
=cut

View File

@@ -2239,6 +2239,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-fingerprint 3.2.1
pt-fingerprint 3.2.0
=cut

View File

@@ -37,7 +37,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -4688,6 +4688,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-fk-error-logger 3.2.1
pt-fk-error-logger 3.2.0
=cut

View File

@@ -44,7 +44,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -7384,6 +7384,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-heartbeat 3.2.1
pt-heartbeat 3.2.0
=cut

View File

@@ -45,7 +45,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -7695,6 +7695,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-index-usage 3.2.1
pt-index-usage 3.2.0
=cut

View File

@@ -1127,7 +1127,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-ioprofile 3.2.1
pt-ioprofile 3.2.0
=cut

View File

@@ -47,7 +47,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -8554,6 +8554,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-kill 3.2.1
pt-kill 3.2.0
=cut

View File

@@ -804,7 +804,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-mext 3.2.1
pt-mext 3.2.0
=cut

View File

@@ -3289,7 +3289,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-mysql-summary 3.2.1
pt-mysql-summary 3.2.0
=cut

View File

@@ -56,7 +56,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -4255,7 +4255,7 @@ sub get_slaves {
else {
die "Unexpected recursion methods: @$methods";
}
return $slaves;
}
@@ -5015,32 +5015,10 @@ sub wait {
my $worst; # most lagging slave
my $pr_callback;
my $pr_first_report;
### refresh list of slaves. In: self passed to wait()
### Returns: new slave list
my $pr_refresh_slave_list = sub {
my ($self) = @_;
my ($slaves, $refresher) = ($self->{slaves}, $self->{get_slaves_cb});
return $slaves if ( not defined $refresher );
my $before = join ' ', sort map {$_->name()} @$slaves;
$slaves = $refresher->();
my $after = join ' ', sort map {$_->name()} @$slaves;
if ($before ne $after) {
$self->{slaves} = $slaves;
printf STDERR "Slave set to watch has changed\n Was: %s\n Now: %s\n",
$before, $after;
}
return($self->{slaves});
};
$slaves = $pr_refresh_slave_list->($self);
if ( $pr ) {
# If you use the default Progress report callback, you'll need
# to add Transformers.pm to this tool.
$pr_callback = sub {
my ($fraction, $elapsed, $remaining, $eta, $completed) = @_;
my $dsn_name = $worst->{cxn}->name();
my $dsn_name = $worst->{cxn}->{dsn_name};
if ( defined $worst->{lag} ) {
print STDERR "Replica lag is " . ($worst->{lag} || '?')
. " seconds on $dsn_name. Waiting.\n";
@@ -5055,34 +5033,21 @@ sub wait {
};
$pr->set_callback($pr_callback);
# If a replica is stopped, don't wait 30s (or whatever interval)
# to report this. Instead, report it once, immediately, then
# keep reporting it every interval.
$pr_first_report = sub {
my $dsn_name = $worst->{cxn}->name();
my $dsn_name = $worst->{cxn}->{dsn_name};
if ( !defined $worst->{lag} ) {
if ($self->{fail_on_stopped_replication}) {
die 'replication is stopped';
}
print STDERR "(2) Replica '$dsn_name' is stopped. Waiting.\n";
print STDERR "(2) Replica $dsn_name is stopped. Waiting.\n";
}
return;
};
}
# First check all slaves.
my @lagged_slaves = map { {cxn=>$_, lag=>undef} } @$slaves;
my @lagged_slaves = map { {cxn=>$_, lag=>undef} } @$slaves;
while ( $oktorun->() && @lagged_slaves ) {
PTDEBUG && _d('Checking slave lag');
### while we were waiting our list of slaves may have changed
$slaves = $pr_refresh_slave_list->($self);
my $watched = 0;
@lagged_slaves = grep {
my $slave_name = $_->{cxn}->name();
grep {$slave_name eq $_->name()} @{$slaves // []}
} @lagged_slaves;
for my $i ( 0..$#lagged_slaves ) {
my $lag;
eval {
@@ -5101,10 +5066,8 @@ sub wait {
}
}
# Remove slaves that aren't lagging.
@lagged_slaves = grep { defined $_ } @lagged_slaves;
if ( @lagged_slaves ) {
# Sort lag, undef is highest because it means the slave is stopped.
@lagged_slaves = reverse sort {
defined $a->{lag} && defined $b->{lag} ? $a->{lag} <=> $b->{lag}
: defined $a->{lag} ? -1
@@ -5115,10 +5078,6 @@ sub wait {
$worst->{lag}, 'on', Dumper($worst->{cxn}->dsn()));
if ( $pr ) {
# There's no real progress because we can't estimate how long
# it will take all slaves to catch up. The progress reports
# are just to inform the user every 30s which slave is still
# lagging the most.
$pr->update(
sub { return 0; },
first_report => $pr_first_report,
@@ -8635,12 +8594,6 @@ sub main {
# ########################################################################
my $set_on_connect = sub {
my ($dbh) = @_;
if (!$o->get('check-foreign-keys')) {
my $sql = "SET foreign_key_checks=0";
PTDEBUG && _d($sql);
print $sql, "\n" if $o->get('print');
$dbh->do($sql);
}
return;
};
@@ -8800,42 +8753,13 @@ sub main {
channel => $o->get('channel'),
);
my $slaves_to_skip = $o->get('skip-check-slave-lag');
my $get_slaves_cb = sub {
my ($intolerant) = @_;
my $slaves =$ms->get_slaves(
dbh => $cxn->dbh(),
dsn => $cxn->dsn(),
make_cxn => sub {
return $make_cxn->(
@_,
prev_dsn => $cxn->dsn(),
errok => (not $intolerant)
);
},
);
if ($slaves_to_skip) {
my $filtered_slaves = [];
for my $slave (@$slaves) {
for my $slave_to_skip (@$slaves_to_skip) {
if ($slave->{dsn}->{h} eq $slave_to_skip->{h} && $slave->{dsn}->{P} eq $slave_to_skip->{P}) {
print "Skipping slave " . $slave->description() . "\n";
} else {
push @$filtered_slaves, $slave;
}
}
}
$slaves = $filtered_slaves;
}
return $slaves;
};
### first ever call only: do not tolerate connection errors
$slaves = $get_slaves_cb->('intolerant');
$slaves = $ms->get_slaves(
dbh => $cxn->dbh(),
dsn => $cxn->dsn(),
make_cxn => sub {
return $make_cxn->(@_, prev_dsn => $cxn->dsn());
},
);
PTDEBUG && _d(scalar @$slaves, 'slaves found');
if ( scalar @$slaves ) {
print "Found " . scalar(@$slaves) . " slaves:\n";
@@ -8859,7 +8783,6 @@ sub main {
#prev_dsn => $cxn->dsn(),
);
$slave_lag_cxns = [ $cxn ];
$get_slaves_cb = undef;
}
else {
PTDEBUG && _d('Will check slave lag on all slaves');
@@ -8867,9 +8790,31 @@ sub main {
}
if ( $slave_lag_cxns && scalar @$slave_lag_cxns ) {
print "Will check slave lag on:\n";
foreach my $cxn ( @$slave_lag_cxns ) {
print $cxn->description()."\n";
if ($o->get('skip-check-slave-lag')) {
my $slaves_to_skip = $o->get('skip-check-slave-lag');
my $filtered_slaves = [];
for my $slave (@$slave_lag_cxns) {
my $found=0;
for my $slave_to_skip (@$slaves_to_skip) {
if ($slave->{dsn}->{h} eq $slave_to_skip->{h} && $slave->{dsn}->{P} eq $slave_to_skip->{P}) {
$found=1;
}
}
if ($found) {
print "Skipping slave ". $slave->description()."\n";
} else {
push @$filtered_slaves, $slave;
}
}
$slave_lag_cxns = $filtered_slaves;
}
if (!scalar @$slave_lag_cxns) {
print "Not checking slave lag because all slaves were skipped\n";
} else{
print "Will check slave lag on:\n";
foreach my $cxn ( @$slave_lag_cxns ) {
print $cxn->description()."\n";
}
}
}
else {
@@ -8980,12 +8925,11 @@ sub main {
}
$replica_lag = new ReplicaLagWaiter(
slaves => $slave_lag_cxns,
get_slaves_cb => $get_slaves_cb,
max_lag => $o->get('max-lag'),
oktorun => sub { return $oktorun },
get_lag => $get_lag,
sleep => $sleep,
slaves => $slave_lag_cxns,
max_lag => $o->get('max-lag'),
oktorun => sub { return $oktorun },
get_lag => $get_lag,
sleep => $sleep,
);
my $get_status;
@@ -9158,15 +9102,6 @@ sub main {
$child_table->{name},
$child_table->{row_est} || '?';
}
# TODO: Fix self referencing foreign keys handling.
# See: https://jira.percona.com/browse/PT-1802
# https://jira.percona.com/browse/PT-1853
if (_has_self_ref_fks($orig_tbl->{db}, $orig_tbl->{tbl}, $child_tables) && $o->get('check-foreign-keys')) {
print "The table has self-referencing foreign keys and that might lead to errors.\n";
print "Use --no-check-foreign-keys to disable this check.\n";
return 1;
}
if ( $alter_fk_method ) {
# Let the user know how we're going to update the child table
@@ -10461,20 +10396,6 @@ sub check_alter {
return;
}
sub _has_self_ref_fks {
my ($orig_db, $orig_table, $child_tables) = @_;
my $db_tbl = sprintf('`%s`.`%s`', $orig_db, $orig_table);
foreach my $child_table ( @$child_tables ) {
if ("$db_tbl" eq "$child_table->{name}") {
return 1;
}
}
return 0;
}
# This function tries to detect if the --alter param is adding unique indexes.
# It returns an array of arrays, having a list of fields for each unique index
# found.
@@ -12247,15 +12168,6 @@ L<"--print"> and verify that the triggers are correct.
=back
=item --[no]check-foreign-keys
default: yes
Check for self-referencing foreign keys. Currently self referencing FKs are
not fully supported, so, to prevent errors, this program won't run if the table
has self-referencing foreign keys. Use this parameter to disable self-referencing
FK checks.
=item --check-interval
type: time; default: 1
@@ -13379,6 +13291,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-online-schema-change 3.2.1
pt-online-schema-change 3.2.0
=cut

View File

@@ -896,7 +896,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-pmp 3.2.1
pt-pmp 3.2.0
=cut

View File

@@ -64,7 +64,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -16957,6 +16957,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-query-digest 3.2.1
pt-query-digest 3.2.0
=cut

View File

@@ -2613,6 +2613,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-show-grants 3.2.1
pt-show-grants 3.2.0
=cut

View File

@@ -1245,7 +1245,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-sift 3.2.1
pt-sift 3.2.0
=cut

View File

@@ -40,7 +40,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -4988,6 +4988,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-slave-delay 3.2.1
pt-slave-delay 3.2.0
=cut

View File

@@ -4523,6 +4523,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-slave-find 3.2.1
pt-slave-find 3.2.0
=cut

View File

@@ -41,7 +41,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -6159,6 +6159,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-slave-restart 3.2.1
pt-slave-restart 3.2.0
=cut

View File

@@ -2419,7 +2419,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-stalk 3.2.1
pt-stalk 3.2.0
=cut

View File

@@ -2723,7 +2723,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-summary 3.2.1
pt-summary 3.2.0
=cut

View File

@@ -58,7 +58,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -6190,10 +6190,7 @@ sub _get_crc_type {
$type = $sth->{mysql_type_name}->[0];
$length = $sth->{mysql_length}->[0];
PTDEBUG && _d($sql, $type, $length);
if ( $type eq 'integer' && $length < 11 ) {
$type = 'int';
}
elsif ( $type eq 'bigint' && $length < 20 ) {
if ( $type eq 'bigint' && $length < 20 ) {
$type = 'int';
}
};
@@ -14182,6 +14179,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-table-checksum 3.2.1
pt-table-checksum 3.2.0
=cut

View File

@@ -55,7 +55,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -4747,10 +4747,7 @@ sub get_crc_type {
$type = $sth->{mysql_type_name}->[0];
$length = $sth->{mysql_length}->[0];
PTDEBUG && _d($sql, $type, $length);
if ( $type eq 'integer' && $length < 11 ) {
$type = 'int';
}
elsif ( $type eq 'bigint' && $length < 20 ) {
if ( $type eq 'bigint' && $length < 20 ) {
$type = 'int';
}
};
@@ -13083,6 +13080,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-table-sync 3.2.1
pt-table-sync 3.2.0
=cut

View File

@@ -8509,6 +8509,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-table-usage 3.2.1
pt-table-usage 3.2.0
=cut

View File

@@ -61,7 +61,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -11444,6 +11444,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-upgrade 3.2.1
pt-upgrade 3.2.0
=cut

View File

@@ -44,7 +44,7 @@ BEGIN {
{
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';
@@ -6257,6 +6257,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-variable-advisor 3.2.1
pt-variable-advisor 3.2.0
=cut

View File

@@ -3303,6 +3303,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
pt-visual-explain 3.2.1
pt-visual-explain 3.2.0
=cut

View File

@@ -50,7 +50,7 @@ copyright = u'2020, Percona LLC and/or its affiliates'
# The short X.Y version.
version = '3.2'
# The full version, including alpha/beta/rc tags.
release = '3.2.1'
release = '3.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@@ -567,6 +567,6 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
=head1 VERSION
Percona Toolkit v3.2.1 released 2020-08-12
Percona Toolkit v3.2.0 released 2020-04-23
=cut

View File

@@ -1,8 +1,8 @@
.. _pt-mongodb-query-digest:
==================================
:program:`pt-mongodb-query-digest`
==================================
=======================
pt-mongodb-query-digest
=======================
``pt-mongodb-query-digest`` reports query usage statistics
by aggregating queries from MongoDB query profiler.
@@ -89,11 +89,11 @@ Output Example
# Time range: 2017-01-11 12:58:26.519 -0300 ART to 2017-01-11 12:58:26.686 -0300 ART
# Attribute pct total min max avg 95% stddev median
# ================== === ======== ======== ======== ======== ======== ======= ========
# Count (docs) 36
# Exec Time ms 0 0 0 0 0 0 0 0
# Docs Scanned 0 148.00 0.00 74.00 4.11 74.00 16.95 0.00
# Docs Returned 2 148.00 0.00 74.00 4.11 74.00 16.95 0.00
# Bytes recv 0 2.11M 215.00 1.05M 58.48K 1.05M 240.22K 215.00
# Count (docs) 36
# Exec Time ms 0 0 0 0 0 0 0 0
# Docs Scanned 0 148.00 0.00 74.00 4.11 74.00 16.95 0.00
# Docs Returned 2 148.00 0.00 74.00 4.11 74.00 16.95 0.00
# Bytes recv 0 2.11M 215.00 1.05M 58.48K 1.05M 240.22K 215.00
# String:
# Namespaces samples.col1
# Fingerprint $gte,$lt,$meta,$sortKey,filter,find,projection,shardVersion,sort,user_id,user_id

View File

@@ -1,8 +1,8 @@
.. pt-mongodb-summary:
=============================
:program:`pt-mongodb-summary`
=============================
==================
pt-mongodb-summary
==================
``pt-mongodb-summary`` collects information about a MongoDB cluster.
It collects information from several sources
@@ -58,14 +58,14 @@ Output Example
.. code-block:: none
# Instances ####################################################################################
ID Host Type ReplSet
0 localhost:17001 PRIMARY r1
1 localhost:17002 SECONDARY r1
2 localhost:17003 SECONDARY r1
0 localhost:18001 PRIMARY r2
1 localhost:18002 SECONDARY r2
ID Host Type ReplSet
0 localhost:17001 PRIMARY r1
1 localhost:17002 SECONDARY r1
2 localhost:17003 SECONDARY r1
0 localhost:18001 PRIMARY r2
1 localhost:18002 SECONDARY r2
2 localhost:18003 SECONDARY r2
# This host
# Mongo Executable #############################################################################
Path to executable | /home/karl/tmp/MongoDB32Labs/3.0/bin/mongos
@@ -79,9 +79,9 @@ Output Example
Started | 2016-10-30 00:18:49 -0300 ART
Datadir | /data/db
Process Type | mongos
# Running Ops ##################################################################################
Type Min Max Avg
Insert 0 0 0/5s
Query 0 0 0/5s
@@ -89,21 +89,21 @@ Output Example
Delete 0 0 0/5s
GetMore 0 0 0/5s
Command 0 22 16/5s
# Security #####################################################################################
Users 0
Roles 0
Auth disabled
SSL disabled
# Oplog ########################################################################################
Oplog Size 18660 Mb
Oplog Used 55 Mb
Oplog Length 0.91 hours
Last Election 2016-10-30 00:18:44 -0300 ART
# Cluster wide #################################################################################
Databases: 3
Collections: 17

View File

@@ -1,7 +1,5 @@
========================
:program:`pt-pg-summary`
========================
pt-pg-summary
=============
**pt-pg-summary** collects information about a PostgreSQL cluster.
Usage

View File

@@ -1,3 +1,5 @@
.. program:: pt-secure-collect
============================
:program:`pt-secure-collect`
============================
@@ -63,7 +65,7 @@ COMMANDS
Include this dir into the sanitized tar file.
.. option:: --config-file
Path to the config file. Default: ``~/.my.cnf``
.. option:: --mysql-host
@@ -131,7 +133,7 @@ COMMANDS
.. option:: --outfile
Write the output to this file. If omitted, the output file
Write the output to this file. If omitted, the output file
name will be the same as the input file, adding the ``.aes`` extension.
* **Encrypt command**
@@ -144,7 +146,7 @@ COMMANDS
.. option:: --outfile
Write the output to this file. If omitted, the output file
Write the output to this file. If omitted, the output file
name will be the same as the input file, without the ``.aes`` extension.
* **Sanitize command**

View File

@@ -29,22 +29,22 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0;
# Sub: check_recursion_method
# Check that the arrayref of recursion methods passed in is valid
sub check_recursion_method {
sub check_recursion_method {
my ($methods) = @_;
if ( @$methods != 1 ) {
if ( grep({ !m/processlist|hosts/i } @$methods)
&& $methods->[0] !~ /^dsn=/i )
{
die "Invalid combination of recursion methods: "
. join(", ", map { defined($_) ? $_ : 'undef' } @$methods) . ". "
. "Only hosts and processlist may be combined.\n"
}
}
else {
if ( @$methods != 1 ) {
if ( grep({ !m/processlist|hosts/i } @$methods)
&& $methods->[0] !~ /^dsn=/i )
{
die "Invalid combination of recursion methods: "
. join(", ", map { defined($_) ? $_ : 'undef' } @$methods) . ". "
. "Only hosts and processlist may be combined.\n"
}
}
else {
my ($method) = @$methods;
die "Invalid recursion method: " . ( $method || 'undef' )
unless $method && $method =~ m/^(?:processlist$|hosts$|none$|cluster$|dsn=)/i;
}
die "Invalid recursion method: " . ( $method || 'undef' )
unless $method && $method =~ m/^(?:processlist$|hosts$|none$|cluster$|dsn=)/i;
}
}
sub new {
@@ -73,7 +73,7 @@ sub get_slaves {
my $methods = $self->_resolve_recursion_methods($args{dsn});
return $slaves unless @$methods;
if ( grep { m/processlist|hosts/i } @$methods ) {
my @required_args = qw(dbh dsn);
foreach my $arg ( @required_args ) {
@@ -86,7 +86,7 @@ sub get_slaves {
{ dbh => $dbh,
dsn => $dsn,
slave_user => $o->got('slave-user') ? $o->get('slave-user') : '',
slave_password => $o->got('slave-password') ? $o->get('slave-password') : '',
slave_password => $o->got('slave-password') ? $o->get('slave-password') : '',
callback => sub {
my ( $dsn, $dbh, $level, $parent ) = @_;
return unless $level;
@@ -118,7 +118,7 @@ sub get_slaves {
else {
die "Unexpected recursion methods: @$methods";
}
return $slaves;
}
@@ -798,7 +798,7 @@ sub short_host {
# Returns:
# True if the proclist item is the given type of replication thread.
sub is_replication_thread {
my ( $self, $query, %args ) = @_;
my ( $self, $query, %args ) = @_;
return unless $query;
my $type = lc($args{type} || 'all');
@@ -814,7 +814,7 @@ sub is_replication_thread {
# On a slave, there are two threads. Both have user="system user".
if ( ($query->{User} || $query->{user} || '') eq "system user" ) {
PTDEBUG && _d("Slave replication thread");
if ( $type ne 'all' ) {
if ( $type ne 'all' ) {
# Match a particular slave thread.
my $state = $query->{State} || $query->{state} || '';
@@ -831,7 +831,7 @@ sub is_replication_thread {
|Reading\sevent\sfrom\sthe\srelay\slog
|Has\sread\sall\srelay\slog;\swaiting
|Making\stemp\sfile
|Waiting\sfor\sslave\smutex\son\sexit)/xi;
|Waiting\sfor\sslave\smutex\son\sexit)/xi;
# Type is either "slave_sql" or "slave_io". The second line
# implies that if this isn't the sql thread then it must be
@@ -919,7 +919,7 @@ sub get_replication_filters {
replicate_do_db
replicate_ignore_db
replicate_do_table
replicate_ignore_table
replicate_ignore_table
replicate_wild_do_table
replicate_wild_ignore_table
);
@@ -931,7 +931,7 @@ sub get_replication_filters {
$filters{slave_skip_errors} = $row->[1] if $row->[1] && $row->[1] ne 'OFF';
}
return \%filters;
return \%filters;
}

View File

@@ -18,7 +18,7 @@
# ###########################################################################
package Percona::Toolkit;
our $VERSION = '3.2.1';
our $VERSION = '3.2.0';
use strict;
use warnings FATAL => 'all';

View File

@@ -40,7 +40,7 @@ use Data::Dumper;
# slaves - Arrayref of <Cxn> objects
#
# Returns:
# ReplicaLagWaiter object
# ReplicaLagWaiter object
sub new {
my ( $class, %args ) = @_;
my @required_args = qw(oktorun get_lag sleep max_lag slaves);
@@ -80,26 +80,6 @@ sub wait {
my $worst; # most lagging slave
my $pr_callback;
my $pr_first_report;
### refresh list of slaves. In: self passed to wait()
### Returns: new slave list
my $pr_refresh_slave_list = sub {
my ($self) = @_;
my ($slaves, $refresher) = ($self->{slaves}, $self->{get_slaves_cb});
return $slaves if ( not defined $refresher );
my $before = join ' ', sort map {$_->name()} @$slaves;
$slaves = $refresher->();
my $after = join ' ', sort map {$_->name()} @$slaves;
if ($before ne $after) {
$self->{slaves} = $slaves;
printf STDERR "Slave set to watch has changed\n Was: %s\n Now: %s\n",
$before, $after;
}
return($self->{slaves});
};
$slaves = $pr_refresh_slave_list->($self);
if ( $pr ) {
# If you use the default Progress report callback, you'll need
# to add Transformers.pm to this tool.
@@ -136,26 +116,11 @@ sub wait {
}
# First check all slaves.
my @lagged_slaves = map { {cxn=>$_, lag=>undef} } @$slaves;
my @lagged_slaves = map { {cxn=>$_, lag=>undef} } @$slaves;
while ( $oktorun->() && @lagged_slaves ) {
PTDEBUG && _d('Checking slave lag');
### while we were waiting our list of slaves may have changed
$slaves = $pr_refresh_slave_list->($self);
my $watched = 0;
@lagged_slaves = grep {
my $slave_name = $_->{cxn}->name();
grep {$slave_name eq $_->name()} @{$slaves // []}
} @lagged_slaves;
for my $i ( 0..$#lagged_slaves ) {
my $lag;
eval {
$lag = $get_lag->($lagged_slaves[$i]->{cxn});
};
if ($EVAL_ERROR) {
die $EVAL_ERROR;
}
my $lag = $get_lag->($lagged_slaves[$i]->{cxn});
PTDEBUG && _d($lagged_slaves[$i]->{cxn}->name(),
'slave lag:', $lag);
if ( !defined $lag || $lag > $max_lag ) {

View File

@@ -338,10 +338,7 @@ sub _get_crc_type {
$type = $sth->{mysql_type_name}->[0];
$length = $sth->{mysql_length}->[0];
PTDEBUG && _d($sql, $type, $length);
if ( $type eq 'integer' && $length < 11 ) {
$type = 'int';
}
elsif ( $type eq 'bigint' && $length < 20 ) {
if ( $type eq 'bigint' && $length < 20 ) {
$type = 'int';
}
};

View File

@@ -88,10 +88,7 @@ sub get_crc_type {
$type = $sth->{mysql_type_name}->[0];
$length = $sth->{mysql_length}->[0];
PTDEBUG && _d($sql, $type, $length);
if ( $type eq 'integer' && $length < 11 ) {
$type = 'int';
}
elsif ( $type eq 'bigint' && $length < 20 ) {
if ( $type eq 'bigint' && $length < 20 ) {
$type = 'int';
}
};

View File

@@ -5,7 +5,7 @@ import (
"regexp"
"time"
"github.com/percona/percona-toolkit/src/go/pt-pg-summary/models"
"github.com/Percona-Lab/pt-pg-summary/models"
"github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/shirou/gopsutil/process"
@@ -94,11 +94,11 @@ func new(db models.XODB, databases []string, sleep int, logger *logrus.Logger) (
serverVersion, err := models.GetServerVersion(db)
if err != nil {
return nil, errors.Wrap(err, "Cannot get server version")
return nil, errors.Wrap(err, "Cannot get the connected clients list")
}
if info.ServerVersion, err = parseServerVersion(serverVersion.Version); err != nil {
return nil, fmt.Errorf("Cannot parse server version: %s", err.Error())
return nil, fmt.Errorf("cannot get server version: %s", err.Error())
}
info.logger.Infof("Detected PostgreSQL version: %v", info.ServerVersion)
@@ -198,7 +198,7 @@ func (i *PGInfo) CollectGlobalInfo(db models.XODB) []error {
}
}
if i.ServerVersion.GreaterThanOrEqual(version10) {
if !i.ServerVersion.LessThan(version10) {
i.logger.Info("Collecting Slave Hosts (PostgreSQL 10+)")
if i.SlaveHosts10, err = models.GetSlaveHosts10s(db); err != nil {
errs = append(errs, errors.Wrap(err, "Cannot get slave hosts in Postgre 10+"))

View File

@@ -178,6 +178,7 @@ func (self ExampleQuery) ExplainCmd() bson.D {
} else {
cmd = append(cmd[:i], cmd[i+1:]...)
}
break
}
}
}
@@ -249,8 +250,6 @@ func (self ExampleQuery) ExplainCmd() bson.D {
}
}
case "command":
cmd = sanitizeCommand(cmd)
if len(cmd) == 0 || cmd[0].Key != "group" {
break
}
@@ -285,28 +284,3 @@ func (self ExampleQuery) ExplainCmd() bson.D {
},
}
}
func sanitizeCommand(cmd bson.D) bson.D {
if len(cmd) < 1 {
return cmd
}
key := cmd[0].Key
if key != "count" && key != "distinct" {
return cmd
}
for i := range cmd {
// drop $db param as it is not supported in MongoDB 3.0
if cmd[i].Key == "$db" {
if len(cmd)-1 == i {
cmd = cmd[:i]
} else {
cmd = append(cmd[:i], cmd[i+1:]...)
}
break
}
}
return cmd
}

View File

@@ -1,44 +0,0 @@
package proto_test
import (
"testing"
"github.com/percona/percona-toolkit/src/go/mongolib/proto"
"github.com/stretchr/testify/assert"
"go.mongodb.org/mongo-driver/bson"
)
// TestExplainCmd verifies that an ExampleQuery built from a system.profile
// document produces the expected "explain" command document, including the
// $db sanitization applied to count/distinct commands.
func TestExplainCmd(t *testing.T) {
	cases := []struct {
		inDoc []byte
		want  []byte
	}{
		{
			inDoc: []byte(`{"ns":"sbtest.orders","op":"command","command":{"aggregate":"orders",` +
				`"pipeline":[{"$match":{"status":"A"}},{"$group":{"_id":"$cust_id","total":{"$sum":"$amount"}}},` +
				`{"$sort":{"total":-1}}],"cursor":{},"$db":"sbtest"}}`),
			want: []byte(`{"explain":{"aggregate":"orders","pipeline":[{"$match":{"status":"A"}},` +
				`{"$group":{"_id":"$cust_id","total":{"$sum":"$amount"}}},` +
				`{"$sort":{"total":-1}}],"cursor":{},"$db":"sbtest"}}`),
		},
		{
			inDoc: []byte(`{"ns":"sbtest.people","op":"command","command":` +
				`{"count":"people","query":{},"fields":{},"$db":"sbtest"}}`),
			want: []byte(`{"explain":{"count":"people","query":{},"fields":{}}}`),
		},
	}
	for _, tt := range cases {
		var expected bson.D
		assert.NoError(t, bson.UnmarshalExtJSON(tt.want, false, &expected))

		var profile proto.SystemProfile
		assert.NoError(t, bson.UnmarshalExtJSON(tt.inDoc, false, &profile))

		assert.Equal(t, expected, proto.NewExampleQuery(profile).ExplainCmd())
	}
}

View File

@@ -50,9 +50,9 @@ var (
IPv6PG12Port = getVar("PG_IPV6_12_PORT", ipv6PG12Port)
PG9DockerIP = getContainerIP(pg9Container)
PG10DockerIP = getContainerIP(pg10Container)
PG11DockerIP = getContainerIP(pg11Container)
PG12DockerIP = getContainerIP(pg12Container)
PG10DockerIP = getContainerIP(pg9Container)
PG11DockerIP = getContainerIP(pg9Container)
PG12DockerIP = getContainerIP(pg9Container)
DefaultPGPort = "5432"
)

View File

@@ -127,33 +127,12 @@ func connect(dsn string) (*sql.DB, error) {
func funcsMap() template.FuncMap {
return template.FuncMap{
"trim": func(size int, s string) string {
"trim": func(s string, size int) string {
if len(s) < size {
return s
}
return s[:size]+"..."
return s[:size]
},
"convertnullstring": func(s sql.NullString) string {
if s.Valid {
return s.String
} else {
return ""
}
},
"convertnullint64": func(s sql.NullInt64) int64 {
if s.Valid {
return s.Int64
} else {
return 0
}
},
"convertnullfloat64": func(s sql.NullFloat64) float64 {
if s.Valid {
return s.Float64
} else {
return 0.0
}
},
}
}

View File

@@ -6,38 +6,30 @@ import (
"testing"
"github.com/percona/percona-toolkit/src/go/pt-pg-summary/internal/tu"
"github.com/percona/percona-toolkit/src/go/lib/pginfo"
"github.com/sirupsen/logrus"
)
type Test struct {
name string
host string
port string
username string
password string
}
var tests []Test = []Test{
{"IPv4PG9", tu.IPv4Host, tu.IPv4PG9Port, tu.Username, tu.Password},
{"IPv4PG10", tu.IPv4Host, tu.IPv4PG10Port, tu.Username, tu.Password},
{"IPv4PG11", tu.IPv4Host, tu.IPv4PG11Port, tu.Username, tu.Password},
{"IPv4PG12", tu.IPv4Host, tu.IPv4PG12Port, tu.Username, tu.Password},
}
var logger = logrus.New()
func TestMain(m *testing.M) {
logger.SetLevel(logrus.WarnLevel)
os.Exit(m.Run())
}
func TestConnection(t *testing.T) {
// use an "external" IP to simulate a remote host
tests := append(tests, Test{"remote_host", tu.PG9DockerIP, tu.DefaultPGPort, tu.Username, tu.Password})
// use IPV6 for PostgreSQL 9
//tests := append(tests, Test{"IPV6", tu.IPv6Host, tu.IPv6PG9Port, tu.Username, tu.Password})
tests := []struct {
name string
host string
port string
username string
password string
}{
{"IPv4PG9", tu.IPv4Host, tu.IPv4PG9Port, tu.Username, tu.Password},
{"IPv4PG10", tu.IPv4Host, tu.IPv4PG10Port, tu.Username, tu.Password},
{"IPv4PG11", tu.IPv4Host, tu.IPv4PG11Port, tu.Username, tu.Password},
{"IPv4PG12", tu.IPv4Host, tu.IPv4PG12Port, tu.Username, tu.Password},
// use IPV6 for PostgreSQL 9
//{"IPV6", tu.IPv6Host, tu.IPv6PG9Port, tu.Username, tu.Password},
// use an "external" IP to simulate a remote host
{"remote_host", tu.PG9DockerIP, tu.DefaultPGPort, tu.Username, tu.Password},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
@@ -50,77 +42,3 @@ func TestConnection(t *testing.T) {
}
}
// TestNewWithLogger checks that a pginfo collector can be constructed against
// every PostgreSQL instance in the package-level test matrix.
func TestNewWithLogger(t *testing.T) {
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s sslmode=disable dbname=%s",
				test.host, test.port, test.username, test.password, "postgres")
			db, err := connect(dsn)
			if err != nil {
				// Fatalf: continuing after a failed connect would dereference
				// a nil *sql.DB below.
				t.Fatalf("Cannot connect to the db using %q: %s", dsn, err)
			}
			defer db.Close() // don't leak the connection across subtests
			if _, err := pginfo.NewWithLogger(db, nil, 30, logger); err != nil {
				t.Errorf("Cannot run NewWithLogger using %q: %s", dsn, err)
			}
		})
	}
}
// TestCollectGlobalInfo verifies that cluster-wide information can be
// collected from every PostgreSQL instance in the test matrix.
func TestCollectGlobalInfo(t *testing.T) {
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s sslmode=disable dbname=%s",
				test.host, test.port, test.username, test.password, "postgres")
			db, err := connect(dsn)
			if err != nil {
				// Fatalf: a nil *sql.DB would be dereferenced below otherwise.
				t.Fatalf("Cannot connect to the db using %q: %s", dsn, err)
			}
			defer db.Close() // don't leak the connection across subtests
			info, err := pginfo.NewWithLogger(db, nil, 30, logger)
			if err != nil {
				t.Fatalf("Cannot run NewWithLogger using %q: %s", dsn, err)
			}
			errs := info.CollectGlobalInfo(db)
			if len(errs) > 0 {
				logger.Errorf("Cannot collect info")
				for _, err := range errs {
					logger.Error(err)
				}
				t.Errorf("Cannot collect global information using %q", dsn)
			}
		})
	}
}
// TestCollectPerDatabaseInfo verifies that per-database information can be
// collected for every database reported by each PostgreSQL instance in the
// test matrix.
func TestCollectPerDatabaseInfo(t *testing.T) {
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s sslmode=disable dbname=%s",
				test.host, test.port, test.username, test.password, "postgres")
			db, err := connect(dsn)
			if err != nil {
				// Fatalf: a nil *sql.DB would be dereferenced below otherwise.
				t.Fatalf("Cannot connect to the db using %q: %s", dsn, err)
			}
			defer db.Close() // don't leak the connection across subtests
			info, err := pginfo.NewWithLogger(db, nil, 30, logger)
			if err != nil {
				t.Fatalf("Cannot run New using %q: %s", dsn, err)
			}
			for _, dbName := range info.DatabaseNames() {
				dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s sslmode=disable dbname=%s",
					test.host, test.port, test.username, test.password, dbName)
				conn, err := connect(dsn)
				if err != nil {
					// Skip this database instead of using a nil connection.
					t.Errorf("Cannot connect to the %s database using %q: %s", dbName, dsn, err)
					continue
				}
				if err := info.CollectPerDatabaseInfo(conn, dbName); err != nil {
					t.Errorf("Cannot collect information for the %s database using %q: %s", dbName, dsn, err)
				}
				conn.Close()
			}
		})
	}
}

View File

@@ -12,7 +12,7 @@ import (
type ClusterInfo struct {
Usename string // usename
Time time.Time // time
ClientAddr sql.NullString // client_addr
ClientAddr string // client_addr
ClientHostname sql.NullString // client_hostname
Version string // version
Started time.Time // started

View File

@@ -27,7 +27,7 @@ func GetCounters(db XODB) ([]*Counters, error) {
var err error
// sql query
var sqlstr = `SELECT COALESCE(datname, '') datname, numbackends, xact_commit, xact_rollback, ` +
var sqlstr = `SELECT datname, numbackends, xact_commit, xact_rollback, ` +
`blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, ` +
`tup_updated, tup_deleted, conflicts, temp_files, ` +
`temp_bytes, deadlocks ` +

View File

@@ -15,8 +15,7 @@ func GetDatabases(db XODB) ([]*Databases, error) {
// sql query
var sqlstr = `SELECT datname, pg_size_pretty(pg_database_size(datname)) ` +
`FROM pg_stat_database ` +
`WHERE datid <> 0`
`FROM pg_stat_database`
// run query
XOLog(sqlstr)

View File

@@ -3,10 +3,9 @@ USERNAME=postgres
PASSWORD=root
PORT9=6432
PORT10=6433
PORT12=6435
DO_CLEANUP=0
if [ ! "$(docker ps -q -f name=go_postgres9_1)" ]; then
if [ ! "$(docker ps -q -f name=pt-pg-summary_postgres9_1)" ]; then
DO_CLEANUP=1
docker-compose up -d --force-recreate
sleep 20
@@ -54,7 +53,7 @@ xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
ORDER BY 1
ENDSQL
FIELDS='Usename string,Time time.Time,ClientAddr sql.NullString,ClientHostname sql.NullString,Version string,Started time.Time,IsSlave bool'
FIELDS='Usename string,Time time.Time,ClientAddr string,ClientHostname sql.NullString,Version string,Started time.Time,IsSlave bool'
COMMENT='Cluster info'
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
@@ -78,7 +77,7 @@ SELECT usename, now() AS "Time",
ENDSQL
COMMENT="Databases"
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT12}/?sslmode=disable \
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-interpolate \
@@ -88,7 +87,6 @@ xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT12}/?sslmode=disable \
--out ./ << ENDSQL
SELECT datname, pg_size_pretty(pg_database_size(datname))
FROM pg_stat_database
WHERE datid <> 0
ENDSQL
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
@@ -103,14 +101,14 @@ xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
GROUP BY 1
ENDSQL
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT12}/?sslmode=disable \
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-interpolate \
--query-trim \
--query-type Counters \
--package models \
--out ./ << ENDSQL
SELECT COALESCE(datname, '') datname, numbackends, xact_commit, xact_rollback,
SELECT datname, numbackends, xact_commit, xact_rollback,
blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted,
tup_updated, tup_deleted, conflicts, temp_files,
temp_bytes, deadlocks
@@ -118,9 +116,9 @@ xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT12}/?sslmode=disable \
ORDER BY datname
ENDSQL
FIELDS='Relname string, Relkind string, Datname sql.NullString, Count sql.NullInt64'
FIELDS='Relname string, Relkind string,Datname string,Count sql.NullInt64'
COMMENT='Table Access'
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT12}/?sslmode=disable \
xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT9}/?sslmode=disable \
--query-mode \
--query-trim \
--query-type TableAccess \
@@ -130,7 +128,7 @@ xo pgsql://${USERNAME}:${PASSWORD}@127.0.0.1:${PORT12}/?sslmode=disable \
--query-allow-nulls \
--package models \
--out ./ << ENDSQL
SELECT c.relname, c.relkind, b.datname datname, count(*) FROM pg_locks a
SELECT c.relname, c.relkind, b.datname, count(*) FROM pg_locks a
JOIN pg_stat_database b
ON a.database=b.datid
JOIN pg_class c

View File

@@ -9,10 +9,10 @@ import (
// Table Access
type TableAccess struct {
Relname string // relname
Relkind string // relkind
Datname sql.NullString // datname
Count sql.NullInt64 // count
Relname string // relname
Relkind string // relkind
Datname string // datname
Count sql.NullInt64 // count
}
// GetTableAccesses runs a custom query, returning results as TableAccess.
@@ -20,7 +20,7 @@ func GetTableAccesses(db XODB) ([]*TableAccess, error) {
var err error
// sql query
var sqlstr = `SELECT c.relname, c.relkind, b.datname datname, count(*) FROM pg_locks a ` +
var sqlstr = `SELECT c.relname, c.relkind, b.datname, count(*) FROM pg_locks a ` +
`JOIN pg_stat_database b ` +
`ON a.database=b.datid ` +
`JOIN pg_class c ` +

View File

@@ -5,10 +5,8 @@ var TPL = `{{define "report"}}
{{ template "tablespaces" .Tablespaces }}
{{ if .SlaveHosts96 -}}
{{ template "slaves_and_lag" .SlaveHosts96 }}
{{- else if .SlaveHosts10 -}}
{{ else if .SlaveHosts10 -}}
{{ template "slaves_and_lag" .SlaveHosts10 }}
{{- else -}}
{{ template "slaves_and_log_none" }}
{{- end }}
{{ template "cluster" .ClusterInfo }}
{{ template "databases" .AllDatabases }}
@@ -45,35 +43,34 @@ var TPL = `{{define "report"}}
` +
`{{ define "slaves_and_lag" -}}
##### --- Slave and the lag with Master --- ####
+----------------------+----------------------+--------------------------------+-------------------+
| Application Name | Client Address | State | Lag |
+----------------------+----------------------+--------------------------------+-------------------+
{{ range . -}}` +
`| {{ convertnullstring .ApplicationName | printf "%-20s" }} | ` +
`{{ convertnullstring .ClientAddr | printf "%-20s" }} | ` +
`{{ convertnullstring .State | printf "%-30s" }} | ` +
`{{ convertnullfloat64 .ByteLag | printf "% 17.2f" }} |` + "\n" +
`{{ end -}}
{{ if . -}}
+----------------------+----------------------+----------------------------------------------------+
{{ end -}} {{/* end define */}}
` +
`{{- define "slaves_and_log_none" -}}
##### --- Slave and the lag with Master --- ####
| Application Name | Client Address | State | Lag |
+----------------------+----------------------+----------------------------------------------------+
{{ range . -}}` +
`| {{ printf "%-20s" .ApplicationName }} ` +
`| {{ printf "%-20s" .ClientAddr }} ` +
`| {{ printf "%-50s" .State }} ` +
`| {{ printf "% 4.2f" .ByteLag }}` +
`{{ end -}} {{/* end define */}}
+----------------------+----------------------+----------------------------------------------------+
{{- else -}}
There are no slave hosts
{{ end -}} {{/* end define */}}
{{ end -}}
{{ end -}}
` +
`{{ define "cluster" -}}
##### --- Cluster Information --- ####
{{ if . -}}
+------------------------------------------------------------------------------------------------------+
{{- range . }}
Usename : {{ trim 20 .Usename }}
Time : {{ printf "%v" .Time }}
Client Address : {{ convertnullstring .ClientAddr | trim 20 }}
Client Hostname: {{ convertnullstring .ClientHostname | trim 90 }}
Version : {{ trim 90 .Version }}
Started : {{ printf "%v" .Started }}
Is Slave : {{ .IsSlave }}
{{- range . }}
Usename : {{ printf "%-20s" .Usename }}
Time : {{ printf "%v" .Time }}
Client Address : {{ printf "%-20s" .ClientAddr }}
Client Hostname: {{ trim .ClientHostname.String 80 }}
Version : {{ trim .Version 80 }}
Started : {{ printf "%v" .Started }}
Is Slave : {{ .IsSlave }}
+------------------------------------------------------------------------------------------------------+
{{ end -}}
{{ else -}}
@@ -100,7 +97,7 @@ Database: {{ $dbname }}
+----------------------+------------+
| Index Name | Ratio |
+----------------------+------------+
| {{ printf "%-20s" .Name }} | {{ convertnullfloat64 .Ratio | printf "% 5.2f" }} |
| {{ printf "%-20s" .Name }} | {{ printf "% 5.2f" .Ratio.Float64 }} |
+----------------------+------------+
{{ else -}}
No stats available
@@ -147,10 +144,10 @@ Database: {{ $dbname }}
+----------------------+------------+---------+----------------------+---------+
{{ range . -}}` +
`| {{ printf "%-20s" .Usename }} | ` +
`{{ convertnullstring .Client | printf "%-20s" }} | ` +
`{{ convertnullstring .State | printf "%-20s" }} | ` +
`{{ convertnullint64 .Count | printf "% 7d" }} |` + "\n" +
`{{ end -}}
`{{ printf "%-20s" .Client.String }} | ` +
`{{ printf "%-20s" .State.String }} | ` +
`{{ printf "% 7d" .Count.Int64 }} |` + "\n" +
`{{ end -}}
+----------------------+------------+---------+----------------------+---------+
{{ else -}}
No stats available
@@ -269,8 +266,8 @@ Database: {{ $dbname }}
`{{ range . -}}
| {{ printf "%-50s" .Relname }} ` +
`| {{ printf "%1s" .Relkind }} ` +
`| {{ convertnullstring .Datname | printf "%-30s" }} ` +
`| {{ convertnullint64 .Count | printf "% 7d" }} ` +
`| {{ printf "%-30s" .Datname }} ` +
`| {{ printf "% 7d" .Count.Int64 }} ` +
"|\n" +
"{{ end }}" +
"+----------------------------------------------------" +
@@ -289,7 +286,7 @@ Database: {{ $dbname }}
" Value \n" +
`{{ range $name, $values := . -}}` +
` {{ printf "%-45s" .Name }} ` +
`: {{ printf "%s" .Setting }}` +
`: {{ printf "%-60s" .Setting }} ` +
"\n" +
"{{ end }}" +
"{{ end }}" +

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env perl

# PT-1853: pt-online-schema-change must refuse to run on tables with
# self-referencing foreign keys unless --no-check-foreign-keys is given.

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use threads;
use threads::shared;
use Thread::Semaphore;

use English qw(-no_match_vars);
use Test::More;
use Data::Dumper;

use PerconaTest;
use Sandbox;
use SqlModes;
use File::Temp qw/ tempdir /;

require "$trunk/bin/pt-online-schema-change";

plan tests => 3;

my $dp         = new DSNParser(opts=>$dsn_opts);
my $sb         = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $master_dbh = $sb->get_dbh_for("master");
my $master_dsn = $sb->dsn_for("master");

# The sandbox servers run with lock_wait_timeout=3 and it's not dynamic
# so we need to specify --set-vars innodb_lock_wait_timeout=3 else the
# tool will die.
my @args = (qw(--set-vars innodb_lock_wait_timeout=3));
my $output;
my $exit_status;

$sb->load_file('master', "t/pt-online-schema-change/samples/pt-1853.sql");

# Without --no-check-foreign-keys the tool must detect the
# self-referencing FK and refuse to run.
($output, $exit_status) = full_output(
   sub { pt_online_schema_change::main(@args, "$master_dsn,D=test,t=jointit",
         '--execute',
         '--alter', "engine=innodb",
         '--alter-foreign-keys-method', 'rebuild_constraints'
         ),
   },
   stderr => 1,
);

isnt(
   $exit_status,
   0,
   "PT-1853, there are self-referencing FKs -> exit status != 0",
);

# With --no-check-foreign-keys the same alter must succeed.
($output, $exit_status) = full_output(
   sub { pt_online_schema_change::main(@args, "$master_dsn,D=test,t=jointit",
         '--execute',
         '--alter', "engine=innodb",
         '--alter-foreign-keys-method', 'rebuild_constraints',
         '--no-check-foreign-keys'
         ),
   },
   stderr => 1,
);

# Fixed: the description says "exit status = 0" so the assertion must be
# is(), not isnt().
is(
   $exit_status,
   0,
   "PT-1853, there are self-referencing FKs but --no-check-foreign-keys was specified -> exit status = 0",
);

# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($master_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");

# No done_testing() here: an explicit plan (plan tests => 3) is already
# declared above, and Test::More forbids combining both.

View File

@@ -64,9 +64,9 @@ my $constraints = $master_dbh->selectall_arrayref($query);
is_deeply(
$constraints,
[
['person', 'fk_testId'],
['test_table', 'fk_person'],
['test_table', 'fk_refId'],
['person', '_fk_testId'],
['test_table', '_fk_person'],
['test_table', '__fk_refId'],
],
"First run adds or removes underscore from constraint names, accordingly"
);
@@ -94,9 +94,9 @@ $constraints = $master_dbh->selectall_arrayref($query);
is_deeply(
$constraints,
[
['person', 'fk_testId'],
['test_table', 'fk_person'],
['test_table', 'fk_refId'],
['person', '__fk_testId'],
['test_table', '_fk_refId'],
['test_table', '__fk_person'],
],
"Second run self-referencing will be one due to rebuild_constraints"
);

View File

@@ -60,14 +60,13 @@ my $query = <<"END";
ORDER BY TABLE_NAME, CONSTRAINT_NAME
END
my $constraints = $master_dbh->selectall_arrayref($query);
my @constraints = sort { @$a[0].@$a[1] cmp @$b[0].@$b[1] } @$constraints;
is_deeply(
$constraints,
[
['person', 'fk_testId'],
['test_table', 'fk_person'],
['test_table', 'fk_refId'],
['person', '_fk_testId'],
['test_table', '_fk_person'],
['test_table', '__fk_refId'],
],
"First run adds or removes underscore from constraint names, accordingly"
);
@@ -91,14 +90,13 @@ ORDER BY TABLE_NAME, CONSTRAINT_NAME
END
$constraints = $master_dbh->selectall_arrayref($query);
@constraints = sort { @$a[0].@$a[1] cmp @$b[0].@$b[1] } @$constraints;
is_deeply(
\@constraints,
$constraints,
[
['person', 'fk_testId'],
['test_table', 'fk_person'],
['test_table', 'fk_refId'],
['person', '__fk_testId'],
['test_table', '_fk_refId'],
['test_table', '__fk_person'],
],
"Second run self-referencing will be one due to rebuild_constraints"
);

View File

@@ -1,19 +0,0 @@
-- Fixture for the PT-1853 pt-online-schema-change test: builds a table with a
-- self-referencing foreign key so the tool's FK checks can be exercised.
DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;
-- Unrelated helper table (no keys) kept in the schema.
CREATE TABLE t1 (
id int,
f1 int
);
CREATE TABLE `joinit` (
`i` int(11) NOT NULL AUTO_INCREMENT,
`s` varchar(64) DEFAULT NULL,
`t` time NOT NULL,
`g` int(11) NOT NULL,
`j` int(11) NOT NULL DEFAULT 1,
PRIMARY KEY (`i`))
ENGINE=InnoDB;
-- The self-referencing FK: joinit.j points back at joinit.i.
ALTER TABLE joinit ADD FOREIGN KEY i_fk (j) REFERENCES joinit (i) ON UPDATE cascade ON DELETE restrict;

View File

@@ -17,12 +17,12 @@ use Data::Dumper;
use PerconaTest;
use Sandbox;
use SqlModes;
use File::Temp qw/ tempdir tempfile /;
use File::Temp qw/ tempdir /;
if ($ENV{PERCONA_SLOW_BOX}) {
plan skip_all => 'This test needs a fast machine';
} else {
plan tests => 6;
plan tests => 4;
}
our $delay = 30;
@@ -37,7 +37,6 @@ my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $master_dbh = $sb->get_dbh_for('master');
my $slave_dbh = $sb->get_dbh_for('slave1');
my $master_dsn = 'h=127.0.0.1,P=12345,u=msandbox,p=msandbox';
my $slave_dsn = 'h=127.0.0.1,P=12346,u=msandbox,p=msandbox';
if ( !$master_dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
@@ -59,29 +58,29 @@ $slave_dbh->do('STOP SLAVE');
$slave_dbh->do('RESET SLAVE');
$slave_dbh->do('START SLAVE');
diag('Loading test data');
$sb->load_file('master', "t/pt-online-schema-change/samples/slave_lag.sql");
my $num_rows = 5000;
diag("Loading $num_rows into the table. This might take some time.");
diag(`util/mysql_random_data_load --host=127.0.0.1 --port=12345 --user=msandbox --password=msandbox test pt178 --bulk-size=1 --max-threads=1 $num_rows`);
diag("Setting slave delay to $delay seconds");
$slave_dbh->do('STOP SLAVE');
$slave_dbh->do("CHANGE MASTER TO MASTER_DELAY=$delay");
$slave_dbh->do('START SLAVE');
diag('Loading test data');
$sb->load_file('master', "t/pt-online-schema-change/samples/slave_lag.sql");
my $num_rows = 10000;
diag("Loading $num_rows into the table. This might take some time.");
diag(`util/mysql_random_data_load --host=127.0.0.1 --port=12345 --user=msandbox --password=msandbox test pt178 $num_rows`);
# Run a full table scan query to ensure the slave is behind the master
# There is no query cache in MySQL 8.0+
reset_query_cache($master_dbh, $master_dbh);
$master_dbh->do('UPDATE `test`.`pt178` SET f2 = f2 + 1 WHERE f1 = ""');
$master_dbh->do('UPDATE `test`.`pt178` SET f2 = f2 + 1 WHERE f1 = ""');
# This is the base test, ust to ensure that without using --check-slave-lag nor --skip-check-slave-lag
# pt-online-schema-change will wait on the slave at port 12346
my $max_lag = $delay / 2;
my $args = "$master_dsn,D=test,t=pt178 --execute --chunk-size 10 --max-lag $max_lag --alter 'ENGINE=InnoDB' --pid $tmp_file_name";
my $args = "$master_dsn,D=test,t=pt178 --execute --chunk-size 1 --max-lag 5 --alter 'ENGINE=InnoDB' --pid $tmp_file_name";
diag("Starting base test. This is going to take some time due to the delay in the slave");
diag("pid: $tmp_file_name");
my $output = `$trunk/bin/pt-online-schema-change $args 2>&1`;
@@ -93,12 +92,12 @@ like(
);
# Repeat the test now using --check-slave-lag
$args = "$master_dsn,D=test,t=pt178 --execute --chunk-size 1 --max-lag $max_lag --alter 'ENGINE=InnoDB' "
. "--check-slave-lag h=127.0.0.1,P=12346,u=msandbox,p=msandbox,D=test,t=sbtest --pid $tmp_file_name";
$args = "$master_dsn,D=test,t=pt178 --execute --chunk-size 1 --max-lag 5 --alter 'ENGINE=InnoDB' "
. "--check-slave-lag h=127.0.0.1,P=12346,u=msandbox,p=msandbox,D=test,t=sbtest";
# Run a full table scan query to ensure the slave is behind the master
reset_query_cache($master_dbh, $master_dbh);
$master_dbh->do('UPDATE `test`.`pt178` SET f2 = f2 + 1 WHERE f1 = ""');
$master_dbh->do('UPDATE `test`.`pt178` SET f2 = f2 + 1 WHERE f1 = ""');
diag("Starting --check-slave-lag test. This is going to take some time due to the delay in the slave");
$output = `$trunk/bin/pt-online-schema-change $args 2>&1`;
@@ -109,56 +108,13 @@ like(
"--check-slave-lag waits on the correct slave",
);
# Repeat the test new adding and removing a slave during the process
$args = "$master_dsn,D=test,t=pt178 --execute --chunk-size 1 --max-lag $max_lag --alter 'ENGINE=InnoDB' "
. "--recursion-method=dsn=D=test,t=dynamic_replicas --recurse 0 --pid $tmp_file_name";
$master_dbh->do('CREATE TABLE `test`.`dynamic_replicas` (id INTEGER PRIMARY KEY, dsn VARCHAR(255) )');
$master_dbh->do("INSERT INTO `test`.`dynamic_replicas` (id, dsn) VALUES (1, '$slave_dsn')");
# Run a full table scan query to ensure the slave is behind the master
reset_query_cache($master_dbh, $master_dbh);
$master_dbh->do('UPDATE `test`.`pt178` SET f2 = f2 + 1 WHERE f1 = ""');
diag("Starting --recursion-method with changes during the process");
my ($fh, $filename) = tempfile();
my $pid = fork();
if (!$pid) {
open(STDERR, '>', $filename);
open(STDOUT, '>', $filename);
exec("$trunk/bin/pt-online-schema-change $args");
}
sleep(60);
$master_dbh->do("DELETE FROM `test`.`dynamic_replicas` WHERE id = 1;");
waitpid($pid, 0);
$output = do {
local $/ = undef;
<$fh>;
};
unlink $filename;
like(
$output,
qr/Slave set to watch has changed/s,
"--recursion-method=dsn updates the slave list",
);
like(
$output,
qr/Replica lag is \d+ seconds on .* Waiting/s,
"--recursion-method waits on a replica",
);
# Repeat the test now using --skip-check-slave-lag
# Run a full table scan query to ensure the slave is behind the master
reset_query_cache($master_dbh, $master_dbh);
$master_dbh->do('UPDATE `test`.`pt178` SET f2 = f2 + 1 WHERE f1 = ""');
$master_dbh->do('UPDATE `test`.`pt178` SET f2 = f2 + 1 WHERE f1 = ""');
$args = "$master_dsn,D=test,t=pt178 --execute --chunk-size 1 --max-lag $max_lag --alter 'ENGINE=InnoDB' "
. "--skip-check-slave-lag h=127.0.0.1,P=12346,u=msandbox,p=msandbox,D=test,t=sbtest --pid $tmp_file_name";
$args = "$master_dsn,D=test,t=pt178 --execute --chunk-size 1 --max-lag 5 --alter 'ENGINE=InnoDB' "
. "--skip-check-slave-lag h=127.0.0.1,P=12346,u=msandbox,p=msandbox,D=test,t=sbtest";
diag("Starting --skip-check-slave-lag test. This is going to take some time due to the delay in the slave");
$output = `$trunk/bin/pt-online-schema-change $args 2>&1`;