Compare commits

..

1 Commits

Author SHA1 Message Date
Carlos Salguero
3b52114e93 Fixed release date in changelog 2022-12-02 09:38:56 -03:00
33 changed files with 630 additions and 1029 deletions

View File

@@ -112,10 +112,10 @@ sub _d {
# ###########################################################################
# VersionCompare package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/VersionCompare.pm
# t/lib/VersionCompare.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package VersionCompare;
@@ -130,13 +130,13 @@ sub cmp {
$v1 =~ s/[^\d\.]//;
$v2 =~ s/[^\d\.]//;
my @a = ( $v1 =~ /(\d+)\.?/g );
my @b = ( $v2 =~ /(\d+)\.?/g );
my @a = ( $v1 =~ /(\d+)\.?/g );
my @b = ( $v2 =~ /(\d+)\.?/g );
foreach my $n1 (@a) {
$n1 += 0; #convert to number
if (!@b) {
return 1;
}
}
my $n2 = shift @b;
$n2 += 0; # convert to number
if ($n1 == $n2) {
@@ -144,8 +144,8 @@ sub cmp {
}
else {
return $n1 <=> $n2;
}
}
}
}
return @b ? -1 : 0;
}
@@ -159,10 +159,10 @@ sub cmp {
# ###########################################################################
# OptionParser package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/OptionParser.pm
# t/lib/OptionParser.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package OptionParser;
@@ -220,7 +220,7 @@ sub new {
rules => [], # desc of rules for --help
mutex => [], # rule: opts are mutually exclusive
atleast1 => [], # rule: at least one opt is required
disables => {}, # rule: opt disables other opts
disables => {}, # rule: opt disables other opts
defaults_to => {}, # rule: opt defaults to value of other opt
DSNParser => undef,
default_files => [
@@ -383,7 +383,7 @@ sub _pod_to_specs {
}
push @specs, {
spec => $self->{parse_attributes}->($self, $option, \%attribs),
spec => $self->{parse_attributes}->($self, $option, \%attribs),
desc => $para
. (defined $attribs{default} ? " (default $attribs{default})" : ''),
group => ($attribs{'group'} ? $attribs{'group'} : 'default'),
@@ -474,7 +474,7 @@ sub _parse_specs {
$self->{opts}->{$long} = $opt;
}
else { # It's an option rule, not a spec.
PTDEBUG && _d('Parsing rule:', $opt);
PTDEBUG && _d('Parsing rule:', $opt);
push @{$self->{rules}}, $opt;
my @participants = $self->_get_participants($opt);
my $rule_ok = 0;
@@ -519,7 +519,7 @@ sub _parse_specs {
PTDEBUG && _d('Option', $long, 'disables', @participants);
}
return;
return;
}
sub _get_participants {
@@ -606,7 +606,7 @@ sub _set_option {
}
sub get_opts {
my ( $self ) = @_;
my ( $self ) = @_;
foreach my $long ( keys %{$self->{opts}} ) {
$self->{opts}->{$long}->{got} = 0;
@@ -737,7 +737,7 @@ sub _check_opts {
else {
$err = join(', ',
map { "--$self->{opts}->{$_}->{long}" }
grep { $_ }
grep { $_ }
@restricted_opts[0..scalar(@restricted_opts) - 2]
)
. ' or --'.$self->{opts}->{$restricted_opts[-1]}->{long};
@@ -747,7 +747,7 @@ sub _check_opts {
}
}
elsif ( $opt->{is_required} ) {
elsif ( $opt->{is_required} ) {
$self->save_error("Required option --$long must be specified");
}
@@ -1131,7 +1131,7 @@ sub clone {
$clone{$scalar} = $self->{$scalar};
}
return bless \%clone;
return bless \%clone;
}
sub _parse_size {
@@ -1271,10 +1271,10 @@ if ( PTDEBUG ) {
# ###########################################################################
# Lmo::Utils package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Lmo/Utils.pm
# t/lib/Lmo/Utils.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Lmo::Utils;
@@ -1331,10 +1331,10 @@ sub _unimport_coderefs {
# ###########################################################################
# Lmo::Meta package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Lmo/Meta.pm
# t/lib/Lmo/Meta.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Lmo::Meta;
@@ -1388,10 +1388,10 @@ sub attributes_for_new {
# ###########################################################################
# Lmo::Object package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Lmo/Object.pm
# t/lib/Lmo/Object.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Lmo::Object;
@@ -1484,10 +1484,10 @@ sub meta {
# ###########################################################################
# Lmo::Types package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Lmo/Types.pm
# t/lib/Lmo/Types.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Lmo::Types;
@@ -1585,10 +1585,10 @@ sub _nested_constraints {
# ###########################################################################
# Lmo package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Lmo.pm
# t/lib/Lmo.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
BEGIN {
@@ -1646,7 +1646,7 @@ sub extends {
sub _load_module {
my ($class) = @_;
(my $file = $class) =~ s{::|'}{/}g;
$file .= '.pm';
{ local $@; eval { require "$file" } } # or warn $@;
@@ -1677,7 +1677,7 @@ sub has {
my $caller = scalar caller();
my $class_metadata = Lmo::Meta->metadata_for($caller);
for my $attribute ( ref $names ? @$names : $names ) {
my %args = @_;
my $method = ($args{is} || '') eq 'ro'
@@ -1696,16 +1696,16 @@ sub has {
if ( my $type_check = $args{isa} ) {
my $check_name = $type_check;
if ( my ($aggregate_type, $inner_type) = $type_check =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) {
$type_check = Lmo::Types::_nested_constraints($attribute, $aggregate_type, $inner_type);
}
my $check_sub = sub {
my ($new_val) = @_;
Lmo::Types::check_type_constaints($attribute, $type_check, $check_name, $new_val);
};
$class_metadata->{$attribute}{isa} = [$check_name, $check_sub];
my $orig_method = $method;
$method = sub {
@@ -1920,10 +1920,10 @@ sub override {
# ###########################################################################
# VersionParser package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/VersionParser.pm
# t/lib/VersionParser.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package VersionParser;
@@ -2112,10 +2112,10 @@ no Lmo;
# ###########################################################################
# DSNParser package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/DSNParser.pm
# t/lib/DSNParser.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package DSNParser;
@@ -2199,7 +2199,7 @@ sub parse {
foreach my $key ( keys %$opts ) {
PTDEBUG && _d('Finding value for', $key);
$final_props{$key} = $given_props{$key};
if ( !defined $final_props{$key}
if ( !defined $final_props{$key}
&& defined $prev->{$key} && $opts->{$key}->{copy} )
{
$final_props{$key} = $prev->{$key};
@@ -2339,7 +2339,7 @@ sub get_dbh {
my $dbh;
my $tries = 2;
while ( !$dbh && $tries-- ) {
PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass,
PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass,
join(', ', map { "$_=>$defaults->{$_}" } keys %$defaults ));
$dbh = eval { DBI->connect($cxn_string, $user, $pass, $defaults) };
@@ -2537,7 +2537,7 @@ sub set_vars {
}
}
return;
return;
}
sub _d {
@@ -2557,10 +2557,10 @@ sub _d {
# ###########################################################################
# Daemon package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Daemon.pm
# t/lib/Daemon.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Daemon;
@@ -2618,7 +2618,7 @@ sub run {
$parent_exit->($child_pid) if $parent_exit;
exit 0;
}
POSIX::setsid() or die "Cannot start a new session: $OS_ERROR";
chdir '/' or die "Cannot chdir to /: $OS_ERROR";
@@ -2644,7 +2644,7 @@ sub run {
close STDERR;
open STDERR, ">&STDOUT"
or die "Cannot dupe STDERR to STDOUT: $OS_ERROR";
or die "Cannot dupe STDERR to STDOUT: $OS_ERROR";
}
else {
if ( -t STDOUT ) {
@@ -2682,7 +2682,7 @@ sub _make_pid_file {
eval {
sysopen(PID_FH, $pid_file, O_RDWR|O_CREAT|O_EXCL) or die $OS_ERROR;
print PID_FH $PID, "\n";
close PID_FH;
close PID_FH;
};
if ( my $e = $EVAL_ERROR ) {
if ( $e =~ m/file exists/i ) {
@@ -2811,10 +2811,10 @@ sub _d {
# ###########################################################################
# Quoter package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Quoter.pm
# t/lib/Quoter.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Quoter;
@@ -2869,7 +2869,7 @@ sub split_unquote {
s/`\z//;
s/``/`/g;
}
return ($db, $tbl);
}
@@ -2964,10 +2964,10 @@ sub _d {
# ###########################################################################
# TableNibbler package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/TableNibbler.pm
# t/lib/TableNibbler.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package TableNibbler;
@@ -2999,7 +2999,7 @@ sub generate_asc_stmt {
die "Index '$index' does not exist in table"
unless exists $tbl_struct->{keys}->{$index};
PTDEBUG && _d('Will ascend index', $index);
PTDEBUG && _d('Will ascend index', $index);
my @asc_cols = @{$tbl_struct->{keys}->{$index}->{cols}};
if ( $args{asc_first} ) {
@@ -3230,10 +3230,10 @@ sub _d {
# ###########################################################################
# TableParser package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/TableParser.pm
# t/lib/TableParser.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package TableParser;
@@ -3389,9 +3389,9 @@ sub parse {
sub remove_quoted_text {
my ($string) = @_;
$string =~ s/\\['"]//g;
$string =~ s/`[^`]*?`//g;
$string =~ s/"[^"]*?"//g;
$string =~ s/'[^']*?'//g;
$string =~ s/`[^`]*?`//g;
$string =~ s/"[^"]*?"//g;
$string =~ s/'[^']*?'//g;
return $string;
}
@@ -3663,10 +3663,10 @@ sub _d {
# ###########################################################################
# Progress package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Progress.pm
# t/lib/Progress.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Progress;
@@ -3810,10 +3810,10 @@ sub _d {
# ###########################################################################
# Retry package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Retry.pm
# t/lib/Retry.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Retry;
@@ -3890,10 +3890,10 @@ sub _d {
# ###########################################################################
# Cxn package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Cxn.pm
# t/lib/Cxn.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Cxn;
@@ -4054,7 +4054,7 @@ sub get_id {
my $sql = q{SHOW STATUS LIKE 'wsrep\_local\_index'};
my (undef, $wsrep_local_index) = $cxn->dbh->selectrow_array($sql);
PTDEBUG && _d("Got cluster wsrep_local_index: ",$wsrep_local_index);
$unique_id = $wsrep_local_index."|";
$unique_id = $wsrep_local_index."|";
foreach my $val ('server\_id', 'wsrep\_sst\_receive\_address', 'wsrep\_node\_name', 'wsrep\_node\_address') {
my $sql = "SHOW VARIABLES LIKE '$val'";
PTDEBUG && _d($cxn->name, $sql);
@@ -4084,7 +4084,7 @@ sub is_cluster_node {
PTDEBUG && _d($sql); #don't invoke name() if it's not a Cxn!
}
else {
$dbh = $cxn->dbh();
$dbh = $cxn->dbh();
PTDEBUG && _d($cxn->name, $sql);
}
@@ -4154,10 +4154,10 @@ sub _d {
# ###########################################################################
# MasterSlave package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/MasterSlave.pm
# t/lib/MasterSlave.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package MasterSlave;
@@ -4294,7 +4294,7 @@ sub recurse_to_slaves {
my $slave_dsn = $dsn;
if ($slave_user) {
$slave_dsn->{u} = $slave_user;
PTDEBUG && _d("Using slave user $slave_user on ".$slave_dsn->{h}.":".$slave_dsn->{P});
PTDEBUG && _d("Using slave user $slave_user on ".$slave_dsn->{h}.":".($slave_dsn->{P}?$slave_dsn->{P}:""));
}
if ($slave_password) {
$slave_dsn->{p} = $slave_password;
@@ -4969,10 +4969,10 @@ sub _d {
# ###########################################################################
# ReplicaLagWaiter package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/ReplicaLagWaiter.pm
# t/lib/ReplicaLagWaiter.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package ReplicaLagWaiter;
@@ -5017,6 +5017,8 @@ sub wait {
my $pr_callback;
my $pr_first_report;
### refresh list of slaves. In: self passed to wait()
### Returns: new slave list
my $pr_refresh_slave_list = sub {
my ($self) = @_;
my ($slaves, $refresher) = ($self->{slaves}, $self->{get_slaves_cb});
@@ -5035,6 +5037,8 @@ sub wait {
$slaves = $pr_refresh_slave_list->($self);
if ( $pr ) {
# If you use the default Progress report callback, you'll need to
# to add Transformers.pm to this tool.
$pr_callback = sub {
my ($fraction, $elapsed, $remaining, $eta, $completed) = @_;
my $dsn_name = $worst->{cxn}->name();
@@ -5046,28 +5050,33 @@ sub wait {
if ($self->{fail_on_stopped_replication}) {
die 'replication is stopped';
}
print STDERR "Replica $dsn_name is stopped. Waiting.\n";
print STDERR "(1) Replica '$dsn_name' is stopped. Waiting.\n";
}
return;
};
$pr->set_callback($pr_callback);
# If a replic is stopped, don't wait 30s (or whatever interval)
# to report this. Instead, report it once, immediately, then
# keep reporting it every interval.
$pr_first_report = sub {
my $dsn_name = $worst->{cxn}->name();
if ( !defined $worst->{lag} ) {
if ($self->{fail_on_stopped_replication}) {
die 'replication is stopped';
}
print STDERR "Replica $dsn_name is stopped. Waiting.\n";
print STDERR "(2) Replica '$dsn_name' is stopped. Waiting.\n";
}
return;
};
}
# First check all slaves.
my @lagged_slaves = map { {cxn=>$_, lag=>undef} } @$slaves;
while ( $oktorun->() && @lagged_slaves ) {
PTDEBUG && _d('Checking slave lag');
### while we were waiting our list of slaves may have changed
$slaves = $pr_refresh_slave_list->($self);
my $watched = 0;
@lagged_slaves = grep {
@@ -5093,8 +5102,10 @@ sub wait {
}
}
# Remove slaves that aren't lagging.
@lagged_slaves = grep { defined $_ } @lagged_slaves;
if ( @lagged_slaves ) {
# Sort lag, undef is highest because it means the slave is stopped.
@lagged_slaves = reverse sort {
defined $a->{lag} && defined $b->{lag} ? $a->{lag} <=> $b->{lag}
: defined $a->{lag} ? -1
@@ -5105,6 +5116,10 @@ sub wait {
$worst->{lag}, 'on', Dumper($worst->{cxn}->dsn()));
if ( $pr ) {
# There's no real progress because we can't estimate how long
# it will take all slaves to catch up. The progress reports
# are just to inform the user every 30s which slave is still
# lagging this most.
$pr->update(
sub { return 0; },
first_report => $pr_first_report,
@@ -5138,10 +5153,10 @@ sub _d {
# ###########################################################################
# FlowControlWaiter package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/FlowControlWaiter.pm
# t/lib/FlowControlWaiter.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package FlowControlWaiter;
@@ -5164,9 +5179,9 @@ sub new {
my $self = {
%args
};
$self->{last_time} = time();
$self->{last_time} = time();
my (undef, $last_fc_ns) = $self->{node}->selectrow_array('SHOW STATUS LIKE "wsrep_flow_control_paused_ns"');
$self->{last_fc_secs} = $last_fc_ns/1000_000_000;
@@ -5202,11 +5217,11 @@ sub wait {
my $current_time = time();
my (undef, $current_fc_ns) = $node->selectrow_array('SHOW STATUS LIKE "wsrep_flow_control_paused_ns"');
my $current_fc_secs = $current_fc_ns/1000_000_000;
my $current_avg = ($current_fc_secs - $self->{last_fc_secs}) / ($current_time - $self->{last_time});
if ( $current_avg > $max_avg ) {
my $current_avg = ($current_fc_secs - $self->{last_fc_secs}) / ($current_time - $self->{last_time});
if ( $current_avg > $max_avg ) {
if ( $pr ) {
$pr->update(sub { return 0; });
}
}
PTDEBUG && _d('Calling sleep callback');
if ( $self->{simple_progress} ) {
print STDERR "Waiting for Flow Control to abate\n";
@@ -5243,10 +5258,10 @@ sub _d {
# ###########################################################################
# MySQLStatusWaiter package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/MySQLStatusWaiter.pm
# t/lib/MySQLStatusWaiter.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package MySQLStatusWaiter;
@@ -5321,7 +5336,7 @@ sub _parse_spec {
}
}
return \%max_val_for;
return \%max_val_for;
}
sub max_values {
@@ -5374,7 +5389,7 @@ sub wait {
die "$var=$val exceeds its critical threshold "
. "$self->{critical_val_for}->{$var}\n";
}
if ( $val && $val >= $self->{max_val_for}->{$var} ) {
if ( $val >= $self->{max_val_for}->{$var} ) {
$vals_too_high{$var} = $val;
}
else {
@@ -5443,10 +5458,10 @@ sub _d {
# ###########################################################################
# WeightedAvgRate package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/WeightedAvgRate.pm
# t/lib/WeightedAvgRate.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package WeightedAvgRate;
@@ -5512,10 +5527,10 @@ sub _d {
# ###########################################################################
# NibbleIterator package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/NibbleIterator.pm
# t/lib/NibbleIterator.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package NibbleIterator;
@@ -5608,7 +5623,7 @@ sub new {
sub switch_to_nibble {
my $self = shift;
my $params = _nibble_params($self->{nibble_params}, $self->{tbl}, $self->{args}, $self->{cols},
my $params = _nibble_params($self->{nibble_params}, $self->{tbl}, $self->{args}, $self->{cols},
$self->{chunk_size}, $self->{where}, $self->{comments}, $self->{Quoter});
$self->{one_nibble} = 0;
@@ -5645,7 +5660,7 @@ sub _one_nibble {
my $explain_nibble_sql
= "EXPLAIN SELECT "
. ($args->{select} ? $args->{select}
: join(', ', map{ $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum'
: join(', ', map{ $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum'
? "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_) } @$cols))
. " FROM $tbl->{name}"
. ($where ? " WHERE $where" : '')
@@ -5742,7 +5757,7 @@ sub _nibble_params {
. " /*$comments->{nibble}*/";
PTDEBUG && _d('Nibble statement:', $nibble_sql);
my $explain_nibble_sql
my $explain_nibble_sql
= "EXPLAIN SELECT "
. ($args->{select} ? $args->{select}
: join(', ', map { $q->quote($_) } @{$asc->{cols}}))
@@ -5831,7 +5846,7 @@ sub next {
sleep($self->{sleep});
}
}
if ( !$self->{have_rows} ) {
$self->{nibbleno}++;
PTDEBUG && _d('Nibble:', $self->{nibble_sth}->{Statement}, 'params:',
@@ -5861,7 +5876,7 @@ sub next {
}
$self->{rowno} = 0;
$self->{have_rows} = 0;
}
PTDEBUG && _d('Done nibbling');
@@ -5998,7 +6013,7 @@ sub can_nibble {
}
my $pause_file = ($o->has('pause-file') && $o->get('pause-file')) || undef;
return {
row_est => $row_est, # nibble about this many rows
index => $index, # using this index
@@ -6043,7 +6058,7 @@ sub _find_best_index {
push @possible_indexes, $want_index;
}
}
if (!$best_index) {
PTDEBUG && _d('Auto-selecting best index');
foreach my $index ( $tp->sort_indexes($tbl_struct) ) {
@@ -6141,7 +6156,7 @@ sub _prepare_sths {
return;
}
sub _get_bounds {
sub _get_bounds {
my ($self) = @_;
if ( $self->{one_nibble} ) {
@@ -6154,7 +6169,7 @@ sub _get_bounds {
my $dbh = $self->{Cxn}->dbh();
$self->{first_lower} = $dbh->selectrow_arrayref($self->{first_lb_sql});
PTDEBUG && _d('First lower boundary:', Dumper($self->{first_lower}));
PTDEBUG && _d('First lower boundary:', Dumper($self->{first_lower}));
if ( my $nibble = $self->{resume} ) {
if ( defined $nibble->{lower_boundary}
@@ -6168,9 +6183,9 @@ sub _get_bounds {
}
}
else {
$self->{next_lower} = $self->{first_lower};
$self->{next_lower} = $self->{first_lower};
}
PTDEBUG && _d('Next lower boundary:', Dumper($self->{next_lower}));
PTDEBUG && _d('Next lower boundary:', Dumper($self->{next_lower}));
if ( !$self->{next_lower} ) {
PTDEBUG && _d('At end of table, or no more boundaries to resume');
@@ -6256,7 +6271,7 @@ sub _next_boundaries {
$self->{upper} = $dbh->selectrow_arrayref($self->{last_ub_sql});
PTDEBUG && _d('Last upper boundary:', Dumper($self->{upper}));
$self->{no_more_boundaries} = 1; # for next call
$self->{last_upper} = $self->{upper};
}
$self->{ub_sth}->finish();
@@ -6308,10 +6323,10 @@ sub _d {
# ###########################################################################
# Transformers package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Transformers.pm
# t/lib/Transformers.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Transformers;
@@ -6606,7 +6621,7 @@ sub value_to_json {
my $b_obj = B::svref_2object(\$value); # for round trip problem
my $flags = $b_obj->FLAGS;
return $value # as is
return $value # as is
if $flags & ( B::SVp_IOK | B::SVp_NOK ) and !( $flags & B::SVp_POK ); # SvTYPE is IV or NV?
my $type = ref($value);
@@ -6661,10 +6676,10 @@ sub _d {
# ###########################################################################
# CleanupTask package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/CleanupTask.pm
# t/lib/CleanupTask.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package CleanupTask;
@@ -6723,10 +6738,10 @@ sub _d {
# ###########################################################################
# IndexLength package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/IndexLength.pm
# t/lib/IndexLength.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
@@ -6885,10 +6900,10 @@ sub _d {
# ###########################################################################
# HTTP::Micro package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/HTTP/Micro.pm
# t/lib/HTTP/Micro.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package HTTP::Micro;
@@ -7114,7 +7129,7 @@ sub _split_url {
or die(qq/SSL certificate not valid for $host\n/);
}
}
$self->{host} = $host;
$self->{port} = $port;
@@ -7538,10 +7553,10 @@ if ( $INC{"IO/Socket/SSL.pm"} ) {
# ###########################################################################
# VersionCheck package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/VersionCheck.pm
# t/lib/VersionCheck.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package VersionCheck;
@@ -7589,7 +7604,7 @@ my @vc_dirs = (
}
PTDEBUG && _d('Version check file', $file, 'in', $ENV{PWD});
return $file; # in the CWD
}
}
}
sub version_check_time_limit {
@@ -7606,11 +7621,11 @@ sub version_check {
PTDEBUG && _d('FindBin::Bin:', $FindBin::Bin);
if ( !$args{force} ) {
if ( $FindBin::Bin
&& (-d "$FindBin::Bin/../.bzr" ||
&& (-d "$FindBin::Bin/../.bzr" ||
-d "$FindBin::Bin/../../.bzr" ||
-d "$FindBin::Bin/../.git" ||
-d "$FindBin::Bin/../../.git"
)
-d "$FindBin::Bin/../.git" ||
-d "$FindBin::Bin/../../.git"
)
) {
PTDEBUG && _d("$FindBin::Bin/../.bzr disables --version-check");
return;
@@ -7634,7 +7649,7 @@ sub version_check {
PTDEBUG && _d(scalar @$instances_to_check, 'instances to check');
return unless @$instances_to_check;
my $protocol = 'https';
my $protocol = 'https';
eval { require IO::Socket::SSL; };
if ( $EVAL_ERROR ) {
PTDEBUG && _d($EVAL_ERROR);
@@ -7642,15 +7657,13 @@ sub version_check {
return;
}
PTDEBUG && _d('Using', $protocol);
my $url = $args{url} # testing
|| $ENV{PERCONA_VERSION_CHECK_URL} # testing
|| "$protocol://v.percona.com";
PTDEBUG && _d('API URL:', $url);
my $advice = pingback(
instances => $instances_to_check,
protocol => $protocol,
url => $url,
url => $args{url} # testing
|| $ENV{PERCONA_VERSION_CHECK_URL} # testing
|| "$protocol://v.percona.com",
);
if ( $advice ) {
PTDEBUG && _d('Advice:', Dumper($advice));
@@ -7808,17 +7821,12 @@ sub get_uuid {
my $filename = $ENV{"HOME"} . $uuid_file;
my $uuid = _generate_uuid();
my $fh;
eval {
open($fh, '>', $filename);
};
if (!$EVAL_ERROR) {
print $fh $uuid;
close $fh;
}
open(my $fh, '>', $filename) or die "Could not open file '$filename' $!";
print $fh $uuid;
close $fh;
return $uuid;
}
}
sub _generate_uuid {
return sprintf+($}="%04x")."$}-$}-$}-$}-".$}x3,map rand 65537,0..7;
@@ -7867,7 +7875,7 @@ sub pingback {
);
die "Failed to parse server requested programs: $response->{content}"
if !scalar keys %$items;
my $versions = get_versions(
items => $items,
instances => $instances,
@@ -7881,9 +7889,8 @@ sub pingback {
general_id => get_uuid(),
);
my $tool_name = $ENV{XTRABACKUP_VERSION} ? "Percona XtraBackup" : File::Basename::basename($0);
my $client_response = {
headers => { "X-Percona-Toolkit-Tool" => $tool_name },
headers => { "X-Percona-Toolkit-Tool" => File::Basename::basename($0) },
content => $client_content,
};
PTDEBUG && _d('Client response:', Dumper($client_response));
@@ -7966,7 +7973,6 @@ my %sub_for_type = (
perl_version => \&get_perl_version,
perl_module_version => \&get_perl_module_version,
mysql_variable => \&get_mysql_variable,
xtrabackup => \&get_xtrabackup_version,
);
sub valid_item {
@@ -8094,10 +8100,6 @@ sub get_perl_version {
return $version;
}
sub get_xtrabackup_version {
return $ENV{XTRABACKUP_VERSION};
}
sub get_perl_module_version {
my (%args) = @_;
my $item = $args{item};
@@ -8132,7 +8134,7 @@ sub get_from_mysql {
if ($item->{item} eq 'MySQL' && $item->{type} eq 'mysql_variable') {
@{$item->{vars}} = grep { $_ eq 'version' || $_ eq 'version_comment' } @{$item->{vars}};
}
my @versions;
my %version_for;
@@ -8175,10 +8177,10 @@ sub _d {
# ###########################################################################
# Percona::XtraDB::Cluster package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the GitHub repository at,
# with comments and its test file can be found in the Bazaar repository at,
# lib/Percona/XtraDB/Cluster.pm
# t/lib/Percona/XtraDB/Cluster.t
# See https://github.com/percona/percona-toolkit for more information.
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Percona::XtraDB::Cluster;
@@ -8237,7 +8239,7 @@ sub find_cluster_nodes {
my $dp = $args{DSNParser};
my $make_cxn = $args{make_cxn};
my $sql = q{SHOW STATUS LIKE 'wsrep\_incoming\_addresses'};
PTDEBUG && _d($sql);
my (undef, $addresses) = $dbh->selectrow_array($sql);
@@ -8317,7 +8319,7 @@ sub autodetect_nodes {
my $new_nodes = [];
return $new_nodes unless @$nodes;
for my $node ( @$nodes ) {
my $nodes_found = $self->find_cluster_nodes(
dbh => $node->dbh(),
@@ -8349,12 +8351,12 @@ sub autodetect_nodes {
);
my @new_slave_nodes = grep { $self->is_cluster_node($_) } @$new_slaves;
my $slaves_of_slaves = $self->autodetect_nodes(
%args,
nodes => \@new_slave_nodes,
);
my @autodetected_nodes = ( @$new_nodes, @$new_slaves, @$slaves_of_slaves );
return \@autodetected_nodes;
}

459
bin/pt-table-checksum Executable file → Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -46,13 +46,6 @@ Data, collected for PXC
"perconaxtradbclusterrestores",
"perconaxtradbclusters"
Summary, collected for PXC (available in file summary.txt)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"pt-mysql-summary"
Individual files, collected for PXC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -67,22 +60,6 @@ Individual files, collected for PXC
"var/lib/mysql/mysqld.post.processing.log",
"var/lib/mysql/auto.cnf"
Data, collected for MySQL
~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"perconaservermysqlbackups",
"perconaservermysqlrestores",
"perconaservermysqls"
Summary, collected for MySQL (available in file summary.txt)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"pt-mysql-summary"
Data, collected for MongoDB
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -92,90 +69,22 @@ Data, collected for MongoDB
"perconaservermongodbrestores",
"perconaservermongodbs"
Summary, collected for MongoDB (available in file summary.txt)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"pt-mongodb-summary"
Data, collected for PostgreSQL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"perconapgclusters",
"pgclusters",
"pgpolicies",
"pgreplicas",
"pgtasks"
Summary, collected for PostgreSQL (available in file summary.txt)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"pg_gather"
Usage
=====
``pt-k8s-debug-collector <flags>``
Supported Flags:
================
Flags:
``--resource``
``--resource` targeted custom resource name (default "pxc")``
Targeted custom resource name. Supported values:
``--namespace` targeted namespace. By default data will be collected from all namespaces``
* ``pxc`` - PXC
* ``psmdb`` - MongoDB
* ``pg`` - PostgreSQL
* ``ps`` - MySQL
* ``none`` - Collect only general Kubernetes data, do not collect anything specific to the particular operator).
Default: ``none``
``--namespace``
Targeted namespace. By default data will be collected from all namespaces
``--cluster``
Targeted cluster. By default data from all available clusters to be collected
``--kubeconfig``
Path to kubeconfig. Default configuration be used if none specified
``--forwardport``
Port to use when collecting database-specific summaries. By default, 3306 will be used for PXC and MySQL, 27017 for MongoDB, and 5432 for PostgreSQL
``--cluster` targeted pxc/psmdb cluster. By default data from all available clusters to be collected``
Requirements
============
- Installed, configured, and available in PATH ``kubectl``
- Installed, configured, and available in PATH ``pt-mysql-summary`` for PXC and MySQL
- Installed, configured, and available in PATH ``pt-mongodb-summary`` for MongoDB
Known Issues
============
On Kubernetes 1.21 - 1.24 warning is printed:
.. code-block:: bash
2022/12/15 17:43:16 Error: get resource podsecuritypolicies in namespace default: error: <nil>, stderr: Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
, stdout: apiVersion: v1
items: []
kind: List
metadata:
resourceVersion: ""
This warning is harmless and does not affect data collection. We will remove podsecuritypolicies once everyone upgrade to Kubernetes 1.25 or newer. Before that we advise to ignore this warning.
- Installed and configured ``kubectl``
- Installed and configured ``pt-mysql-summary``
- Installed and configured ``pt-mongodb-summary``

8
go.mod
View File

@@ -51,11 +51,11 @@ require (
github.com/xdg-go/stringprep v1.0.3 // indirect
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/net v0.4.0 // indirect
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect

15
go.sum
View File

@@ -215,8 +215,8 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b h1:vI32FkLJNAWtGD4BwkThwEy6XS7ZLLMHkSkYfF8M0W0=
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -243,20 +243,19 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=

View File

@@ -100,7 +100,7 @@ sub get_slaves {
$slave_dsn->{p} = $o->get('slave-password');
PTDEBUG && _d("Slave password set");
}
push @$slaves, $make_cxn->(dsn => $slave_dsn, dbh => $dbh, parent => $parent);
push @$slaves, $make_cxn->(dsn => $slave_dsn, dbh => $dbh);
return;
},
}

View File

@@ -763,8 +763,7 @@ sub _get_bounds {
if ( defined $nibble->{lower_boundary}
&& defined $nibble->{upper_boundary} ) {
my $sth = $dbh->prepare($self->{resume_lb_sql});
#my @ub = split ',', $nibble->{upper_boundary};
my @ub = $self->{Quoter}->deserialize_list($nibble->{upper_boundary});
my @ub = split ',', $nibble->{upper_boundary};
PTDEBUG && _d($sth->{Statement}, 'params:', @ub);
$sth->execute(@ub);
$self->{next_lower} = $sth->fetchrow_arrayref();

View File

@@ -102,30 +102,18 @@ sub make_row_checksum {
$sep =~ s/'//g;
$sep ||= '#';
my @converted_cols;
for my $col(@{$cols->{select}}) {
my $colname = $col;
$colname =~ s/`//g;
my $type = $tbl_struct->{type_for}->{$colname} || '';
if ($type =~ m/^(CHAR|VARCHAR|BINARY|VARBINARY|BLOB|TEXT|ENUM|SET|JSON)$/i) {
push @converted_cols, "convert($col using utf8mb4)";
} else {
push @converted_cols, "$col";
}
}
# Add a bitmap of which nullable columns are NULL.
my @nulls = grep { $cols->{allowed}->{$_} } @{$tbl_struct->{null_cols}};
if ( @nulls ) {
my $bitmap = "CONCAT("
. join(', ', map { 'ISNULL(' . $q->quote($_) . ')' } @nulls)
. ")";
push @converted_cols, $bitmap;
push @{$cols->{select}}, $bitmap;
}
$query .= scalar @converted_cols > 1
? "$func(CONCAT_WS('$sep', " . join(', ', @converted_cols) . '))'
: "$func($converted_cols[0])";
$query .= @{$cols->{select}} > 1
? "$func(CONCAT_WS('$sep', " . join(', ', @{$cols->{select}}) . '))'
: "$func($cols->{select}->[0])";
}
else {
# As a special case, FNV1A_64/FNV_64 doesn't need its arguments

Binary file not shown.

View File

@@ -1,43 +0,0 @@
[client]
user = msandbox
password = msandbox
port = PORT
socket = /tmp/PORT/mysql_sandboxPORT.sock
[mysqld]
port = PORT
socket = /tmp/PORT/mysql_sandboxPORT.sock
pid-file = /tmp/PORT/data/mysql_sandboxPORT.pid
basedir = PERCONA_TOOLKIT_SANDBOX
datadir = /tmp/PORT/data
key_buffer_size = 16M
innodb_buffer_pool_size = 16M
innodb_data_home_dir = /tmp/PORT/data
innodb_log_group_home_dir = /tmp/PORT/data
innodb_data_file_path = ibdata1:10M:autoextend
innodb_log_file_size = 5M
log-bin = mysql-bin
relay_log = mysql-relay-bin
log_slave_updates
server-id = PORT
report-host = 127.0.0.1
report-port = PORT
log-error = /tmp/PORT/data/mysqld.log
innodb_lock_wait_timeout = 3
general_log
general_log_file = genlog
binlog_format = ROW
wsrep_provider = LIBGALERA
wsrep_cluster_address = CLUSTER_AD
wsrep_sst_receive_address = ADDR:RECEIVE_PRT
wsrep_node_incoming_address= ADDR:PORT
wsrep_slave_threads = 2
wsrep_cluster_name = CLUSTER_NAME
wsrep_provider_options = "gmcast.listen_addr=tcp://ADDR:LISTEN_PRT;"
wsrep_sst_method = xtrabackup-v2
wsrep_node_name = PORT
innodb_autoinc_lock_mode = 2
wsrep-replicate-myisam
pxc_strict_mode = DISABLED
pxc_encrypt_cluster_traffic = 0

View File

@@ -164,6 +164,7 @@ make_sandbox() {
$PERCONA_TOOLKIT_BRANCH/util/version_cmp $minor_version 5.7.5
if [ $? -eq 2 ]; then
echo "ALTER USER 'root'@'localhost' IDENTIFIED BY 'msandbox';" > $init_file
echo "CREATE USER IF NOT EXISTS 'msandbox'@'%';" > $init_file
echo "ALTER USER 'msandbox'@'%' IDENTIFIED BY 'msandbox' REQUIRE NONE PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK;" >> $init_file
echo "GRANT ALL PRIVILEGES ON *.* TO 'msandbox'@'%';" >> $init_file

View File

@@ -0,0 +1,84 @@
# Debug collector tool
Collects debug data (logs, resource statuses etc.) from a k8s/OpenShift cluster. Data is packed into the `cluster-dump.tar.gz` archive in the current working directory.
## Data that will be collected
### Data, collected for all resources
```
"pods",
"replicasets",
"deployments",
"statefulsets",
"replicationcontrollers",
"events",
"configmaps",
"cronjobs",
"jobs",
"podsecuritypolicies",
"poddisruptionbudgets",
"perconaxtradbbackups",
"perconaxtradbclusterbackups",
"perconaxtradbclusterrestores",
"perconaxtradbclusters",
"clusterrolebindings",
"clusterroles",
"rolebindings",
"roles",
"storageclasses",
"persistentvolumeclaims",
"persistentvolumes",
"modes",
"your-custom-resource" (depends on 'resource' flag)
```
### Data, collected for PXC
```
"perconaxtradbbackups",
"perconaxtradbclusterbackups",
"perconaxtradbclusterrestores",
"perconaxtradbclusters"
```
### Individual files, collected for PXC
```
"var/lib/mysql/mysqld-error.log",
"var/lib/mysql/innobackup.backup.log",
"var/lib/mysql/innobackup.move.log",
"var/lib/mysql/innobackup.prepare.log",
"var/lib/mysql/grastate.dat",
"var/lib/mysql/gvwstate.dat",
"var/lib/mysql/mysqld.post.processing.log",
"var/lib/mysql/auto.cnf"
```
### Data, collected for MongoDB
```
"perconaservermongodbbackups",
"perconaservermongodbrestores",
"perconaservermongodbs"
```
## Usage
`pt-k8s-debug-collector <flags>`
Flags:
`--resource` targeted custom resource name (default "pxc")
`--namespace` targeted namespace. By default, data will be collected from all namespaces
`--cluster` targeted pxc/psmdb cluster. By default, data from all available clusters to be collected
## Requirements
- Installed and configured 'kubectl'
- Installed and configured 'pt-mysql-summary'
- Installed and configured 'pt-mongodb-summary'

View File

@@ -1,181 +0,0 @@
.. _pt-k8s-debug-collector:
==================================
:program:`pt-k8s-debug-collector`
==================================
Collects debug data (logs, resource statuses etc.) from a k8s/OpenShift cluster. Data is packed into the ``cluster-dump.tar.gz`` archive in the current working directory.
Data that will be collected
===========================
.. code-block:: bash
"pods",
"replicasets",
"deployments",
"statefulsets",
"replicationcontrollers",
"events",
"configmaps",
"cronjobs",
"jobs",
"podsecuritypolicies",
"poddisruptionbudgets",
"perconaxtradbbackups",
"perconaxtradbclusterbackups",
"perconaxtradbclusterrestores",
"perconaxtradbclusters",
"clusterrolebindings",
"clusterroles",
"rolebindings",
"roles",
"storageclasses",
"persistentvolumeclaims",
"persistentvolumes",
"modes",
"your-custom-resource" (depends on 'resource' flag)
Data, collected for PXC
~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"perconaxtradbbackups",
"perconaxtradbclusterbackups",
"perconaxtradbclusterrestores",
"perconaxtradbclusters"
Summary, collected for PXC (available in file summary.txt)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"pt-mysql-summary"
Individual files, collected for PXC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"var/lib/mysql/mysqld-error.log",
"var/lib/mysql/innobackup.backup.log",
"var/lib/mysql/innobackup.move.log",
"var/lib/mysql/innobackup.prepare.log",
"var/lib/mysql/grastate.dat",
"var/lib/mysql/gvwstate.dat",
"var/lib/mysql/mysqld.post.processing.log",
"var/lib/mysql/auto.cnf"
Data, collected for MySQL
~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"perconaservermysqlbackups",
"perconaservermysqlrestores",
"perconaservermysqls"
Summary, collected for MySQL (available in file summary.txt)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"pt-mysql-summary"
Data, collected for MongoDB
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"perconaservermongodbbackups",
"perconaservermongodbrestores",
"perconaservermongodbs"
Summary, collected for MongoDB (available in file summary.txt)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"pt-mongodb-summary"
Data, collected for PostgreSQL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"perconapgclusters",
"pgclusters",
"pgpolicies",
"pgreplicas",
"pgtasks"
Summary, collected for PostgreSQL (available in file summary.txt)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
"pg_gather"
Usage
=====
``pt-k8s-debug-collector <flags>``
Supported Flags:
================
``--resource``
Targeted custom resource name. Supported values:
* ``pxc`` - PXC
* ``psmdb`` - MongoDB
* ``pg`` - PostgreSQL
* ``ps`` - MySQL
* ``none`` - Collect only general Kubernetes data, do not collect anything specific to the particular operator).
Default: ``none``
``--namespace``
Targeted namespace. By default data will be collected from all namespaces
``--cluster``
Targeted cluster. By default data from all available clusters to be collected
``--kubeconfig``
Path to kubeconfig. Default configuration be used if none specified
``--forwardport``
Port to use when collecting database-specific summaries. By default, 3306 will be used for PXC and MySQL, 27017 for MongoDB, and 5432 for PostgreSQL
Requirements
============
- Installed, configured, and available in PATH ``kubectl``
- Installed, configured, and available in PATH ``pt-mysql-summary`` for PXC and MySQL
- Installed, configured, and available in PATH ``pt-mongodb-summary`` for MongoDB
Known Issues
============
On Kubernetes 1.21 - 1.24 warning is printed:
.. code-block:: bash
2022/12/15 17:43:16 Error: get resource podsecuritypolicies in namespace default: error: <nil>, stderr: Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
, stdout: apiVersion: v1
items: []
kind: List
metadata:
resourceVersion: ""
This warning is harmless and does not affect data collection. We will remove podsecuritypolicies once everyone upgrade to Kubernetes 1.25 or newer. Before that we advise to ignore this warning.

View File

@@ -20,20 +20,18 @@ import (
// Dumper struct is for dumping cluster
type Dumper struct {
cmd string
kubeconfig string
resources []string
filePaths []string
namespace string
location string
errors string
mode int64
crType string
forwardport string
cmd string
resources []string
filePaths []string
namespace string
location string
errors string
mode int64
crType string
}
// New return new Dumper object
func New(location, namespace, resource string, kubeconfig string, forwardport string) Dumper {
func New(location, namespace, resource string) Dumper {
resources := []string{
"pods",
"replicasets",
@@ -56,8 +54,11 @@ func New(location, namespace, resource string, kubeconfig string, forwardport st
}
filePaths := make([]string, 0)
if len(resource) > 0 {
resources = append(resources, resource)
if resourceType(resource) == "pxc" {
resources = append(resources,
"perconaxtradbbackups",
"perconaxtradbclusterbackups",
"perconaxtradbclusterrestores",
"perconaxtradbclusters")
@@ -77,32 +78,16 @@ func New(location, namespace, resource string, kubeconfig string, forwardport st
"perconaservermongodbrestores",
"perconaservermongodbs",
)
} else if resourceType(resource) == "pg" {
resources = append(resources,
"perconapgclusters",
"pgclusters",
"pgpolicies",
"pgreplicas",
"pgtasks",
)
} else if resourceType(resource) == "ps" {
resources = append(resources,
"perconaservermysqlbackups",
"perconaservermysqlrestores",
"perconaservermysqls",
)
}
}
return Dumper{
cmd: "kubectl",
kubeconfig: kubeconfig,
resources: resources,
filePaths: filePaths,
location: "cluster-dump",
mode: int64(0o777),
namespace: namespace,
crType: resource,
forwardport: forwardport,
cmd: "kubectl",
resources: resources,
filePaths: filePaths,
location: "cluster-dump",
mode: int64(0o777),
namespace: namespace,
crType: resource,
}
}
@@ -202,14 +187,11 @@ func (d *Dumper) DumpCluster() error {
if len(pod.Labels) == 0 {
continue
}
location = filepath.Join(d.location, ns.Name, pod.Name, "/summary.txt")
location = filepath.Join(d.location, ns.Name, pod.Name, "/pt-summary.txt")
component := resourceType(d.crType)
if component == "psmdb" {
component = "mongod"
}
if component == "ps" {
component = "mysql"
}
if pod.Labels["app.kubernetes.io/instance"] != "" && pod.Labels["app.kubernetes.io/component"] != "" {
resource := "secret/" + pod.Labels["app.kubernetes.io/instance"] + "-" + pod.Labels["app.kubernetes.io/component"]
err = d.getResource(resource, ns.Name, true, tw)
@@ -217,27 +199,20 @@ func (d *Dumper) DumpCluster() error {
log.Printf("Error: get %s resource: %v", resource, err)
}
}
if pod.Labels["app.kubernetes.io/component"] == component ||
(component == "pg" && pod.Labels["pgo-pg-database"] == "true") {
var crName string
if component == "pg" {
crName = pod.Labels["pg-cluster"]
} else {
crName = pod.Labels["app.kubernetes.io/instance"]
}
if pod.Labels["app.kubernetes.io/component"] == component {
// Get summary
output, err = d.getPodSummary(resourceType(d.crType), pod.Name, crName, ns.Name, tw)
output, err = d.getPodSummary(resourceType(d.crType), pod.Name, pod.Labels["app.kubernetes.io/instance"], tw)
if err != nil {
d.logError(err.Error(), d.crType, pod.Name)
err = addToArchive(location, d.mode, []byte(err.Error()), tw)
if err != nil {
log.Printf("Error: create summary errors archive for pod %s in namespace %s: %v", pod.Name, ns.Name, err)
log.Printf("Error: create pt-summary errors archive for pod %s in namespace %s: %v", pod.Name, ns.Name, err)
}
} else {
err = addToArchive(location, d.mode, output, tw)
if err != nil {
d.logError(err.Error(), "create summary archive for pod "+pod.Name)
log.Printf("Error: create summary archive for pod %s: %v", pod.Name, err)
d.logError(err.Error(), "create pt-summary archive for pod "+pod.Name)
log.Printf("Error: create pt-summary archive for pod %s: %v", pod.Name, err)
}
}
@@ -272,7 +247,6 @@ func (d *Dumper) DumpCluster() error {
// runCmd run command (Dumper.cmd) with given args, return it output
func (d *Dumper) runCmd(args ...string) ([]byte, error) {
var outb, errb bytes.Buffer
args = append(args, "--kubeconfig", d.kubeconfig)
cmd := exec.Command(d.cmd, args...)
cmd.Stdout = &outb
cmd.Stderr = &errb
@@ -354,7 +328,7 @@ func (d *Dumper) getIndividualFiles(resource, namespace string, podName, path, l
return addToArchive(location+"/"+path, d.mode, output, tw)
}
func (d *Dumper) getPodSummary(resource, podName, crName string, namespace string, tw *tar.Writer) ([]byte, error) {
func (d *Dumper) getPodSummary(resource, podName, crName string, tw *tar.Writer) ([]byte, error) {
var (
summCmdName string
ports string
@@ -362,86 +336,33 @@ func (d *Dumper) getPodSummary(resource, podName, crName string, namespace strin
)
switch resource {
case "ps":
fallthrough
case "pxc":
var pass, port string
if d.forwardport != "" {
port = d.forwardport
} else {
port = "3306"
}
cr, err := d.getCR(resource+"/"+crName, namespace)
cr, err := d.getCR("pxc/" + crName)
if err != nil {
return nil, errors.Wrap(err, "get cr")
}
if cr.Spec.SecretName != "" {
pass, err = d.getDataFromSecret(cr.Spec.SecretName, "root", namespace)
} else {
pass, err = d.getDataFromSecret(crName+"-secrets", "root", namespace)
}
pass, err := d.getDataFromSecret(cr.Spec.SecretName, "root")
if err != nil {
return nil, errors.Wrap(err, "get password from pxc users secret")
}
ports = port + ":3306"
ports = "3306:3306"
summCmdName = "pt-mysql-summary"
summCmdArgs = []string{"--host=127.0.0.1", "--port=" + port, "--user=root", "--password=" + string(pass)}
case "pg":
var user, pass, port string
if d.forwardport != "" {
port = d.forwardport
} else {
port = "5432"
}
cr, err := d.getCR("pgclusters", namespace)
if err != nil {
return nil, errors.Wrap(err, "get cr")
}
if cr.Spec.SecretName != "" {
user, err = d.getDataFromSecret(cr.Spec.SecretName, "username", namespace)
} else {
user, err = d.getDataFromSecret(crName+"-postgres-secret", "username", namespace)
}
if err != nil {
return nil, errors.Wrap(err, "get user from PostgreSQL users secret")
}
if cr.Spec.SecretName != "" {
pass, err = d.getDataFromSecret(cr.Spec.SecretName, "password", namespace)
} else {
pass, err = d.getDataFromSecret(crName+"-postgres-secret", "password", namespace)
}
if err != nil {
return nil, errors.Wrap(err, "get password from PostgreSQL users secret")
}
ports = port + ":5432"
summCmdName = "sh"
summCmdArgs = []string{"-c", "curl https://raw.githubusercontent.com/percona/support-snippets/master/postgresql/pg_gather/gather.sql" +
" 2>/dev/null | PGPASSWORD=" + string(pass) + " psql -X --host=127.0.0.1 --port=" + port + " --user=" + user}
summCmdArgs = []string{"--host=127.0.0.1", "--port=3306", "--user=root", "--password=" + string(pass)}
case "psmdb":
var port string
if d.forwardport != "" {
port = d.forwardport
} else {
port = "27017"
}
cr, err := d.getCR("psmdb/"+crName, namespace)
cr, err := d.getCR("psmdb/" + crName)
if err != nil {
return nil, errors.Wrap(err, "get cr")
}
user, err := d.getDataFromSecret(cr.Spec.Secrets.Users, "MONGODB_DATABASE_ADMIN_USER", namespace)
pass, err := d.getDataFromSecret(cr.Spec.Secrets.Users, "MONGODB_CLUSTER_ADMIN_PASSWORD")
if err != nil {
return nil, errors.Wrap(err, "get password from psmdb users secret")
}
pass, err := d.getDataFromSecret(cr.Spec.Secrets.Users, "MONGODB_DATABASE_ADMIN_PASSWORD", namespace)
if err != nil {
return nil, errors.Wrap(err, "get password from psmdb users secret")
}
ports = port + ":27017"
ports = "27017:27017"
summCmdName = "pt-mongodb-summary"
summCmdArgs = []string{"--username=" + user, "--password=" + pass, "--authenticationDatabase=admin", "127.0.0.1:" + port}
summCmdArgs = []string{"--username=clusterAdmin", "--password=" + pass, "--authenticationDatabase=admin", "127.0.0.1:27017"}
}
cmdPortFwd := exec.Command(d.cmd, "port-forward", "pod/"+podName, ports, "-n", namespace, "--kubeconfig", d.kubeconfig)
cmdPortFwd := exec.Command(d.cmd, "port-forward", "pod/"+podName, ports)
go func() {
err := cmdPortFwd.Run()
if err != nil {
@@ -469,22 +390,22 @@ func (d *Dumper) getPodSummary(resource, podName, crName string, namespace strin
return []byte(fmt.Sprintf("stderr: %s, stdout: %s", errb.String(), outb.String())), nil
}
func (d *Dumper) getCR(crName string, namespace string) (crSecrets, error) {
func (d *Dumper) getCR(crName string) (crSecrets, error) {
var cr crSecrets
output, err := d.runCmd("get", crName, "-o", "json", "-n", namespace)
output, err := d.runCmd("get", crName, "-o", "json")
if err != nil {
return cr, errors.Wrap(err, "get "+crName)
}
err = json.Unmarshal(output, &cr)
if err != nil {
return cr, errors.Wrap(err, "unmarshal "+crName+" cr")
return cr, errors.Wrap(err, "unmarshal psmdb cr")
}
return cr, nil
}
func (d *Dumper) getDataFromSecret(secretName, dataName string, namespace string) (string, error) {
passEncoded, err := d.runCmd("get", "secrets/"+secretName, "--template={{.data."+dataName+"}}", "-n", namespace)
func (d *Dumper) getDataFromSecret(secretName, dataName string) (string, error) {
passEncoded, err := d.runCmd("get", "secrets/"+secretName, "--template={{.data."+dataName+"}}")
if err != nil {
return "", errors.Wrap(err, "run get secret cmd")
}
@@ -501,10 +422,6 @@ func resourceType(s string) string {
return "pxc"
} else if s == "psmdb" || strings.HasPrefix(s, "psmdb/") {
return "psmdb"
} else if s == "pg" || strings.HasPrefix(s, "pg/") {
return "pg"
} else if s == "ps" || strings.HasPrefix(s, "ps/") {
return "ps"
}
return s
}

View File

@@ -12,21 +12,17 @@ func main() {
namespace := ""
resource := ""
clusterName := ""
kubeconfig := ""
forwardport := ""
flag.StringVar(&namespace, "namespace", "", "Namespace for collecting data. If empty data will be collected from all namespaces")
flag.StringVar(&resource, "resource", "none", "Collect data, specific to the resource. Supported values: pxc, psmdb, pg, ps, none")
flag.StringVar(&resource, "resource", "pxc", "Resource name. Default value - 'pxc'")
flag.StringVar(&clusterName, "cluster", "", "Cluster name")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to kubeconfig")
flag.StringVar(&forwardport, "forwardport", "", "Port to use for port forwarding")
flag.Parse()
if len(clusterName) > 0 {
resource += "/" + clusterName
}
d := dumper.New("", namespace, resource, kubeconfig, forwardport)
d := dumper.New("", namespace, resource)
log.Println("Start collecting cluster data")
err := d.DumpCluster()

View File

@@ -2,7 +2,6 @@ package main
import (
"bytes"
"os"
"os/exec"
"path"
"strings"
@@ -11,33 +10,11 @@ import (
"golang.org/x/exp/slices"
)
/*
This test requires:
- Running K8 Operator installation
- kubectl configuration files, one for each supported operator
-- KUBECONFIG_PXC for K8SPXC
-- KUBECONFIG_PS for K8SPS
-- KUBECONFIG_PSMDB for K8SPSMDB
-- KUBECONFIG_PG for K8SPG
You can additionally set option FORWARDPORT if you want to use custom port when testing summaries.
pt-mysql-summary and pt-mongodb-summary must be in the PATH.
Since running pt-k8s-debug-collector may take long time run go test with increase timeout:
go test -timeout 6000s
We do not explicitly test --kubeconfig and --forwardport options, because they are used in other tests.
*/
/*
Tests collection of the individual files by pt-k8s-debug-collector.
Requires running K8SPXC instance and kubectl, configured to access that instance by default.
*/
func TestIndividualFiles(t *testing.T) {
if os.Getenv("KUBECONFIG_PXC") == "" {
t.Skip("TestIndividualFiles requires K8SPXC")
}
tests := []struct {
name string
cmd []string
@@ -79,7 +56,7 @@ func TestIndividualFiles(t *testing.T) {
},
}
cmd := exec.Command("../../../bin/pt-k8s-debug-collector", "--kubeconfig", os.Getenv("KUBECONFIG_PXC"), "--forwardport", os.Getenv("FORWARDPORT"), "--resource", "pxc")
cmd := exec.Command("../../../bin/pt-k8s-debug-collector")
if err := cmd.Run(); err != nil {
t.Errorf("error executing pt-k8s-debug-collector: %s", err.Error())
}
@@ -100,61 +77,3 @@ func TestIndividualFiles(t *testing.T) {
}
}
}
/*
Tests for supported values of the --resource option.

For each supported resource type the collector binary is executed and the
resulting cluster-dump.tar.gz archive is inspected for the expected number
of summary.txt files. Requires the corresponding KUBECONFIG_* environment
variables (see the file header comment) and the pt-k8s-debug-collector
binary in ../../../bin.
*/
func TestResourceOption(t *testing.T) {
	// Counts summary.txt entries inside the produced archive; stderr is
	// discarded so a missing archive yields "0" instead of a tar error.
	testcmd := []string{"sh", "-c", "tar -tf cluster-dump.tar.gz --wildcards '*/summary.txt' 2>/dev/null | wc -l"}
	tests := []struct {
		name       string
		want       string
		kubeconfig string
	}{
		{
			name:       "none",
			want:       "0",
			kubeconfig: "",
		},
		{
			name:       "pxc",
			want:       "3",
			kubeconfig: os.Getenv("KUBECONFIG_PXC"),
		},
		{
			name:       "ps",
			want:       "3",
			kubeconfig: os.Getenv("KUBECONFIG_PS"),
		},
		{
			name:       "psmdb",
			want:       "3",
			kubeconfig: os.Getenv("KUBECONFIG_PSMDB"),
		},
		{
			name:       "pg",
			want:       "3",
			kubeconfig: os.Getenv("KUBECONFIG_PG"),
		},
	}
	for _, test := range tests {
		cmd := exec.Command("../../../bin/pt-k8s-debug-collector", "--kubeconfig", test.kubeconfig, "--forwardport", os.Getenv("FORWARDPORT"), "--resource", test.name)
		if err := cmd.Run(); err != nil {
			t.Errorf("error executing pt-k8s-debug-collector: %s", err.Error())
		}
		out, err := exec.Command(testcmd[0], testcmd[1:]...).Output()
		if err != nil {
			t.Errorf("test %s, error running command %s:\n%s\n\nCommand output:\n%s", test.name, testcmd, err.Error(), out)
		}
		if strings.TrimRight(bytes.NewBuffer(out).String(), "\n") != test.want {
			t.Errorf("test %s, output is not as expected\nOutput: %s\nWanted: %s", test.name, out, test.want)
		}
		// Clean up immediately rather than with `defer`: a deferred call
		// inside the loop would only run when the whole test returns, so
		// archives would never be removed between iterations and a stale
		// cluster-dump.tar.gz could leak into the next test case if
		// collection fails. A dedicated local also avoids reassigning the
		// shared `cmd` variable from a closure.
		rmCmd := exec.Command("rm", "-f", "cluster-dump.tar.gz")
		if err := rmCmd.Run(); err != nil {
			t.Errorf("error cleaning up test data: %s", err.Error())
		}
	}
}

View File

@@ -69,16 +69,11 @@ sub reset_repl_db {
# ############################################################################
# 1
# We need to remove the mysql.plugin and percona_test.checksums tables from the
# result and the sample, because they have a different number of rows than the
# default if the test is run with the MyRocks or TokuDB storage engines enabled
ok(
no_diff(
sub { pt_table_checksum::main(@args) },
"$sample/default-results-$sandbox_version.txt",
sed_out => '\'/mysql.plugin$/d; /percona_test.checksums$/d\'',
post_pipe => 'sed \'/mysql.plugin$/d; /percona_test.checksums$/d\' | ' .
'awk \'{print $2 " " $3 " " $4 " " $7 " " $9}\'',
post_pipe => 'awk \'{print $2 " " $3 " " $4 " " $7 " " $9}\'',
),
"Default checksum"
);
@@ -93,23 +88,18 @@ my $max_chunks = $sandbox_version < '5.7' ? 60 : 100;
ok(
$row->[0] > 25 && $row->[0] < $max_chunks,
"Between 25 and $max_chunks chunks"
'Between 25 and 60 chunks'
) or diag($row->[0]);
# ############################################################################
# Static chunk size (disable --chunk-time)
# ############################################################################
# 3
# We need to remove the mysql.plugin and percona_test.checksums tables from the
# result and the sample, because they have a different number of rows than the
# default if the test is run with the MyRocks or TokuDB storage engines enabled
ok(
no_diff(
sub { pt_table_checksum::main(@args, qw(--chunk-time 0 --ignore-databases mysql)) },
"$sample/static-chunk-size-results-$sandbox_version.txt",
sed_out => '\'/mysql.plugin$/d; /percona_test.checksums$/d\'',
post_pipe => 'sed \'/mysql.plugin$/d; /percona_test.checksums$/d\' | ' .
'awk \'{print $2 " " $3 " " $4 " " $6 " " $7 " " $9}\'',
post_pipe => 'awk \'{print $2 " " $3 " " $4 " " $6 " " $7 " " $9}\'',
),
"Static chunk size (--chunk-time 0)"
);

View File

@@ -30,6 +30,10 @@ elsif ( !$slave1_dbh ) {
plan skip_all => 'Cannot connect to sandbox slave';
}
if ($sandbox_version ge '8.0') {
plan skip_all => "TODO master master sandbox is failing with MySQL 8.0+. FIX ME !!!!";
}
# The sandbox servers run with lock_wait_timeout=3 and it's not dynamic
# so we need to specify --set-vars innodb_lock_wait_timeout=3 else the tool will die.
# And --max-load "" prevents waiting for status variables.

View File

@@ -32,13 +32,10 @@ my $sample = "t/pt-table-checksum/samples/";
my $row;
my $output;
$sb->create_dbs($master_dbh, [qw(test)]);
eval { $master_dbh->do('DROP FUNCTION IF EXISTS fnv_64'); };
eval { $master_dbh->do("CREATE FUNCTION fnv_64 RETURNS INTEGER SONAME 'libfnv_udf.so';"); };
eval { $master_dbh->do('DROP FUNCTION test.fnv_64'); };
eval { $master_dbh->do("CREATE FUNCTION fnv_64 RETURNS INTEGER SONAME 'fnv_udf.so';"); };
if ( $EVAL_ERROR ) {
#REMOVEME
print $EVAL_ERROR;
chomp $EVAL_ERROR;
plan skip_all => "No FNV_64 UDF lib"
}
@@ -46,6 +43,8 @@ else {
plan tests => 7;
}
$sb->create_dbs($master_dbh, [qw(test)]);
# ############################################################################
# First test that the FNV function works in MySQL and gives the correct results.
# ############################################################################

View File

@@ -32,7 +32,7 @@ $sb->load_file('master', 't/pt-table-checksum/samples/issue_1485195.sql');
# so we need to specify --set-vars innodb_lock_wait_timeout=3 else the tool will die.
# And --max-load "" prevents waiting for status variables.
my $master_dsn = 'h=127.1,P=12345,u=msandbox,p=msandbox,D=my_binary_database';
my @args = ($master_dsn, qw(--replicate my_binary_database.my_table -t percona_test.checksums));
my @args = ($master_dsn, qw(--replicate my_binary_database.my_table -d percona_test));
my $output;
$output = output(
@@ -40,13 +40,9 @@ $output = output(
stderr => 1,
);
# We do not count these tables by default, because their presence depends on
# previously run tests
my $extra_tables = $dbh->selectrow_arrayref("select count(*) from percona_test.checksums where db_tbl in ('mysql.plugin', 'mysql.func', 'mysql.proxies_priv');")->[0];
is(
PerconaTest::count_checksum_results($output, 'rows'),
$sandbox_version ge '8.0' ? 27 + $extra_tables : $sandbox_version lt '5.7' ? 24 : 23 + $extra_tables,
$sandbox_version ge '8.0' ? 29 : $sandbox_version lt '5.7' ? 24 : 25,
"Large BLOB/TEXT/BINARY Checksum"
);

View File

@@ -21,9 +21,8 @@ my $master_dbh = $sb->get_dbh_for('master');
if ( !$master_dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
} elsif ($sandbox_version >= '8.0') {
plan skip_all => "8.0 requires fix for https://jira.percona.com/browse/PT-1805";
} else {
}
else {
plan tests => 3;
}

View File

@@ -21,13 +21,11 @@ my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('master');
my $sb_version = VersionParser->new($dbh);
my $rows = $dbh->selectall_hashref("SHOW VARIABLES LIKE '%version%'", ['variable_name']);
my $remove_plugin = 0;
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
} elsif ( $sb_version < '5.7.21' || $sb_version >= '8.0' ||
!($rows->{version_comment}->{value} =~ m/percona server/i) ) {
plan skip_all => 'This test file needs 5.7 Percona Server, starting from 5.7.21';
} elsif ( $sb_version < '5.7.21' || !($rows->{version_comment}->{value} =~ m/percona server/i) ) {
plan skip_all => 'This test file needs Percona Server 5.7.21.21+';
} else {
plan tests => 3;
}
@@ -37,7 +35,6 @@ eval {
};
if ($EVAL_ERROR) {
$sb->load_file('master', 't/pt-table-checksum/samples/pt-131.sql');
$remove_plugin = 1;
}
# The sandbox servers run with lock_wait_timeout=3 and it is not dynamic
# so we need to specify --set-vars innodb_lock_wait_timeout=3 else the tool will die.
@@ -66,9 +63,6 @@ delete $ENV{PTDEBUG};
# #############################################################################
# Done.
# #############################################################################
if ($remove_plugin) {
$sb->load_file('master', 't/pt-table-checksum/samples/pt-131-wipe.sql');
}
$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
exit;

View File

@@ -11,10 +11,6 @@ use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;
if ( !$ENV{SLOW_TESTS} ) {
plan skip_all => "pt-table-checksum/replication_filters.t is one of the top slowest files; set SLOW_TESTS=1 to enable it.";
}
use PerconaTest;
use Sandbox;
use SqlModes;

View File

@@ -70,11 +70,9 @@ like(
stderr => 1,
);
my $return_code = ($sandbox_version >= '5.7') ? 16 : 0;
is(
$exit_status,
$return_code,
0,
"PT-204 Starting checksum since RocksDB table was skipped with --ignore-tables",
);
@@ -98,7 +96,7 @@ unlike(
is(
$exit_status,
$return_code,
0,
"PT-204 Starting checksum since RocksDB table was skipped with --ignore-engines",
);

View File

@@ -28,6 +28,7 @@ else {
}
diag("loading samples");
#$sb->load_file('master', 't/pt-table-checksum/samples/pt-226.sql');
$sb->load_file('master', 't/pt-table-checksum/samples/pt-226.sql');
# The sandbox servers run with lock_wait_timeout=3 and it's not dynamic
@@ -54,8 +55,6 @@ $output = output(
stderr => 1,
);
#REMOVEME
diag($exit_status);
isnt(
$exit_status,
0,
@@ -64,7 +63,7 @@ isnt(
like(
$output,
qr/1\s+100\s+10\s+1\s+0\s+.*test.joinit/,
qr/1\s+100\s+0\s+1\s+0\s+.*test.joinit/,
"PT-226 table joinit has differences",
);

View File

@@ -27,14 +27,9 @@ my $ip = qr/\Q127.1\E|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/;
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $node1 = $sb->get_dbh_for('node1');
my $sb_version = VersionParser->new($node1);
my $node2 = $sb->get_dbh_for('node2');
my $node3 = $sb->get_dbh_for('node3');
if ($sb_version >= '8.0') {
plan skip_all => 'Cannot run tests on PXC 8.0 until PT-1699 is fixed';
}
if ( !$node1 ) {
plan skip_all => 'Cannot connect to cluster node1';
}
@@ -98,8 +93,8 @@ ok (
);
for my $args (
["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns", '--ignore-tables=mysql.proxies_priv'],
["using recursion-method=cluster", '--recursion-method', 'cluster', '--ignore-tables=mysql.proxies_priv']
["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["using recursion-method=cluster", '--recursion-method', 'cluster']
)
{
my $test = shift @$args;
@@ -130,12 +125,14 @@ for my $args (
}
# Now really test checksumming a cluster. To create a diff we have to disable
# wsrep replication, so we can make a change on one node without
# the binlog. Although PXC doesn't need or use the binlog to communicate
# (it has its own broadcast-based protocol implemented via the Galera lib)
# it still respects sql_log_bin, so we can make a change on one node without
# affecting the others.
$sb->load_file('node1', "$sample/a-z.sql");
$node2->do("set wsrep_on=0");
$node2->do("set sql_log_bin=0");
$node2->do("update test.t set c='zebra' where c='z'");
$node2->do("set wsrep_on=1");
$node2->do("set sql_log_bin=1");
my ($row) = $node2->selectrow_array("select c from test.t order by c desc limit 1");
is(
@@ -191,8 +188,8 @@ sub test_recursion_methods {
}
for my $args (
["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns", '--ignore-tables=mysql.proxies_priv'],
["using recursion-method=cluster", '--recursion-method', 'cluster', '--ignore-tables=mysql.proxies_priv']
["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["using recursion-method=cluster", '--recursion-method', 'cluster']
)
{
my $test = shift @$args;
@@ -230,7 +227,6 @@ sub test_recursion_methods {
0\s+ # errors
1\s+ # diffs
26\s+ # rows
0\s+ # diff_rows
\d+\s+ # chunks
0\s+ # skipped
\S+\s+ # time
@@ -268,16 +264,16 @@ my ($slave_dbh, $slave_dsn) = $sb->start_sandbox(
server => 'cslave1',
type => 'slave',
master => 'node1',
env => q/BINLOG_FORMAT="ROW"/,
env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
# Add the slave to the DSN table.
$node1->do(qq/INSERT INTO dsns.dsns VALUES (4, 3, '$slave_dsn')/);
# Fix what we changed earlier on node2 so the cluster is consistent.
$node2->do("set wsrep_on=0");
$node2->do("set sql_log_bin=0");
$node2->do("update test.t set c='z' where c='zebra'");
$node2->do("set wsrep_on=1");
$node2->do("set sql_log_bin=1");
# Wait for the slave to apply the binlogs from node1 (its master).
# Then change it so it's not consistent.
@@ -291,8 +287,8 @@ $slave_dbh->do("update test.t set c='zebra' where c='z'");
# Cluster nodes default to ROW format because that's what Galera
# works best with, even though it doesn't really use binlogs.
for my $args (
["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns", '--ignore-tables=mysql.user'],
["using recursion-method=cluster,hosts", '--recursion-method', 'cluster,hosts', '--ignore-tables=mysql.user']
["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["using recursion-method=cluster,hosts", '--recursion-method', 'cluster,hosts']
)
{
my $test = shift @$args;
@@ -351,7 +347,7 @@ $sb->stop_sandbox('cslave1');
server => 'cslave1',
type => 'slave',
master => 'node2',
env => q/BINLOG_FORMAT="ROW"/,
env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
# Wait for the slave to apply the binlogs from node2 (its master).
@@ -368,8 +364,8 @@ is(
);
for my $args (
["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns", '--ignore-tables=mysql.user'],
["using recursion-method=cluster,hosts", '--recursion-method', 'cluster,hosts', '--ignore-tables=mysql.user']
["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["using recursion-method=cluster,hosts", '--recursion-method', 'cluster,hosts']
)
{
my $test = shift @$args;
@@ -408,7 +404,7 @@ $node1->do(qq/DELETE FROM dsns.dsns WHERE id=4/);
my ($master_dbh, $master_dsn) = $sb->start_sandbox(
server => 'cmaster',
type => 'master',
env => q/BINLOG_FORMAT="ROW"/,
env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
# Since master is new, node1 shouldn't have binlog to replay.
@@ -470,9 +466,9 @@ is(
# Make a diff on node1. If ptc is really auto-detecting node1, then it
# should report this diff.
$node1->do("set wsrep_on=0");
$node1->do("set sql_log_bin=0");
$node1->do("update test.t set c='zebra' where c='z'");
$node1->do("set wsrep_on=1");
$node1->do("set sql_log_bin=1");
$output = output(
sub { pt_table_checksum::main($master_dsn,
@@ -506,7 +502,6 @@ like(
0\s+ # errors
1\s+ # diffs
26\s+ # rows
0\s+ # diff_rows
\d+\s+ # chunks
0\s+ # skipped
\S+\s+ # time
@@ -553,7 +548,6 @@ like(
0\s+ # errors
1\s+ # diffs
26\s+ # rows
0\s+ # diff_rows
\d+\s+ # chunks
0\s+ # skipped
\S+\s+ # time
@@ -581,9 +575,9 @@ for my $args (
# Make a diff on node1. If ptc is really auto-detecting node1, then it
# should report this diff.
$node1->do("set wsrep_on=0");
$node1->do("set sql_log_bin=0");
$node1->do("update test.t set c='zebra' where c='z'");
$node1->do("set wsrep_on=1");
$node1->do("set sql_log_bin=1");
$output = output(
sub { pt_table_checksum::main($master_dsn,
@@ -606,7 +600,6 @@ for my $args (
0\s+ # errors
1\s+ # diffs
26\s+ # rows
0\s+ # diff_rows
\d+\s+ # chunks
0\s+ # skipped
\S+\s+ # time
@@ -626,13 +619,13 @@ for my $args (
# to node1, node1 isn't different, so it broadcasts the result in ROW format
# that all is ok, which node2 gets and thus false reports. This is why
# those ^ warnings exist.
$node1->do("set wsrep_on=0");
$node1->do("set sql_log_bin=0");
$node1->do("update test.t set c='z' where c='zebra'");
$node1->do("set wsrep_on=1");
$node1->do("set sql_log_bin=1");
$node2->do("set wsrep_on=0");
$node2->do("set sql_log_bin=0");
$node2->do("update test.t set c='zebra' where c='z'");
$node2->do("set wsrep_on=1");
$node2->do("set sql_log_bin=1");
($row) = $node2->selectrow_array("select c from test.t order by c desc limit 1");
is(

View File

@@ -31,7 +31,6 @@ use Data::Dumper;
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $master_dbh = $sb->get_dbh_for('master');
my $sb_version = VersionParser->new($master_dbh);
my $slave1_dbh = $sb->get_dbh_for('slave1');
my $slave2_dbh = $sb->get_dbh_for('slave2');
@@ -283,7 +282,7 @@ elsif ( -x "$ENV{PERCONA_TOOLKIT_SANDBOX}/bin/mysqlbinlog" ) {
$mysqlbinlog = "$ENV{PERCONA_TOOLKIT_SANDBOX}/bin/mysqlbinlog";
}
$output = `$mysqlbinlog /tmp/12345/data/$row->{file} --start-position=$row->{position} | grep 'use ' | grep -v '^# Warning' | grep -v 'pseudo_replica_mode' | sort -u | sed -e 's/\`//g'`;
$output = `$mysqlbinlog /tmp/12345/data/$row->{file} --start-position=$row->{position} | grep 'use ' | grep -v '^# Warning' | sort -u | sed -e 's/\`//g'`;
my $use_dbs = "use mysql/*!*/;
use percona/*!*/;
@@ -291,10 +290,6 @@ use percona_test/*!*/;
use sakila/*!*/;
";
if ($sb_version >= '5.7') {
$use_dbs .= "use sys/*!*/;\n";
}
is(
$output,
$use_dbs,
@@ -306,7 +301,7 @@ $row = $master_dbh->selectrow_hashref('show master status');
pt_table_checksum::main(@args, qw(--quiet --replicate-database percona));
$output = `$mysqlbinlog /tmp/12345/data/$row->{file} --start-position=$row->{position} | grep 'use ' | grep -v '^# Warning' | grep -v 'pseudo_replica_mode' | sort -u | sed -e 's/\`//g'`;
$output = `$mysqlbinlog /tmp/12345/data/$row->{file} --start-position=$row->{position} | grep 'use ' | grep -v '^# Warning' | sort -u | sed -e 's/\`//g'`;
is(
$output,

View File

@@ -6,10 +6,10 @@ ERRORS DIFFS ROWS SKIPPED TABLE
0 0 2 0 mysql.engine_cost
0 0 0 0 mysql.event
0 0 0 0 mysql.func
0 0 50 0 mysql.help_category
0 0 908 0 mysql.help_keyword
0 0 1803 0 mysql.help_relation
0 0 659 0 mysql.help_topic
0 0 40 0 mysql.help_category
0 0 682 0 mysql.help_keyword
0 0 1340 0 mysql.help_relation
0 0 637 0 mysql.help_topic
0 0 0 0 mysql.ndb_binlog_index
0 0 0 0 mysql.plugin
0 0 0 0 mysql.proc
@@ -23,7 +23,7 @@ ERRORS DIFFS ROWS SKIPPED TABLE
0 0 0 0 mysql.time_zone_name
0 0 0 0 mysql.time_zone_transition
0 0 0 0 mysql.time_zone_transition_type
0 0 4 0 mysql.user
0 0 2 0 mysql.user
0 0 23 0 percona_test.checksums
0 0 1 0 percona_test.sentinel
0 0 200 0 sakila.actor

View File

@@ -5,23 +5,19 @@ ERRORS DIFFS ROWS SKIPPED TABLE
0 0 0 0 mysql.component
0 0 2 0 mysql.db
0 0 0 0 mysql.default_roles
0 1 2 0 mysql.engine_cost
0 0 2 0 mysql.engine_cost
0 0 0 0 mysql.func
0 0 85 0 mysql.global_grants
0 0 53 0 mysql.help_category
0 0 985 0 mysql.help_keyword
0 0 2043 0 mysql.help_relation
0 0 701 0 mysql.help_topic
0 0 26 0 mysql.global_grants
0 0 40 0 mysql.help_category
0 0 695 0 mysql.help_keyword
0 0 1457 0 mysql.help_relation
0 0 644 0 mysql.help_topic
0 0 0 0 mysql.password_history
0 0 0 0 mysql.plugin
0 0 0 0 mysql.procs_priv
0 1 1 0 mysql.proxies_priv
0 0 0 0 mysql.replication_asynchronous_connection_failover
0 0 0 0 mysql.replication_asynchronous_connection_failover_managed
0 0 1 0 mysql.replication_group_configuration_version
0 0 2 0 mysql.replication_group_member_actions
0 0 1 0 mysql.proxies_priv
0 0 0 0 mysql.role_edges
0 1 6 0 mysql.server_cost
0 0 6 0 mysql.server_cost
0 0 0 0 mysql.servers
0 0 2 0 mysql.tables_priv
0 0 0 0 mysql.time_zone
@@ -29,8 +25,8 @@ ERRORS DIFFS ROWS SKIPPED TABLE
0 0 0 0 mysql.time_zone_name
0 0 0 0 mysql.time_zone_transition
0 0 0 0 mysql.time_zone_transition_type
0 1 5 0 mysql.user
0 0 29 0 percona_test.checksums
0 0 5 0 mysql.user
0 0 27 0 percona_test.checksums
0 0 1 0 percona_test.sentinel
0 0 200 0 sakila.actor
0 0 603 0 sakila.address

View File

@@ -1,10 +1,8 @@
Checking if all tables can be checksummed ...
Starting checksum ...
--
-- sakila.city
--
REPLACE INTO `percona`.`checksums` (db, tbl, chunk, chunk_index, lower_boundary, upper_boundary, this_cnt, this_crc) SELECT ?, ?, ?, ?, ?, ?, COUNT(*) AS cnt, COALESCE(LOWER(CONV(BIT_XOR(CAST(FNV_64(`city_id`, `city`, `country_id`, UNIX_TIMESTAMP(`last_update`)) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `sakila`.`city` FORCE INDEX(`PRIMARY`) WHERE ((`city_id` >= ?)) AND ((`city_id` <= ?)) /*checksum chunk*/
REPLACE INTO `percona`.`checksums` (db, tbl, chunk, chunk_index, lower_boundary, upper_boundary, this_cnt, this_crc) SELECT ?, ?, ?, ?, ?, ?, COUNT(*) AS cnt, COALESCE(LOWER(CONV(BIT_XOR(CAST(FNV_64(`city_id`, `city`, `country_id`, `last_update` + 0) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `sakila`.`city` FORCE INDEX(`PRIMARY`) WHERE ((`city_id` >= ?)) AND ((`city_id` <= ?)) /*checksum chunk*/
REPLACE INTO `percona`.`checksums` (db, tbl, chunk, chunk_index, lower_boundary, upper_boundary, this_cnt, this_crc) SELECT ?, ?, ?, ?, ?, ?, COUNT(*), '0' FROM `sakila`.`city` FORCE INDEX(`PRIMARY`) WHERE ((`city_id` < ?)) ORDER BY `city_id` /*past lower chunk*/
@@ -16,7 +14,7 @@ SELECT /*!40001 SQL_NO_CACHE */ `city_id` FROM `sakila`.`city` FORCE INDEX(`PRIM
-- sakila.film_actor
--
REPLACE INTO `percona`.`checksums` (db, tbl, chunk, chunk_index, lower_boundary, upper_boundary, this_cnt, this_crc) SELECT ?, ?, ?, ?, ?, ?, COUNT(*) AS cnt, COALESCE(LOWER(CONV(BIT_XOR(CAST(FNV_64(`actor_id`, `film_id`, UNIX_TIMESTAMP(`last_update`)) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `sakila`.`film_actor` FORCE INDEX(`PRIMARY`) WHERE ((`actor_id` > ?) OR (`actor_id` = ? AND `film_id` >= ?)) AND ((`actor_id` < ?) OR (`actor_id` = ? AND `film_id` <= ?)) /*checksum chunk*/
REPLACE INTO `percona`.`checksums` (db, tbl, chunk, chunk_index, lower_boundary, upper_boundary, this_cnt, this_crc) SELECT ?, ?, ?, ?, ?, ?, COUNT(*) AS cnt, COALESCE(LOWER(CONV(BIT_XOR(CAST(FNV_64(`actor_id`, `film_id`, `last_update` + 0) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `sakila`.`film_actor` FORCE INDEX(`PRIMARY`) WHERE ((`actor_id` > ?) OR (`actor_id` = ? AND `film_id` >= ?)) AND ((`actor_id` < ?) OR (`actor_id` = ? AND `film_id` <= ?)) /*checksum chunk*/
REPLACE INTO `percona`.`checksums` (db, tbl, chunk, chunk_index, lower_boundary, upper_boundary, this_cnt, this_crc) SELECT ?, ?, ?, ?, ?, ?, COUNT(*), '0' FROM `sakila`.`film_actor` FORCE INDEX(`PRIMARY`) WHERE ((`actor_id` < ?) OR (`actor_id` = ? AND `film_id` < ?)) ORDER BY `actor_id`, `film_id` /*past lower chunk*/

View File

@@ -1,13 +0,0 @@
-- NOTE(review): this script appears to be the teardown counterpart of a
-- setup script that installs the QUERY_RESPONSE_TIME plugins (Percona
-- Server only) — confirm against the corresponding install .sql file.
-- See https://www.percona.com/doc/percona-server/LATEST/diagnostics/response_time_distribution.html
-- This plugin is used for gathering statistics.
UNINSTALL PLUGIN QUERY_RESPONSE_TIME_AUDIT;
-- This plugin provides the interface (QUERY_RESPONSE_TIME) to output gathered statistics.
UNINSTALL PLUGIN QUERY_RESPONSE_TIME;
-- This plugin provides the interface (QUERY_RESPONSE_TIME_READ) to output gathered statistics.
UNINSTALL PLUGIN QUERY_RESPONSE_TIME_READ;
-- This plugin provides the interface (QUERY_RESPONSE_TIME_WRITE) to output gathered statistics.
UNINSTALL PLUGIN QUERY_RESPONSE_TIME_WRITE;

View File

@@ -1,7 +1,7 @@
if all tables be checksummed
checksum ...
ERRORS DIFFS ROWS CHUNKS SKIPPED TABLE
0 0 29 1 0 percona_test.checksums
0 0 27 1 0 percona_test.checksums
0 0 1 1 0 percona_test.sentinel
0 0 200 1 0 sakila.actor
0 0 603 1 0 sakila.address

View File

@@ -45,7 +45,7 @@ $dbh->do("CREATE TABLE IF NOT EXISTS percona_test.load_data (i int)");
`echo 1 > /tmp/load_data_test.$$`;
eval {
$dbh->do("LOAD DATA LOCAL INFILE '/tmp/load_data_test.$$' INTO TABLE percona_test.load_data");
$dbh->do("LOAD DATA INFILE '/tmp/load_data_test.$$' INTO TABLE percona_test.load_data");
};
if ( $EVAL_ERROR ) {