mirror of https://github.com/percona/percona-toolkit.git
synced 2025-09-06 12:27:56 +00:00
7648 lines · 242 KiB · Perl · Executable File
#!/usr/bin/env perl

# This program is part of Percona Toolkit: http://www.percona.com/software/
# See "COPYRIGHT, LICENSE, AND WARRANTY" at the end of this file for legal
# notices and disclaimers.

use strict;
use warnings FATAL => 'all';

# Setting PTDEBUG=1 in the environment enables verbose trace output on
# STDERR via each package's _d() helper.
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
|
|
|
# ###########################################################################
|
|
# OptionParser package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/OptionParser.pm
|
|
# t/lib/OptionParser.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# OptionParser - parse a tool's command-line options from its own POD.
#
# Option specs are read from the tool's "OPTIONS" POD section (each
# "=item --foo" paragraph), and cross-option rules (mutually exclusive,
# at-least-one, disables, defaults-to, etc.) from the prose paragraphs
# before the =over list.  Getopt::Long does the raw command-line parsing;
# this package layers types (size, time, DSN, hash, array), defaults,
# config files, and rule checking on top of it.
package OptionParser;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

use List::Util qw(max);
use Getopt::Long;

# Matches POD markup like L<"--foo"> or C<bar>; capture $1 is the bare text.
my $POD_link_re = '[LC]<"?([^">]+)"?>';

# new(%args) - constructor.
#
# All arguments are optional.  The head1/skip_rules/item/attributes/
# parse_attributes args are used internally to instantiate a second
# OptionParser that parses the DSN OPTIONS section (see get_specs());
# tools don't normally pass them.
sub new {
   my ( $class, %args ) = @_;
   my @required_args = qw();   # none required at present
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }

   # Basename of the running program; fall back to the full $0.
   my ($program_name) = $PROGRAM_NAME =~ m/([.A-Za-z-]+)$/;
   $program_name ||= $PROGRAM_NAME;
   my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.';

   # Attributes recognized in an option's "attribute: value" POD line.
   my %attributes = (
      'type'       => 1,
      'short form' => 1,
      'group'      => 1,
      'default'    => 1,
      'cumulative' => 1,
      'negatable'  => 1,
   );

   my $self = {
      head1            => 'OPTIONS',        # These args are used internally
      skip_rules       => 0,                # to instantiate another Option-
      item             => '--(.*)',         # Parser obj that parses the
      attributes       => \%attributes,     # DSN OPTIONS section.  Tools
      parse_attributes => \&_parse_attribs, # don't tinker with these args.

      %args,

      strict         => 1,  # disabled by a special rule
      program_name   => $program_name,
      opts           => {},
      got_opts       => 0,
      short_opts     => {},
      defaults       => {},
      groups         => {},
      allowed_groups => {},
      errors         => [],
      rules          => [], # desc of rules for --help
      mutex          => [], # rule: opts are mutually exclusive
      atleast1       => [], # rule: at least one opt is required
      disables       => {}, # rule: opt disables other opts
      defaults_to    => {}, # rule: opt defaults to value of other opt

      DSNParser      => undef,

      # Config files read by default, in order; later files override
      # earlier ones because their args are pushed later onto @ARGV.
      default_files  => [
         "/etc/percona-toolkit/percona-toolkit.conf",
         "/etc/percona-toolkit/$program_name.conf",
         "$home/.percona-toolkit.conf",
         "$home/.$program_name.conf",
      ],

      # Map from POD "type:" attribute to the single-char type code kept
      # in the Getopt::Long spec (non-standard codes are rewritten to =s
      # before being handed to Getopt::Long; see _parse_specs()).
      types          => {
         string => 's', # standard Getopt type
         int    => 'i', # standard Getopt type
         float  => 'f', # standard Getopt type
         Hash   => 'H', # hash, formed from a comma-separated list
         hash   => 'h', # hash as above, but only if a value is given
         Array  => 'A', # array, similar to Hash
         array  => 'a', # array, similar to hash
         DSN    => 'd', # DSN
         size   => 'z', # size with kMG suffix (powers of 2^10)
         time   => 'm', # time, with an optional suffix of s/h/m/d
      },
   };

   return bless $self, $class;
}

# get_specs($file) - read option specs and rules from $file's POD and
# load them into this object.  Also builds a DSNParser from the
# "DSN OPTIONS" section and records the tool version from "VERSION",
# if those sections exist.
sub get_specs {
   my ( $self, $file ) = @_;
   $file ||= $self->{file} || __FILE__;
   my @specs = $self->_pod_to_specs($file);
   $self->_parse_specs(@specs);

   open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR";
   my $contents = do { local $/ = undef; <$fh> };
   close $fh;
   if ( $contents =~ m/^=head1 DSN OPTIONS/m ) {
      PTDEBUG && _d('Parsing DSN OPTIONS');
      my $dsn_attribs = {
         dsn  => 1,
         copy => 1,
      };
      # DSN option attribs use yes/no in POD; normalize them to 1/0.
      my $parse_dsn_attribs = sub {
         my ( $self, $option, $attribs ) = @_;
         # Was a map in void context; a foreach expresses the intent.
         foreach my $attrib ( keys %$attribs ) {
            my $val = $attribs->{$attrib};
            if ( $val ) {
               $val = $val eq 'yes' ? 1
                    : $val eq 'no'  ? 0
                    :                 $val;
               $attribs->{$attrib} = $val;
            }
         }
         return {
            key => $option,
            %$attribs,
         };
      };
      # Direct method call; original used indirect syntax "new OptionParser".
      my $dsn_o = OptionParser->new(
         description      => 'DSN OPTIONS',
         head1            => 'DSN OPTIONS',
         dsn              => 0,        # XXX don't infinitely recurse!
         item             => '\* (.)', # key opts are a single character
         skip_rules       => 1,        # no rules before opts
         attributes       => $dsn_attribs,
         parse_attributes => $parse_dsn_attribs,
      );
      my @dsn_opts = map {
         my $opts = {
            key  => $_->{spec}->{key},
            dsn  => $_->{spec}->{dsn},
            copy => $_->{spec}->{copy},
            desc => $_->{desc},
         };
         $opts;
      } $dsn_o->_pod_to_specs($file);
      $self->{DSNParser} = DSNParser->new(opts => \@dsn_opts);
   }

   # The VERSION section's first paragraph line is the version string.
   if ( $contents =~ m/^=head1 VERSION\n\n^(.+)$/m ) {
      $self->{version} = $1;
      PTDEBUG && _d($self->{version});
   }

   return;
}

# Accessor for the DSNParser built by get_specs(), if any.
sub DSNParser {
   my ( $self ) = @_;
   return $self->{DSNParser};
}

# Returns the list of default config file paths, in read order.
sub get_defaults_files {
   my ( $self ) = @_;
   return @{$self->{default_files}};
}

# _pod_to_specs($file) - scan $file's POD for the $self->{head1} section
# and return its option specs (hashrefs) followed by its rules (strings).
# Reads the file in paragraph mode; structure is:
#   =head1 OPTIONS / rule paragraphs / =over / =item --opt ... / =back
sub _pod_to_specs {
   my ( $self, $file ) = @_;
   $file ||= $self->{file} || __FILE__;
   open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR";

   my @specs = ();
   my @rules = ();
   my $para;

   local $INPUT_RECORD_SEPARATOR = '';  # paragraph mode

   # Skip forward to the target =head1 section.
   while ( $para = <$fh> ) {
      next unless $para =~ m/^=head1 $self->{head1}/;
      last;
   }

   # Paragraphs before =over are rules (unless skip_rules).
   while ( $para = <$fh> ) {
      last if $para =~ m/^=over/;
      next if $self->{skip_rules};
      chomp $para;
      $para =~ s/\s+/ /g;
      $para =~ s/$POD_link_re/$1/go;
      PTDEBUG && _d('Option rule:', $para);
      push @rules, $para;
   }

   die "POD has no $self->{head1} section" unless $para;

   # Each iteration handles one =item: optional attributes paragraph,
   # then the short-help paragraph (first sentence of the description).
   do {
      if ( my ($option) = $para =~ m/^=item $self->{item}/ ) {
         chomp $para;
         PTDEBUG && _d($para);
         my %attribs;

         $para = <$fh>; # read next paragraph, possibly attributes

         if ( $para =~ m/: / ) { # attributes
            $para =~ s/\s+\Z//g;
            %attribs = map {
               my ( $attrib, $val) = split(/: /, $_);
               die "Unrecognized attribute for --$option: $attrib"
                  unless $self->{attributes}->{$attrib};
               ($attrib, $val);
            } split(/; /, $para);
            if ( $attribs{'short form'} ) {
               $attribs{'short form'} =~ s/-//;  # "-v" -> "v"
            }
            $para = <$fh>; # read next paragraph, probably short help desc
         }
         else {
            PTDEBUG && _d('Option has no attributes');
         }

         $para =~ s/\s+\Z//g;
         $para =~ s/\s+/ /g;
         $para =~ s/$POD_link_re/$1/go;

         # Keep only the first sentence as the short help.
         $para =~ s/\.(?:\n.*| [A-Z].*|\Z)//s;
         PTDEBUG && _d('Short help:', $para);

         die "No description after option spec $option" if $para =~ m/^=item/;

         # "--[no]foo" in POD means the option is negatable.
         if ( my ($base_option) = $option =~ m/^\[no\](.*)/ ) {
            $option = $base_option;
            $attribs{'negatable'} = 1;
         }

         push @specs, {
            spec  => $self->{parse_attributes}->($self, $option, \%attribs),
            desc  => $para
                   . (defined $attribs{default} ? " (default $attribs{default})" : ''),
            group => ($attribs{'group'} ? $attribs{'group'} : 'default'),
         };
      }
      # Skip the rest of the description until the next =item or the end
      # of the section.
      while ( $para = <$fh> ) {
         last unless $para;
         if ( $para =~ m/^=head1/ ) {
            $para = undef; # Can't 'last' out of a do {} block.
            last;
         }
         last if $para =~ m/^=item /;
      }
   } while ( $para );

   die "No valid specs in $self->{head1}" unless @specs;

   close $fh;
   return @specs, @rules;
}

# _parse_specs(@specs) - digest specs/rules from _pod_to_specs() into
# the internal opts/short_opts/defaults/rules structures.
sub _parse_specs {
   my ( $self, @specs ) = @_;
   my %disables; # special rule that requires deferred checking

   foreach my $opt ( @specs ) {
      if ( ref $opt ) { # It's an option spec, not a rule.
         PTDEBUG && _d('Parsing opt spec:',
            map { ($_, '=>', $opt->{$_}) } keys %$opt);

         my ( $long, $short ) = $opt->{spec} =~ m/^([\w-]+)(?:\|([^!+=]*))?/;
         if ( !$long ) {
            die "Cannot parse long option from spec $opt->{spec}";
         }
         $opt->{long} = $long;

         die "Duplicate long option --$long" if exists $self->{opts}->{$long};
         $self->{opts}->{$long} = $opt;

         if ( length $long == 1 ) {
            PTDEBUG && _d('Long opt', $long, 'looks like short opt');
            $self->{short_opts}->{$long} = $long;
         }

         if ( $short ) {
            die "Duplicate short option -$short"
               if exists $self->{short_opts}->{$short};
            $self->{short_opts}->{$short} = $long;
            $opt->{short} = $short;
         }
         else {
            $opt->{short} = undef;
         }

         # Flags derived from the Getopt spec and the description text.
         $opt->{is_negatable}  = $opt->{spec} =~ m/!/          ? 1 : 0;
         $opt->{is_cumulative} = $opt->{spec} =~ m/\+/         ? 1 : 0;
         $opt->{is_required}   = $opt->{desc} =~ m/required/   ? 1 : 0;

         $opt->{group} ||= 'default';
         $self->{groups}->{ $opt->{group} }->{$long} = 1;

         $opt->{value} = undef;
         $opt->{got}   = 0;

         my ( $type ) = $opt->{spec} =~ m/=(.)/;
         $opt->{type} = $type;
         PTDEBUG && _d($long, 'type:', $type);

         # Non-standard types are strings to Getopt::Long; they're parsed
         # into their real form by _validate_type().
         $opt->{spec} =~ s/=./=s/ if ( $type && $type =~ m/[HhAadzm]/ );

         # "(default X)" in the desc (added by _pod_to_specs) sets the
         # default; a bare "default" means default 1.
         if ( (my ($def) = $opt->{desc} =~ m/default\b(?: ([^)]+))?/) ) {
            $self->{defaults}->{$long} = defined $def ? $def : 1;
            PTDEBUG && _d($long, 'default:', $def);
         }

         if ( $long eq 'config' ) {
            $self->{defaults}->{$long} = join(',', $self->get_defaults_files());
         }

         # "disables ..." rules can reference opts not yet parsed, so
         # resolve them after this loop.
         if ( (my ($dis) = $opt->{desc} =~ m/(disables .*)/) ) {
            $disables{$long} = $dis;
            PTDEBUG && _d('Deferring check of disables rule for', $opt, $dis);
         }

         $self->{opts}->{$long} = $opt;
      }
      else { # It's an option rule, not a spec.
         PTDEBUG && _d('Parsing rule:', $opt);
         push @{$self->{rules}}, $opt;
         my @participants = $self->_get_participants($opt);
         my $rule_ok = 0;

         if ( $opt =~ m/mutually exclusive|one and only one/ ) {
            $rule_ok = 1;
            push @{$self->{mutex}}, \@participants;
            PTDEBUG && _d(@participants, 'are mutually exclusive');
         }
         if ( $opt =~ m/at least one|one and only one/ ) {
            $rule_ok = 1;
            push @{$self->{atleast1}}, \@participants;
            PTDEBUG && _d(@participants, 'require at least one');
         }
         if ( $opt =~ m/default to/ ) {
            $rule_ok = 1;
            $self->{defaults_to}->{$participants[0]} = $participants[1];
            PTDEBUG && _d($participants[0], 'defaults to', $participants[1]);
         }
         if ( $opt =~ m/restricted to option groups/ ) {
            $rule_ok = 1;
            my ($groups) = $opt =~ m/groups ([\w\s\,]+)/;
            my @groups = split(',', $groups);
            %{$self->{allowed_groups}->{$participants[0]}} = map {
               s/\s+//;
               $_ => 1;
            } @groups;
         }
         if( $opt =~ m/accepts additional command-line arguments/ ) {
            $rule_ok = 1;
            $self->{strict} = 0;
            PTDEBUG && _d("Strict mode disabled by rule");
         }

         die "Unrecognized option rule: $opt" unless $rule_ok;
      }
   }

   # Now that all opts are registered, resolve the deferred disables
   # rules so _get_participants() can validate the referenced opts.
   foreach my $long ( keys %disables ) {
      my @participants = $self->_get_participants($disables{$long});
      $self->{disables}->{$long} = \@participants;
      PTDEBUG && _d('Option', $long, 'disables', @participants);
   }

   return;
}

# _get_participants($str) - extract the --option names mentioned in a
# rule string, dying if any of them is not a known option.
sub _get_participants {
   my ( $self, $str ) = @_;
   my @participants;
   foreach my $long ( $str =~ m/--(?:\[no\])?([\w-]+)/g ) {
      die "Option --$long does not exist while processing rule $str"
         unless exists $self->{opts}->{$long};
      push @participants, $long;
   }
   PTDEBUG && _d('Participants for', $str, ':', @participants);
   return @participants;
}

# Returns a shallow copy of the opts hash (long name => opt struct).
sub opts {
   my ( $self ) = @_;
   my %opts = %{$self->{opts}};
   return %opts;
}

# Returns a shallow copy of the short opts hash (short => long name).
sub short_opts {
   my ( $self ) = @_;
   my %short_opts = %{$self->{short_opts}};
   return %short_opts;
}

# set_defaults(%defaults) - replace ALL defaults with the given map.
sub set_defaults {
   my ( $self, %defaults ) = @_;
   $self->{defaults} = {};
   foreach my $long ( keys %defaults ) {
      die "Cannot set default for nonexistent option $long"
         unless exists $self->{opts}->{$long};
      $self->{defaults}->{$long} = $defaults{$long};
      PTDEBUG && _d('Default val for', $long, ':', $defaults{$long});
   }
   return;
}

# Accessor for the defaults hashref.
sub get_defaults {
   my ( $self ) = @_;
   return $self->{defaults};
}

# Accessor for the option-groups hashref.
sub get_groups {
   my ( $self ) = @_;
   return $self->{groups};
}

# _set_option($opt, $val) - Getopt::Long callback: record that $opt was
# given on the command line.  Cumulative opts count occurrences instead
# of storing the value.
sub _set_option {
   my ( $self, $opt, $val ) = @_;
   my $long = exists $self->{opts}->{$opt}       ? $opt
            : exists $self->{short_opts}->{$opt} ? $self->{short_opts}->{$opt}
            : die "Getopt::Long gave a nonexistent option: $opt";

   $opt = $self->{opts}->{$long};
   if ( $opt->{is_cumulative} ) {
      $opt->{value}++;
   }
   else {
      $opt->{value} = $val;
   }
   $opt->{got} = 1;
   PTDEBUG && _d('Got option', $long, '=', $val);
}

# get_opts() - parse @ARGV (and config files) and apply all rules.
# Resets all option values to their defaults first, so it can be called
# more than once.  Errors accumulate in $self->{errors}; call
# usage_or_errors() afterwards to report them.
sub get_opts {
   my ( $self ) = @_;

   foreach my $long ( keys %{$self->{opts}} ) {
      $self->{opts}->{$long}->{got} = 0;
      $self->{opts}->{$long}->{value}
         = exists $self->{defaults}->{$long}       ? $self->{defaults}->{$long}
         : $self->{opts}->{$long}->{is_cumulative} ? 0
         : undef;
   }
   $self->{got_opts} = 0;

   $self->{errors} = [];

   # --config must be the first option so its files are read before the
   # rest of the command line is parsed.
   if ( @ARGV && $ARGV[0] eq "--config" ) {
      shift @ARGV;
      $self->_set_option('config', shift @ARGV);
   }
   if ( $self->has('config') ) {
      my @extra_args;
      foreach my $filename ( split(',', $self->get('config')) ) {
         eval {
            push @extra_args, $self->_read_config_file($filename);
         };
         if ( $EVAL_ERROR ) {
            # A missing default config file is fine; a missing file the
            # user explicitly named with --config is fatal.
            if ( $self->got('config') ) {
               die $EVAL_ERROR;
            }
            elsif ( PTDEBUG ) {
               _d($EVAL_ERROR);
            }
         }
      }
      # Prepend so real command-line args override config-file args.
      unshift @ARGV, @extra_args;
   }

   Getopt::Long::Configure('no_ignore_case', 'bundling');
   GetOptions(
      map    { $_->{spec} => sub { $self->_set_option(@_); } }
      grep   { $_->{long} ne 'config' } # --config is handled specially above.
      values %{$self->{opts}}
   ) or $self->save_error('Error parsing options');

   if ( exists $self->{opts}->{version} && $self->{opts}->{version}->{got} ) {
      if ( $self->{version} ) {
         print $self->{version}, "\n";
      }
      else {
         print "Error parsing version.  See the VERSION section of the tool's documentation.\n";
      }
      exit 0;
   }

   if ( @ARGV && $self->{strict} ) {
      $self->save_error("Unrecognized command-line options @ARGV");
   }

   # Rule: mutually exclusive options.
   foreach my $mutex ( @{$self->{mutex}} ) {
      my @set = grep { $self->{opts}->{$_}->{got} } @$mutex;
      if ( @set > 1 ) {
         my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" }
                      @{$mutex}[ 0 .. scalar(@$mutex) - 2] )
                 . ' and --'.$self->{opts}->{$mutex->[-1]}->{long}
                 . ' are mutually exclusive.';
         $self->save_error($err);
      }
   }

   # Rule: at least one of a set of options is required.
   foreach my $required ( @{$self->{atleast1}} ) {
      my @set = grep { $self->{opts}->{$_}->{got} } @$required;
      if ( @set == 0 ) {
         my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" }
                      @{$required}[ 0 .. scalar(@$required) - 2] )
                 .' or --'.$self->{opts}->{$required->[-1]}->{long};
         $self->save_error("Specify at least one of $err");
      }
   }

   $self->_check_opts( keys %{$self->{opts}} );
   $self->{got_opts} = 1;
   return;
}

# _check_opts(@long) - validate every option's value and apply the
# disables/allowed_groups/required rules.  Iterates repeatedly because a
# DSN option may "default to" another DSN option and cannot be parsed
# until that one is; parsed entries are delete()d from @long (leaving
# undef holes, hence the "next unless $long" guard), and the pass
# counter detects when no progress is being made.
sub _check_opts {
   my ( $self, @long ) = @_;
   my $long_last = scalar @long;
   while ( @long ) {
      foreach my $i ( 0..$#long ) {
         my $long = $long[$i];
         next unless $long;
         my $opt = $self->{opts}->{$long};
         if ( $opt->{got} ) {
            if ( exists $self->{disables}->{$long} ) {
               my @disable_opts = @{$self->{disables}->{$long}};
               map { $self->{opts}->{$_}->{value} = undef; } @disable_opts;
               PTDEBUG && _d('Unset options', @disable_opts,
                  'because', $long,'disables them');
            }

            if ( exists $self->{allowed_groups}->{$long} ) {

               my @restricted_groups = grep {
                  !exists $self->{allowed_groups}->{$long}->{$_}
               } keys %{$self->{groups}};

               my @restricted_opts;
               foreach my $restricted_group ( @restricted_groups ) {
                  RESTRICTED_OPT:
                  foreach my $restricted_opt (
                     keys %{$self->{groups}->{$restricted_group}} )
                  {
                     next RESTRICTED_OPT if $restricted_opt eq $long;
                     push @restricted_opts, $restricted_opt
                        if $self->{opts}->{$restricted_opt}->{got};
                  }
               }

               if ( @restricted_opts ) {
                  my $err;
                  if ( @restricted_opts == 1 ) {
                     $err = "--$restricted_opts[0]";
                  }
                  else {
                     $err = join(', ',
                               map { "--$self->{opts}->{$_}->{long}" }
                               grep { $_ }
                               @restricted_opts[0..scalar(@restricted_opts) - 2]
                            )
                          . ' or --'.$self->{opts}->{$restricted_opts[-1]}->{long};
                  }
                  $self->save_error("--$long is not allowed with $err");
               }
            }

         }
         elsif ( $opt->{is_required} ) {
            $self->save_error("Required option --$long must be specified");
         }

         $self->_validate_type($opt);
         if ( $opt->{parsed} ) {
            delete $long[$i];
         }
         else {
            PTDEBUG && _d('Temporarily failed to parse', $long);
         }
      }

      die "Failed to parse options, possibly due to circular dependencies"
         if @long == $long_last;
      $long_last = @long;
   }

   return;
}

# _validate_type($opt) - convert the raw string value of a typed option
# into its real form: seconds for time, bytes for size, a parsed DSN,
# or a hash/array from a comma-separated list.  Sets $opt->{parsed}=1
# when done; returns without setting it when a DSN must wait for the
# DSN it copies from.
sub _validate_type {
   my ( $self, $opt ) = @_;
   return unless $opt;

   if ( !$opt->{type} ) {
      $opt->{parsed} = 1;
      return;
   }

   my $val = $opt->{value};

   if ( $val && $opt->{type} eq 'm' ) { # type time
      PTDEBUG && _d('Parsing option', $opt->{long}, 'as a time value');
      my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/;
      if ( !$suffix ) {
         # The option's desc may declare a default suffix: "(suffix X)".
         my ( $s ) = $opt->{desc} =~ m/\(suffix (.)\)/;
         $suffix = $s || 's';
         PTDEBUG && _d('No suffix given; using', $suffix, 'for',
            $opt->{long}, '(value:', $val, ')');
      }
      if ( $suffix =~ m/[smhd]/ ) {
         $val = $suffix eq 's' ? $num            # Seconds
              : $suffix eq 'm' ? $num * 60       # Minutes
              : $suffix eq 'h' ? $num * 3600     # Hours
              :                  $num * 86400;   # Days
         $opt->{value} = ($prefix || '') . $val;
         PTDEBUG && _d('Setting option', $opt->{long}, 'to', $val);
      }
      else {
         $self->save_error("Invalid time suffix for --$opt->{long}");
      }
   }
   elsif ( $val && $opt->{type} eq 'd' ) { # type DSN
      PTDEBUG && _d('Parsing option', $opt->{long}, 'as a DSN');
      my $prev = {};
      my $from_key = $self->{defaults_to}->{ $opt->{long} };
      if ( $from_key ) {
         PTDEBUG && _d($opt->{long}, 'DSN copies from', $from_key, 'DSN');
         if ( $self->{opts}->{$from_key}->{parsed} ) {
            $prev = $self->{opts}->{$from_key}->{value};
         }
         else {
            PTDEBUG && _d('Cannot parse', $opt->{long}, 'until',
               $from_key, 'parsed');
            return;  # try again on the next _check_opts() pass
         }
      }
      my $defaults = $self->{DSNParser}->parse_options($self);
      $opt->{value} = $self->{DSNParser}->parse($val, $prev, $defaults);
   }
   elsif ( $val && $opt->{type} eq 'z' ) { # type size
      PTDEBUG && _d('Parsing option', $opt->{long}, 'as a size value');
      $self->_parse_size($opt, $val);
   }
   elsif ( $opt->{type} eq 'H' || (defined $val && $opt->{type} eq 'h') ) {
      # Split on unescaped commas; "H" always yields a hash, "h" only
      # when a value was given.
      $opt->{value} = { map { $_ => 1 } split(/(?<!\\),\s*/, ($val || '')) };
   }
   elsif ( $opt->{type} eq 'A' || (defined $val && $opt->{type} eq 'a') ) {
      $opt->{value} = [ split(/(?<!\\),\s*/, ($val || '')) ];
   }
   else {
      PTDEBUG && _d('Nothing to validate for option',
         $opt->{long}, 'type', $opt->{type}, 'value', $val);
   }

   $opt->{parsed} = 1;
   return;
}

# get($opt) - return the value of an option (long or short name).
sub get {
   my ( $self, $opt ) = @_;
   my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
   die "Option $opt does not exist"
      unless $long && exists $self->{opts}->{$long};
   return $self->{opts}->{$long}->{value};
}

# got($opt) - true if the option was explicitly given on the command line.
sub got {
   my ( $self, $opt ) = @_;
   my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
   die "Option $opt does not exist"
      unless $long && exists $self->{opts}->{$long};
   return $self->{opts}->{$long}->{got};
}

# has($opt) - true if the option exists at all.
sub has {
   my ( $self, $opt ) = @_;
   my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
   return defined $long ? exists $self->{opts}->{$long} : 0;
}

# set($opt, $val) - programmatically set an option's value.
sub set {
   my ( $self, $opt, $val ) = @_;
   my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
   die "Option $opt does not exist"
      unless $long && exists $self->{opts}->{$long};
   $self->{opts}->{$long}->{value} = $val;
   return;
}

# save_error($error) - accumulate an error for later reporting.
sub save_error {
   my ( $self, $error ) = @_;
   push @{$self->{errors}}, $error;
   return;
}

# Accessor for the accumulated errors arrayref.
sub errors {
   my ( $self ) = @_;
   return $self->{errors};
}

# One-line usage string, from SYNOPSIS or the usage arg.
sub usage {
   my ( $self ) = @_;
   warn "No usage string is set" unless $self->{usage}; # XXX
   return "Usage: " . ($self->{usage} || '') . "\n";
}

# Tool description plus a standard pointer to --help and perldoc,
# wrapped to 80 columns unless DONT_BREAK_LINES is set.
sub descr {
   my ( $self ) = @_;
   warn "No description string is set" unless $self->{description}; # XXX
   my $descr  = ($self->{description} || $self->{program_name} || '')
              . "  For more details, please use the --help option, "
              . "or try 'perldoc $PROGRAM_NAME' "
              . "for complete documentation.";
   $descr = join("\n", $descr =~ m/(.{0,80})(?:\s+|$)/g)
      unless $ENV{DONT_BREAK_LINES};
   $descr =~ s/ +$//mg;
   return $descr;
}

# usage_or_errors($file, $return) - print usage if --help was given, or
# the accumulated errors if any; exits unless $return is true.
sub usage_or_errors {
   my ( $self, $file, $return ) = @_;
   $file ||= $self->{file} || __FILE__;

   if ( !$self->{description} || !$self->{usage} ) {
      PTDEBUG && _d("Getting description and usage from SYNOPSIS in", $file);
      my %synop = $self->_parse_synopsis($file);
      $self->{description} ||= $synop{description};
      $self->{usage}       ||= $synop{usage};
      PTDEBUG && _d("Description:", $self->{description},
         "\nUsage:", $self->{usage});
   }

   if ( $self->{opts}->{help}->{got} ) {
      print $self->print_usage() or die "Cannot print usage: $OS_ERROR";
      exit 0 unless $return;
   }
   elsif ( scalar @{$self->{errors}} ) {
      print $self->print_errors() or die "Cannot print errors: $OS_ERROR";
      exit 0 unless $return;
   }

   return;
}

# Format the usage string plus any accumulated errors.
sub print_errors {
   my ( $self ) = @_;
   my $usage = $self->usage() . "\n";
   if ( (my @errors = @{$self->{errors}}) ) {
      $usage .= join("\n * ", 'Errors in command-line arguments:', @errors)
              . "\n";
   }
   return $usage . "\n" . $self->descr();
}

# Format the full --help text: description, usage, every option grouped
# and wrapped to 80 columns, rules, DSN usage, and current values.
sub print_usage {
   my ( $self ) = @_;
   die "Run get_opts() before print_usage()" unless $self->{got_opts};
   my @opts = values %{$self->{opts}};

   # Width of the longest long-option label.
   my $maxl = max(
      map {
         length($_->{long})               # option long name
         + ($_->{is_negatable} ? 4 : 0)   # "[no]" if opt is negatable
         + ($_->{type} ? 2 : 0)           # "=x" where x is the opt type
      }
      @opts);

   # Width of the longest short-option label (0 if none).
   my $maxs = max(0,
      map {
         length($_)
         + ($self->{opts}->{$_}->{is_negatable} ? 4 : 0)
         + ($self->{opts}->{$_}->{type} ? 2 : 0)
      }
      values %{$self->{short_opts}});

   my $lcol = max($maxl, ($maxs + 3));
   my $rcol = 80 - $lcol - 6;
   my $rpad = ' ' x ( 80 - $rcol );

   $maxs = max($lcol - 3, $maxs);

   my $usage = $self->descr() . "\n" . $self->usage();

   # 'default' group last; others in sorted order.
   my @groups = reverse sort grep { $_ ne 'default'; } keys %{$self->{groups}};
   push @groups, 'default';

   foreach my $group ( reverse @groups ) {
      $usage .= "\n".($group eq 'default' ? 'Options' : $group).":\n\n";
      foreach my $opt (
         sort { $a->{long} cmp $b->{long} }
         grep { $_->{group} eq $group }
         @opts )
      {
         my $long  = $opt->{is_negatable} ? "[no]$opt->{long}" : $opt->{long};
         my $short = $opt->{short};
         my $desc  = $opt->{desc};

         $long .= $opt->{type} ? "=$opt->{type}" : "";

         if ( $opt->{type} && $opt->{type} eq 'm' ) {
            my ($s) = $desc =~ m/\(suffix (.)\)/;
            $s    ||= 's';
            $desc =~ s/\s+\(suffix .\)//;
            $desc .= ".  Optional suffix s=seconds, m=minutes, h=hours, "
                   . "d=days; if no suffix, $s is used.";
         }
         # Wrap the description to the right column.
         $desc = join("\n$rpad", grep { $_ } $desc =~ m/(.{0,$rcol})(?:\s+|$)/g);
         $desc =~ s/ +$//mg;
         if ( $short ) {
            $usage .= sprintf("  --%-${maxs}s -%s  %s\n", $long, $short, $desc);
         }
         else {
            $usage .= sprintf("  --%-${lcol}s  %s\n", $long, $desc);
         }
      }
   }

   $usage .= "\nOption types: s=string, i=integer, f=float, h/H/a/A=comma-separated list, d=DSN, z=size, m=time\n";

   if ( (my @rules = @{$self->{rules}}) ) {
      $usage .= "\nRules:\n\n";
      $usage .= join("\n", map { "  $_" } @rules) . "\n";
   }
   if ( $self->{DSNParser} ) {
      $usage .= "\n" . $self->{DSNParser}->usage();
   }
   $usage .= "\nOptions and values after processing arguments:\n\n";
   foreach my $opt ( sort { $a->{long} cmp $b->{long} } @opts ) {
      my $val   = $opt->{value};
      my $type  = $opt->{type} || '';
      # A spec with no =type and at most a single-char alias is boolean.
      my $bool  = $opt->{spec} =~ m/^[\w-]+(?:\|[\w-])?!?$/;
      $val      = $bool              ? ( $val ? 'TRUE' : 'FALSE' )
                : !defined $val      ? '(No value)'
                : $type eq 'd'       ? $self->{DSNParser}->as_string($val)
                : $type =~ m/H|h/    ? join(',', sort keys %$val)
                : $type =~ m/A|a/    ? join(',', @$val)
                :                      $val;
      $usage .= sprintf("  --%-${lcol}s  %s\n", $opt->{long}, $val);
   }
   return $usage;
}

# prompt_noecho($prompt) - prompt the user and read a line from STDIN
# with terminal echo disabled (for passwords).  Requires Term::ReadKey.
# Callable as a function or a method.
sub prompt_noecho {
   shift @_ if ref $_[0] eq __PACKAGE__;
   my ( $prompt ) = @_;
   local $OUTPUT_AUTOFLUSH = 1;
   print $prompt
      or die "Cannot print: $OS_ERROR";
   my $response;
   eval {
      require Term::ReadKey;
      Term::ReadKey::ReadMode('noecho');
      chomp($response = <STDIN>);
      Term::ReadKey::ReadMode('normal');
      print "\n"
         or die "Cannot print: $OS_ERROR";
   };
   if ( $EVAL_ERROR ) {
      die "Cannot read response; is Term::ReadKey installed? $EVAL_ERROR";
   }
   return $response;
}

# _read_config_file($filename) - parse a config file into a list of
# command-line args.  "option=value" lines become --option value; a
# bare "--" line switches to passing subsequent lines through verbatim
# (positional args).  Comments (#, ;) and blank lines are skipped.
sub _read_config_file {
   my ( $self, $filename ) = @_;
   open my $fh, "<", $filename or die "Cannot open $filename: $OS_ERROR\n";
   my @args;
   my $prefix = '--';
   my $parse  = 1;

   LINE:
   while ( my $line = <$fh> ) {
      chomp $line;
      next LINE if $line =~ m/^\s*(?:\#|\;|$)/;
      $line =~ s/\s+#.*$//g;       # strip trailing comments
      $line =~ s/^\s+|\s+$//g;     # trim
      if ( $line eq '--' ) {
         $prefix = '';
         $parse  = 0;
         next LINE;
      }
      if ( $parse
         && (my($opt, $arg) = $line =~ m/^\s*([^=\s]+?)(?:\s*=\s*(.*?)\s*)?$/)
      ) {
         push @args, grep { defined $_ } ("$prefix$opt", $arg);
      }
      elsif ( $line =~ m/./ ) {
         push @args, $line;
      }
      else {
         die "Syntax error in file $filename at line $INPUT_LINE_NUMBER";
      }
   }
   close $fh;
   return @args;
}

# read_para_after($file, $regex) - return the POD paragraph that follows
# the first paragraph matching $regex (after =pod).
sub read_para_after {
   my ( $self, $file, $regex ) = @_;
   open my $fh, "<", $file or die "Can't open $file: $OS_ERROR";
   local $INPUT_RECORD_SEPARATOR = '';  # paragraph mode
   my $para;
   while ( $para = <$fh> ) {
      next unless $para =~ m/^=pod$/m;
      last;
   }
   while ( $para = <$fh> ) {
      next unless $para =~ m/$regex/;
      last;
   }
   $para = <$fh>;
   chomp($para);
   close $fh or die "Can't close $file: $OS_ERROR";
   return $para;
}

# clone() - copy this object: opts/short_opts/defaults one level deep
# (nested hashrefs and arrayrefs are copied), got_opts by value.
sub clone {
   my ( $self ) = @_;

   my %clone = map {
      my $hashref  = $self->{$_};
      my $val_copy = {};
      foreach my $key ( keys %$hashref ) {
         my $ref = ref $hashref->{$key};
         $val_copy->{$key} = !$ref           ? $hashref->{$key}
                           : $ref eq 'HASH'  ? { %{$hashref->{$key}} }
                           : $ref eq 'ARRAY' ? [ @{$hashref->{$key}} ]
                           : $hashref->{$key};
      }
      $_ => $val_copy;
   } qw(opts short_opts defaults);

   foreach my $scalar ( qw(got_opts) ) {
      $clone{$scalar} = $self->{$scalar};
   }

   # Two-arg bless into the caller's class; the original one-arg bless
   # would always bless into OptionParser, breaking subclasses.
   return bless \%clone, ref $self;
}

# _parse_size($opt, $val) - convert "N", "Nk", "NM", "NG" (powers of
# 2^10) into bytes, preserving a +/- prefix.  The literal string "null"
# is passed through.  Invalid values are saved as errors.
sub _parse_size {
   my ( $self, $opt, $val ) = @_;

   if ( lc($val || '') eq 'null' ) {
      PTDEBUG && _d('NULL size for', $opt->{long});
      $opt->{value} = 'null';
      return;
   }

   my %factor_for = (k => 1_024, M => 1_048_576, G => 1_073_741_824);
   my ($pre, $num, $factor) = $val =~ m/^([+-])?(\d+)([kMG])?$/;
   if ( defined $num ) {
      if ( $factor ) {
         $num *= $factor_for{$factor};
         # Was $opt->{y}, a key that never exists; use the long name.
         PTDEBUG && _d('Setting option', $opt->{long},
            'to num', $num, '* factor', $factor);
      }
      $opt->{value} = ($pre || '') . $num;
   }
   else {
      $self->save_error("Invalid size for --$opt->{long}: $val");
   }
   return;
}

# _parse_attribs($option, $attribs) - build the Getopt::Long spec string
# for an option from its POD attributes, e.g. "foo|f!+=s".
sub _parse_attribs {
   my ( $self, $option, $attribs ) = @_;
   my $types = $self->{types};
   return $option
      . ($attribs->{'short form'} ? '|' . $attribs->{'short form'}   : '' )
      . ($attribs->{'negatable'}  ? '!'                              : '' )
      . ($attribs->{'cumulative'} ? '+'                              : '' )
      . ($attribs->{'type'}       ? '=' . $types->{$attribs->{type}} : '' );
}

# _parse_synopsis($file) - extract (usage, description) from the first
# two paragraphs after =head1 SYNOPSIS.
sub _parse_synopsis {
   my ( $self, $file ) = @_;
   $file ||= $self->{file} || __FILE__;
   PTDEBUG && _d("Parsing SYNOPSIS in", $file);

   local $INPUT_RECORD_SEPARATOR = '';  # read paragraphs
   open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR";
   my $para;
   1 while defined($para = <$fh>) && $para !~ m/^=head1 SYNOPSIS/;
   die "$file does not contain a SYNOPSIS section" unless $para;
   my @synop;
   for ( 1..2 ) {  # 1 for the usage, 2 for the description
      my $para = <$fh>;
      push @synop, $para;
   }
   close $fh;
   PTDEBUG && _d("Raw SYNOPSIS text:", @synop);
   my ($usage, $desc) = @synop;
   die "The SYNOPSIS section in $file is not formatted properly"
      unless $usage && $desc;

   $usage =~ s/^\s*Usage:\s+(.+)/$1/;
   chomp $usage;

   $desc =~ s/\n/ /g;
   $desc =~ s/\s{2,}/ /g;
   $desc =~ s/\. ([A-Z][a-z])/.  $1/g;
   $desc =~ s/\s+$//;

   return (
      description => $desc,
      usage       => $usage,
   );
}

# _d(@msg) - debug print to STDERR with caller package/line and PID.
sub _d {
   my ($package, undef, $line) = caller 0;
   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
        map { defined $_ ? $_ : 'undef' }
        @_;
   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
}

# With PTDEBUG on, announce the interpreter, host, and args at load time.
if ( PTDEBUG ) {
   print '# ', $^X, ' ', $], "\n";
   if ( my $uname = `uname -a` ) {
      $uname =~ s/\s+/ /g;
      print "# $uname\n";
   }
   print '# Arguments: ',
      join(' ', map { my $a = "_[$_]_"; $a =~ s/\n/\n# /g; $a; } @ARGV), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End OptionParser package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# VersionParser package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/VersionParser.pm
|
|
# t/lib/VersionParser.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# VersionParser - normalize MySQL version strings so they can be
# compared with plain string comparison.
package VersionParser;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

# Constructor; the object doubles as a per-$dbh cache of parsed versions.
sub new {
   my ( $class ) = @_;
   return bless {}, $class;
}

# parse($str) - turn e.g. "5.1.51-log" into "005001051": the first three
# numbers in the string, each zero-padded to three digits.
sub parse {
   my ( $self, $version_string ) = @_;
   my @numbers    = $version_string =~ m/(\d+)/g;
   my $normalized = sprintf('%03d%03d%03d', @numbers);
   PTDEBUG && _d($version_string, 'parses to', $normalized);
   return $normalized;
}

# version_ge($dbh, $target) - true if the server behind $dbh is at
# version $target or newer.  The server's version is fetched once per
# $dbh and cached on $self.
sub version_ge {
   my ( $self, $dbh, $target ) = @_;
   $self->{$dbh} ||= $self->parse(
      $dbh->selectrow_array('SELECT VERSION()'));
   my $is_ge = $self->{$dbh} ge $self->parse($target) ? 1 : 0;
   PTDEBUG && _d($self->{$dbh}, 'ge', $target, ':', $is_ge);
   return $is_ge;
}

# innodb_version($dbh) - report InnoDB support: "NO", "DISABLED", the
# innodb_version variable, or "BUILTIN" for old servers without it.
sub innodb_version {
   my ( $self, $dbh ) = @_;
   return unless $dbh;
   my $innodb_version = "NO";

   # Find the InnoDB row of SHOW ENGINES, with its keys lowercased.
   my $engine_rows = $dbh->selectall_arrayref("SHOW ENGINES", {Slice=>{}});
   my $innodb;
   ENGINE:
   foreach my $row ( @$engine_rows ) {
      my %lc_row = map { lc($_) => $row->{$_} } keys %$row;
      next ENGINE unless $lc_row{engine} =~ m/InnoDB/i;
      $innodb = \%lc_row;
      last ENGINE;
   }

   if ( $innodb ) {
      PTDEBUG && _d("InnoDB support:", $innodb->{support});
      if ( $innodb->{support} =~ m/YES|DEFAULT/i ) {
         my $vars = $dbh->selectrow_hashref(
            "SHOW VARIABLES LIKE 'innodb_version'");
         $innodb_version = !$vars ? "BUILTIN"
                         :          ($vars->{Value} || $vars->{value});
      }
      else {
         $innodb_version = $innodb->{support}; # probably DISABLED or NO
      }
   }

   PTDEBUG && _d("InnoDB version:", $innodb_version);
   return $innodb_version;
}

# _d(@msg) - debug print to STDERR with caller package/line and PID.
sub _d {
   my ($pkg, undef, $line) = caller 0;
   my @msg = map {
      my $str = defined $_ ? $_ : 'undef';
      $str =~ s/\n/\n# /g;
      $str;
   } @_;
   print STDERR "# $pkg:$line $PID ", join(' ', @msg), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End VersionParser package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# DSNParser package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/DSNParser.pm
|
|
# t/lib/DSNParser.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
   package DSNParser;

   use strict;
   use warnings FATAL => 'all';
   use English qw(-no_match_vars);
   use constant PTDEBUG => $ENV{PTDEBUG} || 0;

   use Data::Dumper;
   $Data::Dumper::Indent    = 0;
   $Data::Dumper::Quotekeys = 0;

   # A comma splits DSN parts unless it is backslash-escaped.
   my $dsn_sep = qr/(?<!\\),/;

   # DBI is loaded lazily so the tool can still print --help etc. on
   # systems without DBI installed; get_dbh() dies with advice if absent.
   eval {
      require DBI;
   };
   my $have_dbi = $EVAL_ERROR ? 0 : 1;

   # Parses key=value[,key=value...] DSN strings (h=host,P=port,...) into
   # hashrefs and builds DBI connection parameters from them.

   # new(opts => \@opts)
   # Each opt is a hashref with at least {key, desc}; optional {dsn, copy}.
   # The opts normally come from the tool's DSN OPTIONS POD section.
   sub new {
      my ( $class, %args ) = @_;
      foreach my $arg ( qw(opts) ) {
         die "I need a $arg argument" unless $args{$arg};
      }
      my $self = {
         opts => {} # h, P, u, etc. Should come from DSN OPTIONS section in POD.
      };
      foreach my $opt ( @{$args{opts}} ) {
         if ( !$opt->{key} || !$opt->{desc} ) {
            die "Invalid DSN option: ", Dumper($opt);
         }
         PTDEBUG && _d('DSN option:',
            join(', ',
               map { "$_=" . (defined $opt->{$_} ? ($opt->{$_} || '') : 'undef') }
                  keys %$opt
            )
         );
         $self->{opts}->{$opt->{key}} = {
            dsn  => $opt->{dsn},
            desc => $opt->{desc},
            copy => $opt->{copy} || 0,   # copy: inherit value from a previous DSN
         };
      }
      return bless $self, $class;
   }

   # Get or set a parser-level property (e.g. 'required', 'dbidriver',
   # 'set-vars').  With two args it's a getter; with three, a setter.
   sub prop {
      my ( $self, $prop, $value ) = @_;
      if ( @_ > 2 ) {
         PTDEBUG && _d('Setting', $prop, 'property');
         $self->{$prop} = $value;
      }
      return $self->{$prop};
   }

   # parse($dsn, $prev, $defaults)
   # Parse a DSN string into a hashref of key => value.  A bareword part
   # (no '=') is treated as the host (h).  Missing keys fall back to
   # $prev (only for keys marked copy) and then to $defaults.  Dies on
   # unknown keys and on missing keys listed in the 'required' property.
   sub parse {
      my ( $self, $dsn, $prev, $defaults ) = @_;
      if ( !$dsn ) {
         PTDEBUG && _d('No DSN to parse');
         return;
      }
      PTDEBUG && _d('Parsing', $dsn);
      $prev     ||= {};
      $defaults ||= {};
      my %given_props;
      my %final_props;
      my $opts = $self->{opts};

      foreach my $dsn_part ( split($dsn_sep, $dsn) ) {
         $dsn_part =~ s/\\,/,/g;   # un-escape literal commas
         if ( my ($prop_key, $prop_val) = $dsn_part =~ m/^(.)=(.*)$/ ) {
            $given_props{$prop_key} = $prop_val;
         }
         else {
            PTDEBUG && _d('Interpreting', $dsn_part, 'as h=', $dsn_part);
            $given_props{h} = $dsn_part;
         }
      }

      foreach my $key ( keys %$opts ) {
         PTDEBUG && _d('Finding value for', $key);
         $final_props{$key} = $given_props{$key};
         if ( !defined $final_props{$key}
              && defined $prev->{$key} && $opts->{$key}->{copy} )
         {
            $final_props{$key} = $prev->{$key};
            PTDEBUG && _d('Copying value for', $key, 'from previous DSN');
         }
         if ( !defined $final_props{$key} ) {
            $final_props{$key} = $defaults->{$key};
            PTDEBUG && _d('Copying value for', $key, 'from defaults');
         }
      }

      foreach my $key ( keys %given_props ) {
         die "Unknown DSN option '$key' in '$dsn'. For more details, "
               . "please use the --help option, or try 'perldoc $PROGRAM_NAME' "
               . "for complete documentation."
            unless exists $opts->{$key};
      }
      if ( (my $required = $self->prop('required')) ) {
         foreach my $key ( keys %$required ) {
            die "Missing required DSN option '$key' in '$dsn'. For more details, "
                  . "please use the --help option, or try 'perldoc $PROGRAM_NAME' "
                  . "for complete documentation."
               unless $final_props{$key};
         }
      }

      return \%final_props;
   }

   # Build a DSN from command-line options whose names match DSN keys
   # (e.g. --host for h), then parse it like a normal DSN string.
   sub parse_options {
      my ( $self, $o ) = @_;
      die 'I need an OptionParser object' unless ref $o eq 'OptionParser';
      my $dsn_string
         = join(',',
              map  { "$_=".$o->get($_); }
              grep { $o->has($_) && $o->get($_) }
              keys %{$self->{opts}}
           );
      PTDEBUG && _d('DSN string made from options:', $dsn_string);
      return $self->parse($dsn_string);
   }

   # Stringify a parsed DSN hashref, masking the password (p=...).
   # $props optionally restricts and orders the keys printed.
   sub as_string {
      my ( $self, $dsn, $props ) = @_;
      return $dsn unless ref $dsn;   # already a string
      my @keys = $props ? @$props : sort keys %$dsn;
      return join(',',
         map  { "$_=" . ($_ eq 'p' ? '...' : $dsn->{$_}) }
         grep {
            exists $self->{opts}->{$_}
            && exists $dsn->{$_}
            && defined $dsn->{$_}
         } @keys);
   }

   # Human-readable table of the allowed DSN keys for --help output.
   sub usage {
      my ( $self ) = @_;
      my $usage
         = "DSN syntax is key=value[,key=value...] Allowable DSN keys:\n\n"
         . " KEY COPY MEANING\n"
         . " === ==== =============================================\n";
      my %opts = %{$self->{opts}};
      foreach my $key ( sort keys %opts ) {
         $usage .= " $key "
                .  ($opts{$key}->{copy} ? 'yes ' : 'no ')
                .  ($opts{$key}->{desc} || '[No description]')
                .  "\n";
      }
      $usage .= "\n If the DSN is a bareword, the word is treated as the 'h' key.\n";
      return $usage;
   }

   # Turn a parsed DSN into ($dbi_connect_string, $user, $password).
   # The 'dbidriver' property selects Pg; the default is mysql, which
   # also reads the [client] group from the user's my.cnf.
   sub get_cxn_params {
      my ( $self, $info ) = @_;
      my $dsn;
      my %opts = %{$self->{opts}};
      my $driver = $self->prop('dbidriver') || '';
      if ( $driver eq 'Pg' ) {
         $dsn = 'DBI:Pg:dbname=' . ( $info->{D} || '' ) . ';'
            . join(';', map  { "$opts{$_}->{dsn}=$info->{$_}" }
                        grep { defined $info->{$_} }
                        qw(h P));
      }
      else {
         $dsn = 'DBI:mysql:' . ( $info->{D} || '' ) . ';'
            . join(';', map  { "$opts{$_}->{dsn}=$info->{$_}" }
                        grep { defined $info->{$_} }
                        qw(F h P S A))
            . ';mysql_read_default_group=client';
      }
      PTDEBUG && _d($dsn);
      return ($dsn, $info->{u}, $info->{p});
   }

   # Fill missing DSN fields (h, S, P, u, D) from the live connection's
   # server variables and current user/database.
   sub fill_in_dsn {
      my ( $self, $dbh, $dsn ) = @_;
      my $vars = $dbh->selectall_hashref('SHOW VARIABLES', 'Variable_name');
      my ($user, $db) = $dbh->selectrow_array('SELECT USER(), DATABASE()');
      $user =~ s/@.*//;   # strip the @host part of user@host
      $dsn->{h} ||= $vars->{hostname}->{Value};
      $dsn->{S} ||= $vars->{'socket'}->{Value};
      $dsn->{P} ||= $vars->{port}->{Value};
      $dsn->{u} ||= $user;
      $dsn->{D} ||= $db;
   }

   # Connect via DBI and normalize the session: quote SHOW CREATE output,
   # add NO_AUTO_VALUE_ON_ZERO to the SQL mode, apply charset/SET NAMES,
   # and run any 'set-vars' property.  Tries twice: if the first attempt
   # fails because of utf8 support, it retries without mysql_enable_utf8.
   sub get_dbh {
      my ( $self, $cxn_string, $user, $pass, $opts ) = @_;
      $opts ||= {};
      my $defaults = {
         AutoCommit         => 0,
         RaiseError         => 1,
         PrintError         => 0,
         ShowErrorStatement => 1,
         mysql_enable_utf8 => ($cxn_string =~ m/charset=utf8/i ? 1 : 0),
      };
      @{$defaults}{ keys %$opts } = values %$opts;   # caller opts win

      if ( $opts->{mysql_use_result} ) {
         $defaults->{mysql_use_result} = 1;
      }

      if ( !$have_dbi ) {
         die "Cannot connect to MySQL because the Perl DBI module is not "
            . "installed or not found. Run 'perl -MDBI' to see the directories "
            . "that Perl searches for DBI. If DBI is not installed, try:\n"
            . " Debian/Ubuntu apt-get install libdbi-perl\n"
            . " RHEL/CentOS yum install perl-DBI\n"
            . " OpenSolaris pgk install pkg:/SUNWpmdbi\n";

      }

      my $dbh;
      my $tries = 2;
      while ( !$dbh && $tries-- ) {
         PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass,
            join(', ', map { "$_=>$defaults->{$_}" } keys %$defaults ));

         eval {
            $dbh = DBI->connect($cxn_string, $user, $pass, $defaults);

            if ( $cxn_string =~ m/mysql/i ) {
               my $sql;

               # Preserve the current SQL mode and append to it below.
               $sql = 'SELECT @@SQL_MODE';
               PTDEBUG && _d($dbh, $sql);
               my ($sql_mode) = $dbh->selectrow_array($sql);

               $sql = 'SET @@SQL_QUOTE_SHOW_CREATE = 1'
                    . '/*!40101, @@SQL_MODE=\'NO_AUTO_VALUE_ON_ZERO'
                    . ($sql_mode ? ",$sql_mode" : '')
                    . '\'*/';
               PTDEBUG && _d($dbh, $sql);
               $dbh->do($sql);

               if ( my ($charset) = $cxn_string =~ m/charset=(\w+)/ ) {
                  $sql = "/*!40101 SET NAMES $charset*/";
                  PTDEBUG && _d($dbh, ':', $sql);
                  $dbh->do($sql);
                  PTDEBUG && _d('Enabling charset for STDOUT');
                  if ( $charset eq 'utf8' ) {
                     binmode(STDOUT, ':utf8')
                        or die "Can't binmode(STDOUT, ':utf8'): $OS_ERROR";
                  }
                  else {
                     binmode(STDOUT) or die "Can't binmode(STDOUT): $OS_ERROR";
                  }
               }

               if ( $self->prop('set-vars') ) {
                  $sql = "SET " . $self->prop('set-vars');
                  PTDEBUG && _d($dbh, ':', $sql);
                  $dbh->do($sql);
               }
            }
         };
         if ( !$dbh && $EVAL_ERROR ) {
            PTDEBUG && _d($EVAL_ERROR);
            if ( $EVAL_ERROR =~ m/not a compiled character set|character set utf8/ ) {
               PTDEBUG && _d('Going to try again without utf8 support');
               delete $defaults->{mysql_enable_utf8};
            }
            elsif ( $EVAL_ERROR =~ m/locate DBD\/mysql/i ) {
               die "Cannot connect to MySQL because the Perl DBD::mysql module is "
                  . "not installed or not found. Run 'perl -MDBD::mysql' to see "
                  . "the directories that Perl searches for DBD::mysql. If "
                  . "DBD::mysql is not installed, try:\n"
                  . " Debian/Ubuntu apt-get install libdbd-mysql-perl\n"
                  . " RHEL/CentOS yum install perl-DBD-MySQL\n"
                  . " OpenSolaris pgk install pkg:/SUNWapu13dbd-mysql\n";
            }
            if ( !$tries ) {
               die $EVAL_ERROR;   # out of retries; re-raise the last error
            }
         }
      }

      PTDEBUG && _d('DBH info: ',
         $dbh,
         Dumper($dbh->selectrow_hashref(
            'SELECT DATABASE(), CONNECTION_ID(), VERSION()/*!50038 , @@hostname*/')),
         'Connection info:', $dbh->{mysql_hostinfo},
         'Character set info:', Dumper($dbh->selectall_arrayref(
            'SHOW VARIABLES LIKE "character_set%"', { Slice => {}})),
         '$DBD::mysql::VERSION:', $DBD::mysql::VERSION,
         '$DBI::VERSION:', $DBI::VERSION,
      );

      return $dbh;
   }

   # Hostname of the server behind $dbh: parsed from mysql_hostinfo if
   # possible, else from @@hostname (MySQL 5.0.38+, per the version hint).
   sub get_hostname {
      my ( $self, $dbh ) = @_;
      if ( my ($host) = ($dbh->{mysql_hostinfo} || '') =~ m/^(\w+) via/ ) {
         return $host;
      }
      my ( $hostname, $one ) = $dbh->selectrow_array(
         'SELECT /*!50038 @@hostname, */ 1');
      return $hostname;
   }

   # Disconnect, dumping the handle tree first when debugging.
   sub disconnect {
      my ( $self, $dbh ) = @_;
      PTDEBUG && $self->print_active_handles($dbh);
      $dbh->disconnect;
   }

   # Recursively print a DBI handle and its children (debug aid).
   sub print_active_handles {
      my ( $self, $thing, $level ) = @_;
      $level ||= 0;
      printf("# Active %sh: %s %s %s\n", ($thing->{Type} || 'undef'), "\t" x $level,
         $thing, (($thing->{Type} || '') eq 'st' ? $thing->{Statement} || '' : ''))
         or die "Cannot print: $OS_ERROR";
      foreach my $handle ( grep {defined} @{ $thing->{ChildHandles} } ) {
         $self->print_active_handles( $handle, $level + 1 );
      }
   }

   # Merge two DSN hashrefs into a new one.  By default dsn_2's defined
   # values win; with overwrite => 1, dsn_1's values win instead.
   sub copy {
      my ( $self, $dsn_1, $dsn_2, %args ) = @_;
      die 'I need a dsn_1 argument' unless $dsn_1;
      die 'I need a dsn_2 argument' unless $dsn_2;
      my %new_dsn = map {
         my $key = $_;
         my $val;
         if ( $args{overwrite} ) {
            $val = defined $dsn_1->{$key} ? $dsn_1->{$key} : $dsn_2->{$key};
         }
         else {
            $val = defined $dsn_2->{$key} ? $dsn_2->{$key} : $dsn_1->{$key};
         }
         $key => $val;
      } keys %{$self->{opts}};
      return \%new_dsn;
   }

   sub _d {
      my ($package, undef, $line) = caller 0;
      @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
           map { defined $_ ? $_ : 'undef' }
           @_;
      print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
   }

   1;
}
|
|
# ###########################################################################
|
|
# End DSNParser package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Daemon package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Daemon.pm
|
|
# t/lib/Daemon.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
   package Daemon;

   use strict;
   use warnings FATAL => 'all';
   use English qw(-no_match_vars);
   use constant PTDEBUG => $ENV{PTDEBUG} || 0;

   use POSIX qw(setsid);

   # Forks the tool into the background, detaches it from the terminal,
   # redirects the standard handles, and manages an optional PID file
   # and log file.

   # new(o => $option_parser)
   # Reads --log and --pid (if those options exist) from the OptionParser
   # object and dies early if the PID file already belongs to a live
   # process, so the tool fails before doing any work.
   sub new {
      my ( $class, %args ) = @_;
      foreach my $arg ( qw(o) ) {
         die "I need a $arg argument" unless $args{$arg};
      }
      my $o = $args{o};
      my $self = {
         o        => $o,
         log_file => $o->has('log') ? $o->get('log') : undef,
         PID_file => $o->has('pid') ? $o->get('pid') : undef,
      };

      # Check before forking; called as a plain function, hence undef self.
      check_PID_file(undef, $self->{PID_file});

      PTDEBUG && _d('Daemonized child will log to', $self->{log_file});
      return bless $self, $class;
   }

   # Fork, let the parent exit, and turn the child into a session leader
   # with STDIN detached and STDOUT/STDERR sent to the log file (if any)
   # or to /dev/null when they are still attached to a terminal.
   sub daemonize {
      my ( $self ) = @_;

      PTDEBUG && _d('About to fork and daemonize');
      defined (my $pid = fork()) or die "Cannot fork: $OS_ERROR";
      if ( $pid ) {
         PTDEBUG && _d('Parent PID', $PID, 'exiting after forking child PID',$pid);
         exit;
      }

      PTDEBUG && _d('Daemonizing child PID', $PID);
      $self->{PID_owner} = $PID;   # only this process may remove the PID file
      $self->{child}     = 1;

      POSIX::setsid() or die "Cannot start a new session: $OS_ERROR";
      chdir '/'       or die "Cannot chdir to /: $OS_ERROR";

      $self->_make_PID_file();

      $OUTPUT_AUTOFLUSH = 1;   # don't lose buffered output in the daemon

      PTDEBUG && _d('Redirecting STDIN to /dev/null');
      close STDIN;
      open  STDIN, '/dev/null'
         or die "Cannot reopen STDIN to /dev/null: $OS_ERROR";

      if ( $self->{log_file} ) {
         PTDEBUG && _d('Redirecting STDOUT and STDERR to', $self->{log_file});
         close STDOUT;
         open  STDOUT, '>>', $self->{log_file}
            or die "Cannot open log file $self->{log_file}: $OS_ERROR";

         close STDERR;
         open  STDERR, ">&STDOUT"
            or die "Cannot dupe STDERR to STDOUT: $OS_ERROR";
      }
      else {
         # No log file: silence only handles still attached to a terminal,
         # so redirections set up by the caller are preserved.
         if ( -t STDOUT ) {
            PTDEBUG && _d('No log file and STDOUT is a terminal;',
               'redirecting to /dev/null');
            close STDOUT;
            open  STDOUT, '>', '/dev/null'
               or die "Cannot reopen STDOUT to /dev/null: $OS_ERROR";
         }
         if ( -t STDERR ) {
            PTDEBUG && _d('No log file and STDERR is a terminal;',
               'redirecting to /dev/null');
            close STDERR;
            open  STDERR, '>', '/dev/null'
               or die "Cannot reopen STDERR to /dev/null: $OS_ERROR";
         }
      }

      return;
   }

   # Callable as a method or as Daemon::check_PID_file(undef, $file).
   # Dies if the PID file exists and holds a live PID, or holds no PID
   # at all; warns (and returns) if the PID is dead, in which case the
   # file will be overwritten later by _make_PID_file().
   sub check_PID_file {
      my ( $self, $file ) = @_;
      my $PID_file = $self ? $self->{PID_file} : $file;
      PTDEBUG && _d('Checking PID file', $PID_file);
      if ( $PID_file && -f $PID_file ) {
         my $pid;
         # Read the file with open() instead of the previous backticks
         # call (`cat $PID_file`): interpolating the filename into a
         # shell command is unsafe with shell metacharacters, spawns a
         # needless subprocess, and hides the real I/O error.
         eval {
            open my $PID_FH, '<', $PID_file
               or die "Cannot open $PID_file: $OS_ERROR";
            $pid = <$PID_FH>;
            close $PID_FH;
            chomp $pid if defined $pid;
         };
         die "Cannot read $PID_file: $EVAL_ERROR" if $EVAL_ERROR;
         PTDEBUG && _d('PID file exists; it contains PID', $pid);
         if ( $pid ) {
            my $pid_is_alive = kill 0, $pid;   # signal 0: existence check only
            if ( $pid_is_alive ) {
               die "The PID file $PID_file already exists "
                  . " and the PID that it contains, $pid, is running";
            }
            else {
               warn "Overwriting PID file $PID_file because the PID that it "
                  . "contains, $pid, is not running";
            }
         }
         else {
            die "The PID file $PID_file already exists but it does not "
               . "contain a PID";
         }
      }
      else {
         PTDEBUG && _d('No PID file');
      }
      return;
   }

   # Public wrapper for non-daemonized scripts that still want a PID
   # file; daemonized scripts get one automatically from daemonize().
   sub make_PID_file {
      my ( $self ) = @_;
      if ( exists $self->{child} ) {
         die "Do not call Daemon::make_PID_file() for daemonized scripts";
      }
      $self->_make_PID_file();
      $self->{PID_owner} = $PID;
      return;
   }

   # Write the current PID to the PID file (after re-checking that no
   # live process owns it).  No-op when no PID file was requested.
   sub _make_PID_file {
      my ( $self ) = @_;

      my $PID_file = $self->{PID_file};
      if ( !$PID_file ) {
         PTDEBUG && _d('No PID file to create');
         return;
      }

      $self->check_PID_file();

      open my $PID_FH, '>', $PID_file
         or die "Cannot open PID file $PID_file: $OS_ERROR";
      print $PID_FH $PID
         or die "Cannot print to PID file $PID_file: $OS_ERROR";
      close $PID_FH
         or die "Cannot close PID file $PID_file: $OS_ERROR";

      PTDEBUG && _d('Created PID file:', $self->{PID_file});
      return;
   }

   # Best-effort removal of the PID file; warns instead of dying because
   # this runs during cleanup/DESTROY.
   sub _remove_PID_file {
      my ( $self ) = @_;
      if ( $self->{PID_file} && -f $self->{PID_file} ) {
         unlink $self->{PID_file}
            or warn "Cannot remove PID file $self->{PID_file}: $OS_ERROR";
         PTDEBUG && _d('Removed PID file');
      }
      else {
         PTDEBUG && _d('No PID to remove');
      }
      return;
   }

   sub DESTROY {
      my ( $self ) = @_;

      # Only the process that created the PID file removes it, so forked
      # children exiting don't delete the parent's PID file.
      $self->_remove_PID_file() if ($self->{PID_owner} || 0) == $PID;

      return;
   }

   sub _d {
      my ($package, undef, $line) = caller 0;
      @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
           map { defined $_ ? $_ : 'undef' }
           @_;
      print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
   }

   1;
}
|
|
# ###########################################################################
|
|
# End Daemon package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Quoter package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Quoter.pm
|
|
# t/lib/Quoter.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
   package Quoter;

   use strict;
   use warnings FATAL => 'all';
   use English qw(-no_match_vars);
   use constant PTDEBUG => $ENV{PTDEBUG} || 0;

   # Helpers for quoting/unquoting MySQL identifiers and literal values,
   # and for (de)serializing lists into a single escaped string.

   sub new {
      my ( $class, %args ) = @_;
      return bless {}, $class;
   }

   # Backtick-quote one or more identifiers and join them with '.':
   # quote('db', 'tbl') => `db`.`tbl`.  Embedded backticks are doubled.
   sub quote {
      my ( $self, @idents ) = @_;
      s/`/``/g for @idents;
      return join '.', map { "`$_`" } @idents;
   }

   # Quote a value for use as a SQL literal.
   sub quote_val {
      my ( $self, $val ) = @_;

      return 'NULL' unless defined $val;          # undef = NULL
      return "''" if $val eq '';                  # blank string = ''
      return $val if $val =~ m/^0x[0-9a-fA-F]+$/; # hex data

      $val =~ s/(['\\])/\\$1/g;   # escape quotes and backslashes
      return "'$val'";
   }

   # Split a possibly-quoted db.tbl name into (db, tbl), stripping all
   # backticks; falls back to $default_db when no db part is present.
   sub split_unquote {
      my ( $self, $db_tbl, $default_db ) = @_;
      $db_tbl =~ s/`//g;
      my ( $db, $tbl ) = split /[.]/, $db_tbl;
      ( $db, $tbl ) = ( $default_db, $db ) if !$tbl;
      return ( $db, $tbl );
   }

   # Quote a string for a LIKE clause so its wildcards match literally.
   sub literal_like {
      my ( $self, $like ) = @_;
      return unless $like;
      $like =~ s/([%_])/\\$1/g;
      return "'$like'";
   }

   # Backtick-quote a db.tbl string, supplying $default_db when the
   # string has no db part.  Parts already starting with ` are left alone.
   sub join_quote {
      my ( $self, $default_db, $db_tbl ) = @_;
      return unless $db_tbl;
      my ( $db, $tbl ) = split /[.]/, $db_tbl;
      ( $db, $tbl ) = ( $default_db, $db ) if !$tbl;
      $db  = "`$db`"  if $db  && $db  !~ m/^`/;
      $tbl = "`$tbl`" if $tbl && $tbl !~ m/^`/;
      return $db ? "$db.$tbl" : $tbl;
   }

   # Join values into one comma-separated string, escaping each value so
   # it can be split apart again by deserialize_list().  A single undef
   # round-trips as undef (NULL); undef among multiple values is fatal.
   sub serialize_list {
      my ( $self, @args ) = @_;
      return unless @args;

      return $args[0] if @args == 1 && !defined $args[0];

      die "Cannot serialize multiple values with undef/NULL"
         if grep { !defined $_ } @args;

      return join ',', map { quotemeta } @args;
   }

   # Inverse of serialize_list(): split on unescaped commas, then remove
   # the quotemeta escaping from each element.
   sub deserialize_list {
      my ( $self, $string ) = @_;
      return $string unless defined $string;
      my @escaped_parts = $string =~ /
            \G             # Start of string, or end of previous match.
            (              # Each of these is an element in the original list.
               [^\\,]*     # Anything not a backslash or a comma
               (?:         # When we get here, we found one of the above.
                  \\.      # A backslash followed by something so we can continue
                  [^\\,]*  # Same as above.
               )*          # Repeat zero of more times.
            )
            ,              # Comma dividing elements
         /sxgc;

      # The final element has no trailing comma, so grab whatever is left.
      push @escaped_parts, pos($string) ? substr( $string, pos($string) ) : $string;

      my @unescaped_parts = map {
         my $part = $_;

         my $char_class = utf8::is_utf8($part)  # If it's a UTF-8 string,
            ? qr/(?=\p{ASCII})\W/               # We only care about non-word
            : qr/(?=\p{ASCII})\W|[\x{80}-\x{FF}]/; # Otherwise,
         $part =~ s/\\($char_class)/$1/g;
         $part;
      } @escaped_parts;

      return @unescaped_parts;
   }

   1;
}
|
|
# ###########################################################################
|
|
# End Quoter package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# TableNibbler package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/TableNibbler.pm
|
|
# t/lib/TableNibbler.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
   package TableNibbler;

   use strict;
   use warnings FATAL => 'all';
   use English qw(-no_match_vars);
   use constant PTDEBUG => $ENV{PTDEBUG} || 0;

   # Builds the column lists, WHERE clauses, and placeholder "slices"
   # needed to walk a table in index order ("nibble"), and to delete or
   # re-insert the rows fetched that way.  A slice is a list of ordinals
   # into the fetched row that supplies values for the ?-placeholders.

   sub new {
      my ( $class, %args ) = @_;
      my @required_args = qw(TableParser Quoter);
      foreach my $arg ( @required_args ) {
         die "I need a $arg argument" unless $args{$arg};
      }
      my $self = { %args };
      return bless $self, $class;
   }

   # generate_asc_stmt(%args)
   # Build the scaffolding for ascending an index: the SELECT column
   # list, a boundary WHERE clause for each comparison (<, <=, >=, >),
   # and the slice/scols that bind a previous row's values into the
   # chosen clause.  Required: tbl_struct (from TableParser::parse),
   # index.  Optional: cols, asc_first (first index column only),
   # asc_only (strict '>' instead of '>=').
   sub generate_asc_stmt {
      my ( $self, %args ) = @_;
      my @required_args = qw(tbl_struct index);
      foreach my $arg ( @required_args ) {
         die "I need a $arg argument" unless defined $args{$arg};
      }
      my ($tbl_struct, $index) = @args{@required_args};
      my @cols = $args{cols} ? @{$args{cols}} : @{$tbl_struct->{cols}};
      my $q    = $self->{Quoter};

      die "Index '$index' does not exist in table"
         unless exists $tbl_struct->{keys}->{$index};
      PTDEBUG && _d('Will ascend index', $index);

      my @asc_cols = @{$tbl_struct->{keys}->{$index}->{cols}};
      if ( $args{asc_first} ) {
         # Scalar assigned to an array: keeps only the first index column.
         @asc_cols = $asc_cols[0];
         PTDEBUG && _d('Ascending only first column');
      }
      PTDEBUG && _d('Will ascend columns', join(', ', @asc_cols));

      # Map each index column to its ordinal in @cols, appending any
      # index column the caller's column list didn't already include.
      my @asc_slice;
      my %col_posn = do { my $i = 0; map { $_ => $i++ } @cols };
      foreach my $col ( @asc_cols ) {
         if ( !exists $col_posn{$col} ) {
            push @cols, $col;
            $col_posn{$col} = $#cols;
         }
         push @asc_slice, $col_posn{$col};
      }
      PTDEBUG && _d('Will ascend, in ordinal position:', join(', ', @asc_slice));

      my $asc_stmt = {
         cols  => \@cols,
         index => $index,
         where => '',
         slice => [],
         scols => [],
      };

      if ( @asc_slice ) {
         my $cmp_where;
         foreach my $cmp ( qw(< <= >= >) ) {
            $cmp_where = $self->generate_cmp_where(
               type        => $cmp,
               slice       => \@asc_slice,
               cols        => \@cols,
               quoter      => $q,
               is_nullable => $tbl_struct->{is_nullable},
            );
            $asc_stmt->{boundaries}->{$cmp} = $cmp_where->{where};
         }
         # slice/scols are identical for every comparison type, so the
         # values from the last loop iteration serve for all of them.
         my $cmp = $args{asc_only} ? '>' : '>=';
         $asc_stmt->{where} = $asc_stmt->{boundaries}->{$cmp};
         $asc_stmt->{slice} = $cmp_where->{slice};
         $asc_stmt->{scols} = $cmp_where->{scols};
      }

      return $asc_stmt;
   }

   # generate_cmp_where(%args)
   # Build one row-comparison WHERE clause over the sliced columns, e.g.
   # for columns (a, b) and type '>=' roughly:
   #   ((a > ?) OR (a = ? AND b >= ?))     [plus IS NULL guards]
   # Returns { where => $sql, slice => \@ordinals, scols => \@names };
   # slice/scols give, in order, which row values fill the placeholders.
   # NULL-able columns bind their value twice (once for the IS NULL test).
   sub generate_cmp_where {
      my ( $self, %args ) = @_;
      foreach my $arg ( qw(type slice cols is_nullable) ) {
         die "I need a $arg arg" unless defined $args{$arg};
      }
      my @slice       = @{$args{slice}};
      my @cols        = @{$args{cols}};
      my $is_nullable = $args{is_nullable};
      my $type        = $args{type};
      my $q           = $self->{Quoter};

      # Strict form of the comparison: '>=' -> '>', '<=' -> '<'.
      (my $cmp = $type) =~ s/=//;

      my @r_slice; # Resulting slice columns, by ordinal
      my @r_scols; # Ditto, by name

      my @clauses;
      foreach my $i ( 0 .. $#slice ) {
         my @clause;

         # Columns before the i-th must equal the boundary row's values.
         foreach my $j ( 0 .. $i - 1 ) {
            my $ord = $slice[$j];
            my $col = $cols[$ord];
            my $quo = $q->quote($col);
            if ( $is_nullable->{$col} ) {
               push @clause, "((? IS NULL AND $quo IS NULL) OR ($quo = ?))";
               push @r_slice, $ord, $ord;
               push @r_scols, $col, $col;
            }
            else {
               push @clause, "$quo = ?";
               push @r_slice, $ord;
               push @r_scols, $col;
            }
         }

         # The i-th column gets the actual comparison; only the last
         # clause of the group may use the inclusive (=) form.
         my $ord = $slice[$i];
         my $col = $cols[$ord];
         my $quo = $q->quote($col);
         my $end = $i == $#slice; # Last clause of the whole group.
         if ( $is_nullable->{$col} ) {
            if ( $type =~ m/=/ && $end ) {
               push @clause, "(? IS NULL OR $quo $type ?)";
            }
            elsif ( $type =~ m/>/ ) {
               push @clause, "((? IS NULL AND $quo IS NOT NULL) OR ($quo $cmp ?))";
            }
            else { # If $type =~ m/</ ) {
               push @clause, "((? IS NOT NULL AND $quo IS NULL) OR ($quo $cmp ?))";
            }
            push @r_slice, $ord, $ord;
            push @r_scols, $col, $col;
         }
         else {
            push @r_slice, $ord;
            push @r_scols, $col;
            push @clause, ($type =~ m/=/ && $end ? "$quo $type ?" : "$quo $cmp ?");
         }

         push @clauses, '(' . join(' AND ', @clause) . ')';
      }
      my $result = '(' . join(' OR ', @clauses) . ')';
      my $where = {
         slice => \@r_slice,
         scols => \@r_scols,
         where => $result,
      };
      return $where;
   }

   # generate_del_stmt(%args)
   # Build the column list and equality WHERE clause needed to DELETE a
   # single row, using the best index TableParser can find (preferably
   # unique) so the match is exact.  Args: tbl_struct; optional cols,
   # index.
   sub generate_del_stmt {
      my ( $self, %args ) = @_;

      my $tbl  = $args{tbl_struct};
      my @cols = $args{cols} ? @{$args{cols}} : ();
      my $tp   = $self->{TableParser};
      my $q    = $self->{Quoter};

      my @del_cols;
      my @del_slice;

      my $index = $tp->find_best_index($tbl, $args{index});
      die "Cannot find an ascendable index in table" unless $index;

      # NOTE(review): the else branch below is unreachable because of
      # the die just above; preserved as-is.
      if ( $index ) {
         @del_cols = @{$tbl->{keys}->{$index}->{cols}};
      }
      else {
         @del_cols = @{$tbl->{cols}};
      }
      PTDEBUG && _d('Columns needed for DELETE:', join(', ', @del_cols));

      # Same ordinal-mapping as generate_asc_stmt: extend @cols with any
      # needed index column and record each one's position.
      my %col_posn = do { my $i = 0; map { $_ => $i++ } @cols };
      foreach my $col ( @del_cols ) {
         if ( !exists $col_posn{$col} ) {
            push @cols, $col;
            $col_posn{$col} = $#cols;
         }
         push @del_slice, $col_posn{$col};
      }
      PTDEBUG && _d('Ordinals needed for DELETE:', join(', ', @del_slice));

      my $del_stmt = {
         cols  => \@cols,
         index => $index,
         where => '',
         slice => [],
         scols => [],
      };

      # Equality clause per index column; NULL-able columns need the
      # IS NULL guard and bind their value twice.
      my @clauses;
      foreach my $i ( 0 .. $#del_slice ) {
         my $ord = $del_slice[$i];
         my $col = $cols[$ord];
         my $quo = $q->quote($col);
         if ( $tbl->{is_nullable}->{$col} ) {
            push @clauses, "((? IS NULL AND $quo IS NULL) OR ($quo = ?))";
            push @{$del_stmt->{slice}}, $ord, $ord;
            push @{$del_stmt->{scols}}, $col, $col;
         }
         else {
            push @clauses, "$quo = ?";
            push @{$del_stmt->{slice}}, $ord;
            push @{$del_stmt->{scols}}, $col;
         }
      }

      $del_stmt->{where} = '(' . join(' AND ', @clauses) . ')';

      return $del_stmt;
   }

   # generate_ins_stmt(%args)
   # Given the destination table struct and the SELECT column list, pick
   # out which selected columns exist in the destination: returns
   # { cols => \@names, slice => \@ordinals-into-the-selected-row }.
   sub generate_ins_stmt {
      my ( $self, %args ) = @_;
      foreach my $arg ( qw(ins_tbl sel_cols) ) {
         die "I need a $arg argument" unless $args{$arg};
      }
      my $ins_tbl  = $args{ins_tbl};
      my @sel_cols = @{$args{sel_cols}};

      die "You didn't specify any SELECT columns" unless @sel_cols;

      my @ins_cols;
      my @ins_slice;
      for my $i ( 0..$#sel_cols ) {
         next unless $ins_tbl->{is_col}->{$sel_cols[$i]};
         push @ins_cols, $sel_cols[$i];
         push @ins_slice, $i;
      }

      return {
         cols  => \@ins_cols,
         slice => \@ins_slice,
      };
   }

   sub _d {
      my ($package, undef, $line) = caller 0;
      @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
           map { defined $_ ? $_ : 'undef' }
           @_;
      print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
   }

   1;
}
|
|
# ###########################################################################
|
|
# End TableNibbler package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# TableParser package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/TableParser.pm
|
|
# t/lib/TableParser.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package TableParser;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
|
|
|
use Data::Dumper;
|
|
$Data::Dumper::Indent = 1;
|
|
$Data::Dumper::Sortkeys = 1;
|
|
$Data::Dumper::Quotekeys = 0;
|
|
|
|
# Constructor.  Requires a Quoter object; all given args are kept on
# the instance.
sub new {
   my ( $class, %args ) = @_;
   foreach my $arg ( qw(Quoter) ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   return bless { %args }, $class;
}
|
|
|
|
# Fetch SHOW CREATE TABLE output for db.tbl as a string, or return
# nothing if the statement fails.  Temporarily disables ANSI_QUOTES and
# forces SQL_QUOTE_SHOW_CREATE so the DDL is backtick-quoted (which
# parse() requires), restoring the session settings afterwards.
sub get_create_table {
   my ( $self, $dbh, $db, $tbl ) = @_;
   die "I need a dbh parameter" unless $dbh;
   die "I need a db parameter" unless $db;
   die "I need a tbl parameter" unless $tbl;
   my $q = $self->{Quoter};

   # Save the session's SQL mode and quoting setting, then strip
   # ANSI_QUOTES and enable identifier quoting (MySQL 4.1.1+ comment).
   my $new_sql_mode
      = '/*!40101 SET @OLD_SQL_MODE := @@SQL_MODE, '
      . q{@@SQL_MODE := REPLACE(REPLACE(@@SQL_MODE, 'ANSI_QUOTES', ''), ',,', ','), }
      . '@OLD_QUOTE := @@SQL_QUOTE_SHOW_CREATE, '
      . '@@SQL_QUOTE_SHOW_CREATE := 1 */';

   my $old_sql_mode = '/*!40101 SET @@SQL_MODE := @OLD_SQL_MODE, '
      . '@@SQL_QUOTE_SHOW_CREATE := @OLD_QUOTE */';

   # Best-effort: older servers may not support this, hence the eval.
   PTDEBUG && _d($new_sql_mode);
   eval { $dbh->do($new_sql_mode); };
   PTDEBUG && $EVAL_ERROR && _d($EVAL_ERROR);

   my $use_sql = 'USE ' . $q->quote($db);
   PTDEBUG && _d($dbh, $use_sql);
   $dbh->do($use_sql);

   my $show_sql = "SHOW CREATE TABLE " . $q->quote($db, $tbl);
   PTDEBUG && _d($show_sql);
   my $href;
   eval { $href = $dbh->selectrow_hashref($show_sql); };
   if ( $EVAL_ERROR ) {
      PTDEBUG && _d($EVAL_ERROR);

      # Restore session settings even on failure.
      PTDEBUG && _d($old_sql_mode);
      $dbh->do($old_sql_mode);

      return;
   }

   PTDEBUG && _d($old_sql_mode);
   $dbh->do($old_sql_mode);

   # The column name differs for tables vs. views ('Create Table' vs.
   # 'Create View'), so find it case-insensitively.
   my ($key) = grep { m/create (?:table|view)/i } keys %$href;
   if ( !$key ) {
      die "Error: no 'Create Table' or 'Create View' in result set from "
         . "$show_sql: " . Dumper($href);
   }

   return $href->{$key};
}
|
|
|
|
# Parse SHOW CREATE TABLE output into a hashref describing the table:
# column names and order, NULL-ability, auto-increment flags, numeric
# columns, keys, engine, per-column types, and default charset.  Dies
# if the DDL is not backtick-quoted (ANSI quoting in effect).
sub parse {
   my ( $self, $ddl, $opts ) = @_;
   return unless $ddl;

   if ( $ddl !~ m/CREATE (?:TEMPORARY )?TABLE `/ ) {
      die "Cannot parse table definition; is ANSI quoting "
         . "enabled or SQL_QUOTE_SHOW_CREATE disabled?";
   }

   my ($name) = $ddl =~ m/CREATE (?:TEMPORARY )?TABLE\s+(`.+?`)/;
   (undef, $name) = $self->{Quoter}->split_unquote($name) if $name;

   # Lowercase every backtick-quoted identifier so later matching is
   # case-insensitive.
   $ddl =~ s/(`[^`]+`)/\L$1/g;

   my $engine = $self->get_engine($ddl);

   # Column definitions are the indented lines that start with a
   # backtick-quoted name.
   my @defs = $ddl =~ m/^(\s+`.*?),?$/gm;
   my @cols = map { $_ =~ m/`([^`]+)`/ } @defs;
   PTDEBUG && _d('Table cols:', join(', ', map { "`$_`" } @cols));

   my %def_for;
   @def_for{@cols} = @defs;

   my (@nums, @null);
   my (%type_for, %is_nullable, %is_numeric, %is_autoinc);
   foreach my $col ( @cols ) {
      my $def = $def_for{$col};
      # The type is the first bareword after the quoted column name.
      my ( $type ) = $def =~ m/`[^`]+`\s([a-z]+)/;
      die "Can't determine column type for $def" unless $type;
      $type_for{$col} = $type;
      if ( $type =~ m/(?:(?:tiny|big|medium|small)?int|float|double|decimal|year)/ ) {
         push @nums, $col;
         $is_numeric{$col} = 1;
      }
      if ( $def !~ m/NOT NULL/ ) {
         push @null, $col;
         $is_nullable{$col} = 1;
      }
      $is_autoinc{$col} = $def =~ m/AUTO_INCREMENT/i ? 1 : 0;
   }

   my ($keys, $clustered_key) = $self->get_keys($ddl, $opts, \%is_nullable);

   my ($charset) = $ddl =~ m/DEFAULT CHARSET=(\w+)/;

   return {
      name           => $name,
      cols           => \@cols,
      col_posn       => { map { $cols[$_] => $_ } 0..$#cols },
      is_col         => { map { $_ => 1 } @cols },
      null_cols      => \@null,
      is_nullable    => \%is_nullable,
      is_autoinc     => \%is_autoinc,
      clustered_key  => $clustered_key,
      keys           => $keys,
      defs           => \%def_for,
      numeric_cols   => \@nums,
      is_numeric     => \%is_numeric,
      engine         => $engine,
      type_for       => \%type_for,
      charset        => $charset,
   };
}
|
|
|
|
# Return the table's BTREE index names ordered best-first:
#   1. PRIMARY key first
#   2. unique before non-unique
#   3. non-nullable before nullable
#   4. fewer columns before more columns
sub sort_indexes {
   my ( $self, $tbl ) = @_;

   my $keys = $tbl->{keys};
   my @indexes =
      sort {
            (($a ne 'PRIMARY') <=> ($b ne 'PRIMARY'))
         || ( !$keys->{$a}->{is_unique}   <=> !$keys->{$b}->{is_unique} )
         || (  $keys->{$a}->{is_nullable} <=>  $keys->{$b}->{is_nullable} )
         || ( scalar(@{$keys->{$a}->{cols}}) <=> scalar(@{$keys->{$b}->{cols}}) )
      }
      grep { $keys->{$_}->{type} eq 'BTREE' }
      sort keys %$keys;

   PTDEBUG && _d('Indexes sorted best-first:', join(', ', @indexes));
   return @indexes;
}
|
|
|
|
# Return the name of the index to use.  If $index is given, it is
# matched case-insensitively against the table's keys and must exist;
# otherwise the best index per sort_indexes() is returned.
sub find_best_index {
   my ( $self, $tbl, $index ) = @_;
   my $best;
   if ( $index ) {
      ($best) = grep { uc $_ eq uc $index } keys %{$tbl->{keys}};
      die "Index '$index' does not exist in table" if !$best;
   }
   else {
      ($best) = $self->sort_indexes($tbl);
   }
   PTDEBUG && _d('Best index found is', $best);
   return $best;
}
|
|
|
|
# Ask the server (via EXPLAIN SELECT ... WHERE $where) which indexes
# could satisfy the WHERE clause.  Returns the candidate index names
# with the one MySQL actually chose moved to the front (deduplicated),
# or an empty list if there is no WHERE or no possible keys.
sub find_possible_keys {
   my ( $self, $dbh, $database, $table, $quoter, $where ) = @_;
   return () unless $where;
   my $sql = 'EXPLAIN SELECT * FROM ' . $quoter->quote($database, $table)
           . ' WHERE ' . $where;
   PTDEBUG && _d($sql);
   my $expl = $dbh->selectrow_hashref($sql);
   # Lowercase the EXPLAIN column names for portability across versions.
   $expl = { map { lc($_) => $expl->{$_} } keys %$expl };
   if ( $expl->{possible_keys} ) {
      PTDEBUG && _d('possible_keys =', $expl->{possible_keys});
      my @candidates = split(',', $expl->{possible_keys});
      my %possible   = map { $_ => 1 } @candidates;
      if ( $expl->{key} ) {
         PTDEBUG && _d('MySQL chose', $expl->{key});
         # Prefer the key MySQL picked, but only if it was also listed
         # as possible; then drop duplicates, keeping first occurrence.
         unshift @candidates, grep { $possible{$_} } split(',', $expl->{key});
         PTDEBUG && _d('Before deduping:', join(', ', @candidates));
         my %seen;
         @candidates = grep { !$seen{$_}++ } @candidates;
      }
      PTDEBUG && _d('Final list:', join(', ', @candidates));
      return @candidates;
   }
   else {
      PTDEBUG && _d('No keys in possible_keys');
      return ();
   }
}
|
|
|
|
# Check that table $args{db}.$args{tbl} exists; with all_privs => 1,
# additionally verify the current user can SELECT, INSERT, UPDATE and
# DELETE on it.  Returns 1 on success, 0 on any failure.
sub check_table {
   my ( $self, %args ) = @_;
   my @required_args = qw(dbh db tbl);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($dbh, $db, $tbl) = @args{@required_args};
   my $q      = $self->{Quoter};
   my $db_tbl = $q->quote($db, $tbl);
   PTDEBUG && _d('Checking', $db_tbl);

   # Existence check; the LIKE pattern needs the literal escaped.
   my $sql = "SHOW TABLES FROM " . $q->quote($db)
           . ' LIKE ' . $q->literal_like($tbl);
   PTDEBUG && _d($sql);
   my $row;
   eval {
      $row = $dbh->selectrow_arrayref($sql);
   };
   if ( $EVAL_ERROR ) {
      PTDEBUG && _d($EVAL_ERROR);
      return 0;
   }
   if ( !$row->[0] || $row->[0] ne $tbl ) {
      PTDEBUG && _d('Table does not exist');
      return 0;
   }

   PTDEBUG && _d('Table exists; no privs to check');
   return 1 unless $args{all_privs};

   # SHOW FULL COLUMNS exposes a Privileges column for the current user.
   $sql = "SHOW FULL COLUMNS FROM $db_tbl";
   PTDEBUG && _d($sql);
   eval {
      $row = $dbh->selectrow_hashref($sql);
   };
   if ( $EVAL_ERROR ) {
      PTDEBUG && _d($EVAL_ERROR);
      return 0;
   }
   if ( !scalar keys %$row ) {
      PTDEBUG && _d('Table has no columns:', Dumper($row));
      return 0;
   }
   # Default to '' so a missing Privileges column can't crash the regex
   # matches below under FATAL warnings; it just fails the priv check.
   my $privs = $row->{privileges} || $row->{Privileges} || '';

   # DELETE privilege isn't listed in Privileges; probe with a no-op
   # DELETE instead.
   $sql = "DELETE FROM $db_tbl LIMIT 0";
   PTDEBUG && _d($sql);
   eval {
      $dbh->do($sql);
   };
   my $can_delete = $EVAL_ERROR ? 0 : 1;

   PTDEBUG && _d('User privs on', $db_tbl, ':', $privs,
      ($can_delete ? 'delete' : ''));

   if ( !($privs =~ m/select/ && $privs =~ m/insert/ && $privs =~ m/update/
          && $can_delete) ) {
      PTDEBUG && _d('User does not have all privs');
      return 0;
   }

   PTDEBUG && _d('User has all privs');
   return 1;
}
|
|
|
|
sub get_engine {
   my ( $self, $ddl, $opts ) = @_;
   # The engine appears after the closing paren of the column list, as
   # ENGINE= (modern servers) or TYPE= (pre-4.1).
   my $engine;
   if ( $ddl =~ m/\).*?(?:ENGINE|TYPE)=(\w+)/ ) {
      $engine = $1;
   }
   PTDEBUG && _d('Storage engine:', $engine);
   return $engine || undef;
}
|
|
|
|
# Parse the index (KEY) definitions out of a SHOW CREATE TABLE string.
# Args: $ddl (CREATE TABLE text), $opts (may carry mysql_version as a
# zero-padded string like '004001000'), $is_nullable (hashref: column
# name => true if the column is NULLable).
# Returns ($keys, $clustered_key): a hashref of index name => info
# hash, and the name of InnoDB's clustered index if one was found.
sub get_keys {
   my ( $self, $ddl, $opts, $is_nullable ) = @_;
   my $engine        = $self->get_engine($ddl);
   my $keys          = {};
   my $clustered_key = undef;

   KEY:
   foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY .*)$/gm ) {

      next KEY if $key =~ m/FOREIGN/;   # FOREIGN KEY is not an index

      my $key_ddl = $key;
      PTDEBUG && _d('Parsed key:', $key_ddl);

      # Only MEMORY/HEAP tables really use HASH indexes; other engines
      # may print USING HASH but actually build BTREE.
      if ( $engine !~ m/MEMORY|HEAP/ ) {
         $key =~ s/USING HASH/USING BTREE/;
      }

      my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \((.+)\)/;
      my ( $special ) = $key =~ m/(FULLTEXT|SPATIAL)/;
      $type = $type || $special || 'BTREE';
      if ( $opts->{mysql_version} && $opts->{mysql_version} lt '004001000'
         && $engine =~ m/HEAP|MEMORY/i )
      {
         $type = 'HASH'; # MySQL pre-4.1 supports only HASH indexes on HEAP
      }

      # Index name is either the bare word PRIMARY or a backtick-quoted
      # identifier.
      my ($name) = $key =~ m/(PRIMARY|`[^`]*`)/;
      my $unique = $key =~ m/PRIMARY|UNIQUE/ ? 1 : 0;
      my @cols;
      my @col_prefixes;
      # Each column may carry an optional prefix length, e.g. `c`(10).
      foreach my $col_def ( $cols =~ m/`[^`]+`(?:\(\d+\))?/g ) {
         my ($name, $prefix) = $col_def =~ m/`([^`]+)`(?:\((\d+)\))?/;
         push @cols, $name;
         push @col_prefixes, $prefix;
      }
      $name =~ s/`//g;

      PTDEBUG && _d( $name, 'key cols:', join(', ', map { "`$_`" } @cols));

      $keys->{$name} = {
         name         => $name,
         type         => $type,
         colnames     => $cols,                # raw column list text
         cols         => \@cols,
         col_prefixes => \@col_prefixes,       # parallel to cols; undef = none
         is_unique    => $unique,
         is_nullable  => scalar(grep { $is_nullable->{$_} } @cols),
         is_col       => { map { $_ => 1 } @cols },
         ddl          => $key_ddl,
      };

      # InnoDB clusters on PRIMARY, else the first unique NOT NULL index.
      if ( $engine =~ m/InnoDB/i && !$clustered_key ) {
         my $this_key = $keys->{$name};
         if ( $this_key->{name} eq 'PRIMARY' ) {
            $clustered_key = 'PRIMARY';
         }
         elsif ( $this_key->{is_unique} && !$this_key->{is_nullable} ) {
            $clustered_key = $this_key->{name};
         }
         PTDEBUG && $clustered_key && _d('This key is the clustered key');
      }
   }

   return $keys, $clustered_key;
}
|
|
|
|
# Parse FOREIGN KEY constraints out of a SHOW CREATE TABLE string.
# $opts->{database} supplies the default database for unqualified
# parent table names.  Returns a hashref: constraint name => info hash.
sub get_fks {
   my ( $self, $ddl, $opts ) = @_;
   my $q   = $self->{Quoter};
   my $fks = {};

   foreach my $fk (
      $ddl =~ m/CONSTRAINT .* FOREIGN KEY .* REFERENCES [^\)]*\)/mg )
   {
      my ( $name ) = $fk =~ m/CONSTRAINT `(.*?)`/;
      my ( $cols ) = $fk =~ m/FOREIGN KEY \(([^\)]+)\)/;
      my ( $parent, $parent_cols ) = $fk =~ m/REFERENCES (\S+) \(([^\)]+)\)/;

      my ($db, $tbl) = $q->split_unquote($parent, $opts->{database});
      my %parent_tbl = (tbl => $tbl);
      $parent_tbl{db} = $db if $db;

      # Qualify an unqualified parent name with the default database.
      if ( $parent !~ m/\./ && $opts->{database} ) {
         $parent = $q->quote($opts->{database}) . ".$parent";
      }

      $fks->{$name} = {
         name           => $name,
         colnames       => $cols,           # raw child-column list text
         cols           => [ map { s/[ `]+//g; $_; } split(',', $cols) ],
         parent_tbl     => \%parent_tbl,
         parent_tblname => $parent,
         parent_cols    => [ map { s/[ `]+//g; $_; } split(',', $parent_cols) ],
         parent_colnames=> $parent_cols,    # raw parent-column list text
         ddl            => $fk,
      };
   }

   return $fks;
}
|
|
|
|
sub remove_auto_increment {
   my ( $self, $ddl ) = @_;
   # Strip the AUTO_INCREMENT=N table option from the line that closes
   # the column list, so otherwise-identical DDLs compare equal.
   my $normalized = $ddl;
   $normalized =~ s/(^\).*?) AUTO_INCREMENT=\d+\b/$1/m;
   return $normalized;
}
|
|
|
|
sub get_table_status {
   my ( $self, $dbh, $db, $like ) = @_;
   my $q   = $self->{Quoter};
   my $sql = "SHOW TABLE STATUS FROM " . $q->quote($db);
   my @params;
   if ( $like ) {
      $sql .= ' LIKE ?';
      push @params, $like;
   }
   PTDEBUG && _d($sql, @params);
   my $sth = $dbh->prepare($sql);
   eval { $sth->execute(@params); };
   if ( $EVAL_ERROR ) {
      PTDEBUG && _d($EVAL_ERROR);
      return;
   }

   # Normalize each row: lowercase the column names and make sure an
   # engine value is present (older servers report Type or Comment).
   my @tables;
   foreach my $row ( @{ $sth->fetchall_arrayref({}) } ) {
      my %tbl;
      @tbl{ map { lc $_ } keys %$row } = values %$row;
      $tbl{engine} ||= $tbl{type} || $tbl{comment};
      delete $tbl{type};
      push @tables, \%tbl;
   }
   return @tables;
}
|
|
|
|
sub _d {
   # Debug-print to STDERR, prefixing the caller's package, line and
   # PID; embedded newlines stay comment-prefixed so output remains
   # valid "# ..." debug lines.
   my ($package, undef, $line) = caller 0;
   my @msg = map { defined $_ ? $_ : 'undef' } @_;
   @msg = map { (my $copy = $_) =~ s/\n/\n# /g; $copy } @msg;
   print STDERR "# $package:$line $PID ", join(' ', @msg), "\n";
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End TableParser package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Progress package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Progress.pm
|
|
# t/lib/Progress.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# Progress: periodic progress reporting for long-running jobs.  A job
# reports either every N seconds ('time'), every N update() calls
# ('iterations'), or every N percent of completion ('percentage').
package Progress;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

# new(%args) - required: jobsize (total units of work).  Either
# report+interval, or spec => [report, interval], must also be given.
# Optional: name (label used by the default reporter), start (epoch
# seconds; defaults to now), callback (reporting sub; the default
# prints percent done and time remaining via the Transformers package).
sub new {
   my ( $class, %args ) = @_;
   foreach my $arg (qw(jobsize)) {
      die "I need a $arg argument" unless defined $args{$arg};
   }
   if ( (!$args{report} || !$args{interval}) ) {
      if ( $args{spec} && @{$args{spec}} == 2 ) {
         @args{qw(report interval)} = @{$args{spec}};
      }
      else {
         die "I need either report and interval arguments, or a spec";
      }
   }

   my $name  = $args{name} || "Progress";
   $args{start} ||= time();
   my $self;
   $self = {
      last_reported => $args{start},
      fraction      => 0,       # How complete the job is
      callback      => sub {
         my ($fraction, $elapsed, $remaining, $eta) = @_;
         # NOTE(review): the format consumes only the first two values;
         # the ts($eta) argument appears unused by this format string.
         printf STDERR "$name: %3d%% %s remain\n",
            $fraction * 100,
            Transformers::secs_to_time($remaining),
            Transformers::ts($eta);
      },
      %args,
   };
   return bless $self, $class;
}

# Validate a [report, interval] spec; dies with a reason on bad input.
# Callable as Progress->validate_spec(...) or Progress::validate_spec(...).
sub validate_spec {
   shift @_ if $_[0] eq 'Progress'; # Permit calling as Progress-> or Progress::
   my ( $spec ) = @_;
   if ( @$spec != 2 ) {
      die "spec array requires a two-part argument\n";
   }
   if ( $spec->[0] !~ m/^(?:percentage|time|iterations)$/ ) {
      die "spec array's first element must be one of "
        . "percentage,time,iterations\n";
   }
   if ( $spec->[1] !~ m/^\d+$/ ) {
      die "spec array's second element must be an integer\n";
   }
}

# Replace the reporting callback.
sub set_callback {
   my ( $self, $callback ) = @_;
   $self->{callback} = $callback;
}

# (Re)start the clock; also re-arms the one-time first_report hook.
sub start {
   my ( $self, $start ) = @_;
   $self->{start} = $self->{last_reported} = $start || time();
   $self->{first_report} = 0;
}

# Call on every unit of work.  $callback returns the amount of work
# completed so far; the reporting callback fires only when the
# configured threshold (time/iterations/percentage) has been crossed.
sub update {
   my ( $self, $callback, %args ) = @_;
   my $jobsize = $self->{jobsize};
   # NOTE(review): "my $now ||=" behaves as plain "=" since $now starts
   # undefined.
   my $now ||= $args{now} || time;

   $self->{iterations}++; # How many updates have happened;

   # Fire the one-time first_report hook, if supplied and not yet run.
   if ( !$self->{first_report} && $args{first_report} ) {
      $args{first_report}->();
      $self->{first_report} = 1;
   }

   # Throttle by wall-clock time or by iteration count.
   if ( $self->{report} eq 'time'
         && $self->{interval} > $now - $self->{last_reported}
   ) {
      return;
   }
   elsif ( $self->{report} eq 'iterations'
         && ($self->{iterations} - 1) % $self->{interval} > 0
   ) {
      return;
   }
   $self->{last_reported} = $now;

   my $completed = $callback->();
   $self->{updates}++; # How many times we have run the update callback

   return if $completed > $jobsize;   # ignore overshoot

   my $fraction = $completed > 0 ? $completed / $jobsize : 0;

   # Throttle by percentage: skip if still in the same percent bucket.
   if ( $self->{report} eq 'percentage'
         && $self->fraction_modulo($self->{fraction})
            >= $self->fraction_modulo($fraction)
   ) {
      $self->{fraction} = $fraction;
      return;
   }
   $self->{fraction} = $fraction;

   # Estimate remaining time and ETA from the average rate so far.
   my $elapsed   = $now - $self->{start};
   my $remaining = 0;
   my $eta       = $now;
   if ( $completed > 0 && $completed <= $jobsize && $elapsed > 0 ) {
      my $rate = $completed / $elapsed;
      if ( $rate > 0 ) {
         $remaining = ($jobsize - $completed) / $rate;
         $eta       = $now + int($remaining);
      }
   }
   $self->{callback}->($fraction, $elapsed, $remaining, $eta, $completed);
}

# Round a 0..1 fraction down to the nearest interval-sized percentage
# bucket (e.g. interval 5: 0.27 -> 25).
sub fraction_modulo {
   my ( $self, $num ) = @_;
   $num *= 100; # Convert from fraction to percentage
   return sprintf('%d',
      sprintf('%d', $num / $self->{interval}) * $self->{interval});
}

# Debug printer, enabled by the PTDEBUG environment variable.
sub _d {
   my ($package, undef, $line) = caller 0;
   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
        map { defined $_ ? $_ : 'undef' }
        @_;
   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End Progress package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Retry package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Retry.pm
|
|
# t/lib/Retry.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# Retry: run a callback up to N times, with hooks between attempts and
# on final failure.
package Retry;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

sub new {
   my ( $class, %args ) = @_;
   return bless { %args }, $class;
}

# retry(%args) - required coderefs: try, fail, final_fail.
# Optional: tries (default 3), wait (default: sleep one second).
# Each attempt calls try->(tryno => N); on error, fail->(tryno, error)
# decides whether to keep going (false aborts early).  Returns try's
# result on success, otherwise whatever final_fail->(error) returns.
sub retry {
   my ( $self, %args ) = @_;
   foreach my $arg ( qw(try fail final_fail) ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($try, $fail, $final_fail) = @args{qw(try fail final_fail)};
   my $wait  = $args{wait}  || sub { sleep 1; };
   my $tries = $args{tries} || 3;

   my $last_error;
   my $attempt = 0;
   ATTEMPT:
   while ( ++$attempt <= $tries ) {
      PTDEBUG && _d("Try", $attempt, "of", $tries);
      my $result = eval { $try->(tryno => $attempt) };
      if ( !$EVAL_ERROR ) {
         PTDEBUG && _d("Try code succeeded");
         return $result;
      }

      PTDEBUG && _d("Try code failed:", $EVAL_ERROR);
      $last_error = $EVAL_ERROR;

      if ( $attempt < $tries ) {   # more attempts remain
         # fail() returning false means: stop retrying now.
         last ATTEMPT unless $fail->(tryno => $attempt, error => $last_error);
         PTDEBUG && _d("Calling wait code");
         $wait->(tryno => $attempt);
      }
   }

   PTDEBUG && _d('Try code did not succeed');
   return $final_fail->(error => $last_error);
}

# Debug printer, enabled by the PTDEBUG environment variable.
sub _d {
   my ($package, undef, $line) = caller 0;
   my @msg = map { defined $_ ? $_ : 'undef' } @_;
   @msg = map { (my $copy = $_) =~ s/\n/\n# /g; $copy } @msg;
   print STDERR "# $package:$line $PID ", join(' ', @msg), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End Retry package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Cxn package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Cxn.pm
|
|
# t/lib/Cxn.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# Cxn: a lightweight wrapper around a DBI connection, pairing the
# parsed DSN with its dbh and remembering the server's hostname.
package Cxn;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

# Test-only switch: when set, name() returns the DSN string instead of
# the hostname, making test output deterministic.
use constant PERCONA_TOOLKIT_TEST_USE_DSN_NAMES => $ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} || 0;

# new(%args) - required: DSNParser, OptionParser.
# Optional: dsn (pre-parsed DSN hashref), dsn_string, prev_dsn (values
# are inherited from it), dbh (existing connection), set (callback run
# whenever a dbh is installed via set_dbh).
sub new {
   my ( $class, %args ) = @_;
   my @required_args = qw(DSNParser OptionParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   };
   my ($dp, $o) = @args{@required_args};

   my $dsn_defaults = $dp->parse_options($o);
   my $prev_dsn     = $args{prev_dsn};
   my $dsn          = $args{dsn};
   if ( !$dsn ) {
      # No DSN given: parse dsn_string (default: localhost), filling
      # in missing keys from prev_dsn and the command-line defaults.
      $args{dsn_string} ||= 'h=' . ($dsn_defaults->{h} || 'localhost');

      $dsn = $dp->parse(
         $args{dsn_string}, $prev_dsn, $dsn_defaults);
   }
   elsif ( $prev_dsn ) {
      # Inherit missing DSN keys from the previous DSN.
      $dsn = $dp->copy($prev_dsn, $dsn);
   }

   my $self = {
      dsn          => $dsn,
      dbh          => $args{dbh},
      dsn_name     => $dp->as_string($dsn, [qw(h P S)]),
      hostname     => '',
      set          => $args{set},
      dbh_set      => 0,    # 1 once set_dbh() has initialized the dbh
      OptionParser => $o,
      DSNParser    => $dp,
   };

   return bless $self, $class;
}

# Connect (or reconnect, if the old handle no longer pings) and return
# the dbh.  Prompts for a password at most once when --ask-pass is set.
sub connect {
   my ( $self ) = @_;
   my $dsn = $self->{dsn};
   my $dp  = $self->{DSNParser};
   my $o   = $self->{OptionParser};

   my $dbh = $self->{dbh};
   if ( !$dbh || !$dbh->ping() ) {
      if ( $o->get('ask-pass') && !$self->{asked_for_pass} ) {
         $dsn->{p} = OptionParser::prompt_noecho("Enter MySQL password: ");
         $self->{asked_for_pass} = 1;
      }
      $dbh = $dp->get_dbh($dp->get_cxn_params($dsn), { AutoCommit => 1 });
   }
   PTDEBUG && _d($dbh, 'Connected dbh to', $self->{name});

   return $self->set_dbh($dbh);
}

# Install a dbh on this Cxn: normalize fetched hash-key case, record
# the server's hostname, and run the optional set callback.  Calling
# it again with the same dbh is a no-op.
sub set_dbh {
   my ($self, $dbh) = @_;

   if ( $self->{dbh} && $self->{dbh} == $dbh && $self->{dbh_set} ) {
      PTDEBUG && _d($dbh, 'Already set dbh');
      return $dbh;
   }

   PTDEBUG && _d($dbh, 'Setting dbh');

   $dbh->{FetchHashKeyName} = 'NAME_lc';   # lowercase column names

   my $sql = 'SELECT @@hostname, @@server_id';
   PTDEBUG && _d($dbh, $sql);
   my ($hostname, $server_id) = $dbh->selectrow_array($sql);
   PTDEBUG && _d($dbh, 'hostname:', $hostname, $server_id);
   if ( $hostname ) {
      $self->{hostname} = $hostname;
   }

   if ( my $set = $self->{set}) {
      $set->($dbh);
   }

   $self->{dbh}     = $dbh;
   $self->{dbh_set} = 1;
   return $dbh;
}

# Accessor: the current dbh (may be undef before connect()).
sub dbh {
   my ($self) = @_;
   return $self->{dbh};
}

# Accessor: the parsed DSN hashref.
sub dsn {
   my ($self) = @_;
   return $self->{dsn};
}

# Human-readable name for this connection: hostname when known,
# otherwise the DSN string.
sub name {
   my ($self) = @_;
   return $self->{dsn_name} if PERCONA_TOOLKIT_TEST_USE_DSN_NAMES;
   return $self->{hostname} || $self->{dsn_name} || 'unknown host';
}

# Disconnect on destruction so connections aren't leaked.
sub DESTROY {
   my ($self) = @_;
   if ( $self->{dbh} && ref($self->{dbh}) ) {
      PTDEBUG && _d('Disconnecting dbh', $self->{dbh}, $self->{name});
      $self->{dbh}->disconnect();
   }
   return;
}

# Debug printer, enabled by the PTDEBUG environment variable.
sub _d {
   my ($package, undef, $line) = caller 0;
   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
        map { defined $_ ? $_ : 'undef' }
        @_;
   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End Cxn package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# MasterSlave package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/MasterSlave.pm
|
|
# t/lib/MasterSlave.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package MasterSlave;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
|
|
|
sub new {
   my ( $class, %args ) = @_;
   # Copy caller args; replication_thread always starts as a fresh
   # cache, even if a value was passed in.
   my $self = { %args };
   $self->{replication_thread} = {};
   return bless $self, $class;
}
|
|
|
|
# Discover this master's slaves according to --recursion-method:
#   processlist/hosts (default) - recurse through SHOW PROCESSLIST /
#                                 SHOW SLAVE HOSTS (needs dbh + dsn args);
#   dsn=DSN                     - read slave DSNs from a table;
#   none                        - return no slaves.
# Returns an arrayref of connection objects built by the make_cxn
# callback.
sub get_slaves {
   my ($self, %args) = @_;
   my @required_args = qw(make_cxn OptionParser DSNParser Quoter);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($make_cxn, $o, $dp) = @args{@required_args};

   my $slaves = [];
   my $method = $o->get('recursion-method');
   PTDEBUG && _d('Slave recursion method:', $method);
   if ( !$method || $method =~ m/processlist|hosts/i ) {
      my @required_args = qw(dbh dsn);
      foreach my $arg ( @required_args ) {
         die "I need a $arg argument" unless $args{$arg};
      }
      my ($dbh, $dsn) = @args{@required_args};
      $self->recurse_to_slaves(
         {  dbh        => $dbh,
            dsn        => $dsn,
            dsn_parser => $dp,
            recurse    => $o->get('recurse'),
            method     => $o->get('recursion-method'),
            callback   => sub {
               my ( $dsn, $dbh, $level, $parent ) = @_;
               return unless $level;   # level 0 is the master itself
               PTDEBUG && _d('Found slave:', $dp->as_string($dsn));
               push @$slaves, $make_cxn->(dsn => $dsn, dbh => $dbh);
               return;
            },
         }
      );
   }
   elsif ( $method =~ m/^dsn=/i ) {
      my ($dsn_table_dsn) = $method =~ m/^dsn=(.+)/i;
      $slaves = $self->get_cxn_from_dsn_table(
         %args,
         dsn_table_dsn => $dsn_table_dsn,
      );
   }
   elsif ( $method =~ m/none/i ) {
      PTDEBUG && _d('Not getting to slaves');
   }
   else {
      die "Invalid --recursion-method: $method. Valid values are: "
        . "dsn=DSN, hosts, or processlist.\n";
   }

   return $slaves;
}
|
|
|
|
# Depth-first traversal of the replication hierarchy starting at
# $args->{dsn}.  For every reachable server, $args->{callback} is
# invoked with ($dsn, $dbh, $level, $parent); servers already visited
# (or whose server ID disagrees with what the master advertised) get
# $args->{skip_callback} instead.  Depth is bounded by
# $args->{recurse} (undef = unlimited).  Unreachable servers are
# reported to STDERR and skipped.
sub recurse_to_slaves {
   my ( $self, $args, $level ) = @_;
   $level ||= 0;
   my $dp  = $args->{dsn_parser};
   my $dsn = $args->{dsn};

   if ( lc($args->{method} || '') eq 'none' ) {
      PTDEBUG && _d('Not recursing to slaves');
      return;
   }

   my $dbh;
   eval {
      $dbh = $args->{dbh} || $dp->get_dbh(
         $dp->get_cxn_params($dsn), { AutoCommit => 1 });
      PTDEBUG && _d('Connected to', $dp->as_string($dsn));
   };
   if ( $EVAL_ERROR ) {
      # A dead branch should not abort the whole scan.
      print STDERR "Cannot connect to ", $dp->as_string($dsn), "\n"
         or die "Cannot print: $OS_ERROR";
      return;
   }

   my $sql = 'SELECT @@SERVER_ID';
   PTDEBUG && _d($sql);
   my ($id) = $dbh->selectrow_array($sql);
   PTDEBUG && _d('Working on server ID', $id);
   my $master_thinks_i_am = $dsn->{server_id};
   # Skip: servers with no ID, servers whose actual ID differs from
   # what the master reported, and servers already seen (the ++ marks
   # this ID as visited).
   if ( !defined $id
       || ( defined $master_thinks_i_am && $master_thinks_i_am != $id )
       || $args->{server_ids_seen}->{$id}++
   ) {
      PTDEBUG && _d('Server ID seen, or not what master said');
      if ( $args->{skip_callback} ) {
         $args->{skip_callback}->($dsn, $dbh, $level, $args->{parent});
      }
      return;
   }

   $args->{callback}->($dsn, $dbh, $level, $args->{parent});

   if ( !defined $args->{recurse} || $level < $args->{recurse} ) {

      my @slaves =
         grep { !$_->{master_id} || $_->{master_id} == $id } # Only my slaves.
         $self->find_slave_hosts($dp, $dbh, $dsn, $args->{method});

      foreach my $slave ( @slaves ) {
         PTDEBUG && _d('Recursing from',
            $dp->as_string($dsn), 'to', $dp->as_string($slave));
         # dbh => undef forces a fresh connection to the child.
         $self->recurse_to_slaves(
            { %$args, dsn => $slave, dbh => undef, parent => $dsn }, $level + 1 );
      }
   }
}
|
|
|
|
sub find_slave_hosts {
   my ( $self, $dsn_parser, $dbh, $dsn, $method ) = @_;

   # Decide which discovery methods to try, and in what order.
   my @methods = qw(processlist hosts);
   if ( $method ) {
      # The requested method goes first; the other remains a fallback.
      @methods = ($method, grep { $_ ne $method } @methods);
   }
   elsif ( ($dsn->{P} || 3306) != 3306 ) {
      PTDEBUG && _d('Port number is non-standard; using only hosts method');
      @methods = qw(hosts);
   }
   PTDEBUG && _d('Looking for slaves on', $dsn_parser->as_string($dsn),
      'using methods', @methods);

   my @slaves;
   METHOD:
   foreach my $try ( @methods ) {
      my $finder = "_find_slaves_by_$try";
      PTDEBUG && _d('Finding slaves with', $finder);
      @slaves = $self->$finder($dsn_parser, $dbh, $dsn);
      last METHOD if @slaves;   # first method that yields slaves wins
   }

   PTDEBUG && _d('Found', scalar(@slaves), 'slaves');
   return @slaves;
}
|
|
|
|
# Derive slave DSNs from SHOW PROCESSLIST: every connection running
# Binlog Dump is a slave's I/O thread, and its host column tells us
# where that slave lives.
sub _find_slaves_by_processlist {
   my ( $self, $dsn_parser, $dbh, $dsn ) = @_;

   my @slaves = map {
      my $slave = $dsn_parser->parse("h=$_", $dsn);  # inherit port/user/etc.
      $slave->{source} = 'processlist';
      $slave;
   }
   grep { $_ }   # drop entries with no usable host
   map {
      # PROCESSLIST host looks like "host:port"; keep the host part.
      # NOTE(review): if the host has no ":port" suffix, $host is undef
      # and the eq below warns under FATAL warnings -- confirm against
      # real server output.
      my ( $host ) = $_->{host} =~ m/^([^:]+):/;
      if ( $host eq 'localhost' ) {
         $host = '127.0.0.1'; # Replication never uses sockets.
      }
      $host;
   } $self->get_connected_slaves($dbh);

   return @slaves;
}
|
|
|
|
# Derive slave DSNs from SHOW SLAVE HOSTS (slaves appear here only if
# configured with report_host).  Column names are lowercased; user and
# password are included in the DSN only when the server reports them.
sub _find_slaves_by_hosts {
   my ( $self, $dsn_parser, $dbh, $dsn ) = @_;

   my @slaves;
   my $sql = 'SHOW SLAVE HOSTS';
   PTDEBUG && _d($dbh, $sql);
   @slaves = @{$dbh->selectall_arrayref($sql, { Slice => {} })};

   if ( @slaves ) {
      PTDEBUG && _d('Found some SHOW SLAVE HOSTS info');
      @slaves = map {
         my %hash;
         @hash{ map { lc $_ } keys %$_ } = values %$_;
         my $spec = "h=$hash{host},P=$hash{port}"
            . ( $hash{user} ? ",u=$hash{user}" : '')
            . ( $hash{password} ? ",p=$hash{password}" : '');
         # This $dsn shadows the outer one on purpose: parse() uses the
         # outer DSN (still visible as its 2nd arg) for defaults.
         my $dsn           = $dsn_parser->parse($spec, $dsn);
         $dsn->{server_id} = $hash{server_id};
         $dsn->{master_id} = $hash{master_id};
         $dsn->{source}    = 'hosts';
         $dsn;
      } @slaves;
   }

   return @slaves;
}
|
|
|
|
# Return PROCESSLIST rows (hashrefs with lowercased keys) for connected
# slave I/O threads, i.e. connections whose Command is Binlog Dump.
# First verifies via SHOW GRANTS that the current user holds the
# PROCESS privilege (or ALL PRIVILEGES ON *.*); dies if not.
sub get_connected_slaves {
   my ( $self, $dbh ) = @_;

   my $show = "SHOW GRANTS FOR ";
   my $user = 'CURRENT_USER()';
   my $vp   = $self->{VersionParser};
   if ( $vp && !$vp->version_ge($dbh, '4.1.2') ) {
      # Pre-4.1.2 lacks CURRENT_USER(); fetch USER() and quote it by hand.
      $user = $dbh->selectrow_arrayref('SELECT USER()')->[0];
      $user =~ s/([^@]+)@(.+)/'$1'\@'$2'/;
   }
   my $sql = $show . $user;
   PTDEBUG && _d($dbh, $sql);

   my $proc;
   eval {
      $proc = grep {
         m/ALL PRIVILEGES.*?\*\.\*|PROCESS/
      } @{$dbh->selectcol_arrayref($sql)};
   };
   if ( $EVAL_ERROR ) {

      if ( $EVAL_ERROR =~ m/no such grant defined for user/ ) {
         # Some servers only know the account without its host part;
         # retry with just the user name.
         PTDEBUG && _d('Retrying SHOW GRANTS without host; error:',
            $EVAL_ERROR);
         ($user) = split('@', $user);
         $sql    = $show . $user;
         PTDEBUG && _d($sql);
         eval {
            $proc = grep {
               m/ALL PRIVILEGES.*?\*\.\*|PROCESS/
            } @{$dbh->selectcol_arrayref($sql)};
         };
      }

      die "Failed to $sql: $EVAL_ERROR" if $EVAL_ERROR;
   }
   if ( !$proc ) {
      die "You do not have the PROCESS privilege";
   }

   $sql = 'SHOW PROCESSLIST';
   PTDEBUG && _d($dbh, $sql);
   # Implicit return: the Binlog Dump rows, with column names lowercased.
   grep { $_->{command} =~ m/Binlog Dump/i }
   map { # Lowercase the column names
      my %hash;
      @hash{ map { lc $_ } keys %$_ } = values %$_;
      \%hash;
   }
   @{$dbh->selectall_arrayref($sql, { Slice => {} })};
}
|
|
|
|
# Sanity-check that $master really is the master of $slave: the slave
# must be connected to the master's port with a user the master can
# see, and when the I/O thread is idle, both must agree on the current
# binlog file (allowing one file of skew for rotation races).  Dies
# with a reason on any mismatch; returns 1 on success.
sub is_master_of {
   my ( $self, $master, $slave ) = @_;
   my $master_status = $self->get_master_status($master)
      or die "The server specified as a master is not a master";
   my $slave_status = $self->get_slave_status($slave)
      or die "The server specified as a slave is not a slave";
   my @connected     = $self->get_connected_slaves($master)
      or die "The server specified as a master has no connected slaves";
   my (undef, $port) = $master->selectrow_array('SHOW VARIABLES LIKE "port"');

   if ( $port != $slave_status->{master_port} ) {
      die "The slave is connected to $slave_status->{master_port} "
         . "but the master's port is $port";
   }

   if ( !grep { $slave_status->{master_user} eq $_->{user} } @connected ) {
      die "I don't see any slave I/O thread connected with user "
         . $slave_status->{master_user};
   }

   if ( ($slave_status->{slave_io_state} || '')
        eq 'Waiting for master to send event' )
   {
      # Compare binlog coordinates only when the I/O thread is caught
      # up; split "name.000123" into name and numeric suffix.
      my ( $master_log_name, $master_log_num )
         = $master_status->{file} =~ m/^(.*?)\.0*([1-9][0-9]*)$/;
      my ( $slave_log_name, $slave_log_num )
         = $slave_status->{master_log_file} =~ m/^(.*?)\.0*([1-9][0-9]*)$/;
      if ( $master_log_name ne $slave_log_name
           || abs($master_log_num - $slave_log_num) > 1 )
      {
         die "The slave thinks it is reading from "
            . "$slave_status->{master_log_file}, but the "
            . "master is writing to $master_status->{file}";
      }
   }
   return 1;
}
|
|
|
|
sub get_master_dsn {
   my ( $self, $dbh, $dsn, $dsn_parser ) = @_;
   # No slave status means there is no master to report.
   my $status = $self->get_slave_status($dbh) or return undef;
   my $spec   = "h=$status->{master_host},P=$status->{master_port}";
   return $dsn_parser->parse($spec, $dsn);
}
|
|
|
|
# Return SHOW SLAVE STATUS as a hashref with lowercased keys, or
# nothing if the server is not a slave.  Non-slaves are remembered
# per-dbh so the query isn't repeated on later calls.
sub get_slave_status {
   my ( $self, $dbh ) = @_;
   if ( !$self->{not_a_slave}->{$dbh} ) {
      # Cache the statement handle per-dbh.
      my $sth = $self->{sths}->{$dbh}->{SLAVE_STATUS}
            ||= $dbh->prepare('SHOW SLAVE STATUS');
      PTDEBUG && _d($dbh, 'SHOW SLAVE STATUS');
      $sth->execute();
      my ($ss) = @{$sth->fetchall_arrayref({})};

      if ( $ss && %$ss ) {
         $ss = { map { lc($_) => $ss->{$_} } keys %$ss }; # lowercase the keys
         return $ss;
      }

      PTDEBUG && _d('This server returns nothing for SHOW SLAVE STATUS');
      $self->{not_a_slave}->{$dbh}++;
   }
   # Implicit empty return for non-slaves (and on repeat calls).
}
|
|
|
|
# Return SHOW MASTER STATUS as a hashref with lowercased keys, or
# nothing if the server is not a master (e.g. binary logging is off).
# Non-masters are remembered per-dbh so the query isn't repeated.
sub get_master_status {
   my ( $self, $dbh ) = @_;

   if ( $self->{not_a_master}->{$dbh} ) {
      PTDEBUG && _d('Server on dbh', $dbh, 'is not a master');
      return;
   }

   # Cache the statement handle per-dbh.
   my $sth = $self->{sths}->{$dbh}->{MASTER_STATUS}
         ||= $dbh->prepare('SHOW MASTER STATUS');
   PTDEBUG && _d($dbh, 'SHOW MASTER STATUS');
   $sth->execute();
   my ($ms) = @{$sth->fetchall_arrayref({})};
   PTDEBUG && _d(
      $ms ? map { "$_=" . (defined $ms->{$_} ? $ms->{$_} : '') } keys %$ms
          : '');

   if ( !$ms || scalar keys %$ms < 2 ) {
      PTDEBUG && _d('Server on dbh', $dbh, 'does not seem to be a master');
      $self->{not_a_master}->{$dbh}++;
      # Return nothing, consistent with the not_a_master guard above;
      # falling through here would dereference an undef/partial $ms.
      return;
   }

   return { map { lc($_) => $ms->{$_} } keys %$ms }; # lowercase the keys
}
|
|
|
|
sub wait_for_master {
   my ( $self, %args ) = @_;
   my @required_args = qw(master_status slave_dbh);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($master_status, $slave_dbh) = @args{@required_args};
   my $timeout = $args{timeout} || 60;

   my ($result, $waited);
   if ( $master_status ) {
      # Block on the slave until it reaches the master's coordinates,
      # or the timeout expires (MASTER_POS_WAIT returns -1 on timeout,
      # NULL if the slave SQL thread is not running).
      my $sql = "SELECT MASTER_POS_WAIT('$master_status->{file}', "
              . "$master_status->{position}, $timeout)";
      PTDEBUG && _d($slave_dbh, $sql);
      my $start = time;
      ($result) = $slave_dbh->selectrow_array($sql);
      $waited   = time - $start;
      PTDEBUG && _d('Result of waiting:', $result);
      PTDEBUG && _d("Waited", $waited, "seconds");
   }
   else {
      PTDEBUG && _d('Not waiting: this server is not a master');
   }

   return {
      result => $result,
      waited => $waited,
   };
}
|
|
|
|
sub stop_slave {
   my ( $self, $dbh ) = @_;
   # Reuse a cached statement handle for this connection if we have one.
   my $cache = $self->{sths}->{$dbh} ||= {};
   $cache->{STOP_SLAVE} ||= $dbh->prepare('STOP SLAVE');
   my $sth = $cache->{STOP_SLAVE};
   PTDEBUG && _d($dbh, $sth->{Statement});
   $sth->execute();
}
|
|
|
|
sub start_slave {
   my ( $self, $dbh, $pos ) = @_;
   if ( $pos ) {
      # Run only up to the given coordinates, then stop again.
      my $sql = "START SLAVE UNTIL MASTER_LOG_FILE='$pos->{file}', "
              . "MASTER_LOG_POS=$pos->{position}";
      PTDEBUG && _d($dbh, $sql);
      return $dbh->do($sql);
   }
   # Plain START SLAVE; reuse a cached statement handle.
   my $sth = $self->{sths}->{$dbh}->{START_SLAVE}
         ||= $dbh->prepare('START SLAVE');
   PTDEBUG && _d($dbh, $sth->{Statement});
   return $sth->execute();
}
|
|
|
|
# Make $slave catch up to $master and leave both stopped.  Stops both
# servers, then (if the slave is behind) runs the slave with START
# SLAVE UNTIL the master's coordinates and waits via MASTER_POS_WAIT.
# Dies if the slave cannot catch up or is still running afterward.
# Returns the wait_for_master() result hashref, or undef if the slave
# was already caught up.
sub catchup_to_master {
   my ( $self, $slave, $master, $timeout ) = @_;
   $self->stop_slave($master);
   $self->stop_slave($slave);
   my $slave_status  = $self->get_slave_status($slave);
   my $slave_pos     = $self->repl_posn($slave_status);
   my $master_status = $self->get_master_status($master);
   my $master_pos    = $self->repl_posn($master_status);
   PTDEBUG && _d('Master position:', $self->pos_to_string($master_pos),
      'Slave position:', $self->pos_to_string($slave_pos));

   my $result;
   if ( $self->pos_cmp($slave_pos, $master_pos) < 0 ) {
      PTDEBUG && _d('Waiting for slave to catch up to master');
      $self->start_slave($slave, $master_pos);

      # (master_status was previously passed twice; once suffices.)
      $result = $self->wait_for_master(
         master_status => $master_status,
         slave_dbh     => $slave,
         timeout       => $timeout,
      );
      if ( !defined $result->{result} ) {
         # MASTER_POS_WAIT() returned NULL: either the slave stopped
         # (fine, if it stopped at the master's position) or it never
         # reached the position at all.
         $slave_status = $self->get_slave_status($slave);
         if ( !$self->slave_is_running($slave_status) ) {
            PTDEBUG && _d('Master position:',
               $self->pos_to_string($master_pos),
               'Slave position:', $self->pos_to_string($slave_pos));
            $slave_pos = $self->repl_posn($slave_status);
            if ( $self->pos_cmp($slave_pos, $master_pos) != 0 ) {
               die "MASTER_POS_WAIT() returned NULL but slave has not "
                  . "caught up to master";
            }
            PTDEBUG && _d('Slave is caught up to master and stopped');
         }
         else {
            die "Slave has not caught up to master and it is still running";
         }
      }
   }
   else {
      PTDEBUG && _d("Slave is already caught up to master");
   }

   return $result;
}
|
|
|
|
sub catchup_to_same_pos {
   my ( $self, $s1_dbh, $s2_dbh ) = @_;
   # Stop both slaves, then run whichever is behind up to the other's
   # position.
   $self->stop_slave($s1_dbh);
   $self->stop_slave($s2_dbh);
   my $pos1 = $self->repl_posn($self->get_slave_status($s1_dbh));
   my $pos2 = $self->repl_posn($self->get_slave_status($s2_dbh));
   if ( $self->pos_cmp($pos1, $pos2) < 0 ) {
      $self->start_slave($s1_dbh, $pos2);
   }
   elsif ( $self->pos_cmp($pos2, $pos1) < 0 ) {
      $self->start_slave($s2_dbh, $pos1);
   }

   # Re-check: both must now be stopped at the identical position.
   my $stat1 = $self->get_slave_status($s1_dbh);
   my $stat2 = $self->get_slave_status($s2_dbh);
   $pos1 = $self->repl_posn($stat1);
   $pos2 = $self->repl_posn($stat2);

   if ( $self->slave_is_running($stat1)
     || $self->slave_is_running($stat2)
     || $self->pos_cmp($pos1, $pos2) != 0 )
   {
      die "The servers aren't both stopped at the same position";
   }

}
|
|
|
|
sub slave_is_running {
   my ( $self, $slave_status ) = @_;
   # The SQL thread is the one that applies events; 'Yes' means running.
   my $sql_running = $slave_status->{slave_sql_running} || 'No';
   return $sql_running eq 'Yes';
}
|
|
|
|
sub has_slave_updates {
   my ( $self, $dbh ) = @_;
   # log_slave_updates must be on for a slave to write replicated
   # events to its own binlog (needed for chained replication).
   my $sql = q{SHOW VARIABLES LIKE 'log_slave_updates'};
   PTDEBUG && _d($dbh, $sql);
   my (undef, $value) = $dbh->selectrow_array($sql);
   return $value && $value =~ m/^(1|ON)$/;
}
|
|
|
|
sub repl_posn {
   my ( $self, $status ) = @_;
   # Master status carries file/position directly; slave status carries
   # the equivalent relay_master_log_file/exec_master_log_pos pair.
   if ( exists $status->{file} && exists $status->{position} ) {
      return {
         file     => $status->{file},
         position => $status->{position},
      };
   }
   return {
      file     => $status->{relay_master_log_file},
      position => $status->{exec_master_log_pos},
   };
}
|
|
|
|
sub get_slave_lag {
   my ( $self, $dbh ) = @_;
   # Seconds_Behind_Master from SHOW SLAVE STATUS.  An empty return
   # means this server is not a slave at all.
   my $status = $self->get_slave_status($dbh);
   return unless $status;
   return $status->{seconds_behind_master};
}
|
|
|
|
sub pos_cmp {
   my ( $self, $pos_a, $pos_b ) = @_;
   # Compare replication positions via their canonical zero-padded
   # string form (see pos_to_string) so a plain cmp orders correctly.
   my $left  = $self->pos_to_string($pos_a);
   my $right = $self->pos_to_string($pos_b);
   return $left cmp $right;
}
|
|
|
|
sub short_host {
   my ( $self, $dsn ) = @_;
   # Prefer the master host/port (from SHOW SLAVE STATUS fields in the
   # DSN) when present; otherwise use the DSN's own h/P values.
   my ($host, $port) = $dsn->{master_host}
      ? @{$dsn}{qw(master_host master_port)}
      : @{$dsn}{qw(h P)};
   $host ||= '[default]';
   $port ||= 3306;
   # The port is omitted when it is the default 3306.
   return $port == 3306 ? $host : "$host:$port";
}
|
|
|
|
sub is_replication_thread {
   my ( $self, $query, %args ) = @_;
   return unless $query;

   # type is one of: binlog_dump, slave_io, slave_sql, all (default).
   my $type = lc($args{type} || 'all');
   die "Invalid type: $type"
      unless $type =~ m/^binlog_dump|slave_io|slave_sql|all$/i;

   my $is_repl = 0;

   # A Binlog Dump thread is the master-side half of replication.
   if ( $type =~ m/binlog_dump|all/i ) {
      my $command = $query->{Command} || $query->{command} || '';
      $is_repl = 1 if $command eq "Binlog Dump";
   }

   if ( !$is_repl ) {
      my $user = $query->{User} || $query->{user} || '';
      if ( $user eq "system user" ) {
         PTDEBUG && _d("Slave replication thread");
         if ( $type eq 'all' ) {
            $is_repl = 1;
         }
         else {
            my $state = $query->{State} || $query->{state} || '';
            # NOTE(review): precedence makes this match /^init/ OR
            # /end$/, not ^(init|end)$; kept as-is to preserve behavior.
            if ( $state =~ m/^init|end$/ ) {
               PTDEBUG && _d("Special state:", $state);
               $is_repl = 1;
            }
            else {
               # These states belong to the slave SQL thread; any other
               # state on a "system user" thread is taken as IO thread.
               my ($slave_sql) = $state =~ m/
                  ^(Waiting\sfor\sthe\snext\sevent
                  |Reading\sevent\sfrom\sthe\srelay\slog
                  |Has\sread\sall\srelay\slog;\swaiting
                  |Making\stemp\sfile
                  |Waiting\sfor\sslave\smutex\son\sexit)/xi;

               if ( $type eq 'slave_sql' ) {
                  $is_repl = $slave_sql ? 1 : 0;
               }
               elsif ( $type eq 'slave_io' ) {
                  $is_repl = $slave_sql ? 0 : 1;
               }
               else {
                  $is_repl = 0;
               }
            }
         }
      }
      else {
         PTDEBUG && _d('Not system user');
      }

      # Remember matched thread IDs so later calls still recognize a
      # replication thread whose state momentarily looks ordinary.
      if ( !defined $args{check_known_ids} || $args{check_known_ids} ) {
         my $id = $query->{Id} || $query->{id};
         if ( $is_repl ) {
            $self->{replication_thread}->{$id} = 1;
         }
         elsif ( $self->{replication_thread}->{$id} ) {
            PTDEBUG && _d("Thread ID is a known replication thread ID");
            $is_repl = 1;
         }
      }
   }

   PTDEBUG && _d('Matches', $type, 'replication thread:',
      ($is_repl ? 'yes' : 'no'), '; match:', $is_repl);

   return $is_repl;
}
|
|
|
|
|
|
sub get_replication_filters {
   my ( $self, %args ) = @_;
   my @required_args = qw(dbh);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($dbh) = @args{@required_args};

   my %filters = ();

   # Binlog filters are reported if the server is a master.
   my $master_status = $self->get_master_status($dbh);
   if ( $master_status ) {
      foreach my $var ( qw(binlog_do_db binlog_ignore_db) ) {
         next unless defined $master_status->{$var}
                  && $master_status->{$var} ne '';
         $filters{$var} = $master_status->{$var};
      }
   }

   # Replicate filters are reported if the server is a slave.
   my $slave_status = $self->get_slave_status($dbh);
   if ( $slave_status ) {
      foreach my $var ( qw(
         replicate_do_db
         replicate_ignore_db
         replicate_do_table
         replicate_ignore_table
         replicate_wild_do_table
         replicate_wild_ignore_table
      ) ) {
         next unless defined $slave_status->{$var}
                  && $slave_status->{$var} ne '';
         $filters{$var} = $slave_status->{$var};
      }

      # slave_skip_errors is a server variable, not part of the
      # SHOW SLAVE STATUS output.
      my $sql = "SHOW VARIABLES LIKE 'slave_skip_errors'";
      PTDEBUG && _d($dbh, $sql);
      my $row = $dbh->selectrow_arrayref($sql);
      $filters{slave_skip_errors} = $row->[1] if $row->[1] && $row->[1] ne 'OFF';
   }

   return \%filters;
}
|
|
|
|
|
|
sub pos_to_string {
   my ( $self, $pos ) = @_;
   # Zero-pad the position to 20 digits so that positions compare
   # correctly as plain strings (see pos_cmp).
   return sprintf('%s/%020d', $pos->{file}, $pos->{position});
}
|
|
|
|
sub reset_known_replication_threads {
   my ( $self ) = @_;
   # Forget every thread ID cached by is_replication_thread().
   $self->{replication_thread} = {};
   return;
}
|
|
|
|
sub get_cxn_from_dsn_table {
   my ($self, %args) = @_;
   my @required_args = qw(dsn_table_dsn make_cxn DSNParser Quoter);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($dsn_table_dsn, $make_cxn, $dp, $q) = @args{@required_args};
   PTDEBUG && _d('DSN table DSN:', $dsn_table_dsn);

   my $dsn = $dp->parse($dsn_table_dsn);

   # The DSN table can be named either as D=db,t=tbl or as t=db.tbl.
   my $dsn_table;
   if ( $dsn->{D} && $dsn->{t} ) {
      $dsn_table = $q->quote($dsn->{D}, $dsn->{t});
   }
   elsif ( $dsn->{t} && $dsn->{t} =~ m/\./ ) {
      $dsn_table = $q->quote($q->split_unquote($dsn->{t}));
   }
   else {
      die "DSN table DSN does not specify a database (D) "
         . "or a database-qualified table (t)";
   }

   # Read every DSN string stored in the table, in id order, and turn
   # each one into a connection object via the make_cxn callback.
   my $dsn_tbl_cxn = $make_cxn->(dsn => $dsn);
   my $dbh         = $dsn_tbl_cxn->connect();
   my $sql         = "SELECT dsn FROM $dsn_table ORDER BY id";
   PTDEBUG && _d($sql);
   my $dsn_strings = $dbh->selectcol_arrayref($sql);
   my @cxn;
   foreach my $dsn_string ( @{ $dsn_strings || [] } ) {
      PTDEBUG && _d('DSN from DSN table:', $dsn_string);
      push @cxn, $make_cxn->(dsn_string => $dsn_string);
   }
   return \@cxn;
}
|
|
|
|
sub _d {
   my ($package, undef, $line) = caller 0;
   # Render args for debug output: undef becomes the string 'undef',
   # and embedded newlines are continued as '# ' comment lines so the
   # output stays grep-able.
   my @msg = map { defined $_ ? $_ : 'undef' } @_;
   @msg    = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } @msg;
   print STDERR "# $package:$line $PID ", join(' ', @msg), "\n";
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End MasterSlave package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# ReplicaLagWaiter package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/ReplicaLagWaiter.pm
|
|
# t/lib/ReplicaLagWaiter.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# Waits until all replicas have caught up to within max_lag seconds,
# polling each one's lag via a caller-supplied callback.
package ReplicaLagWaiter;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

use Time::HiRes qw(sleep time);
use Data::Dumper;

# Create a ReplicaLagWaiter.  Required args:
#   oktorun - coderef; a falsy return aborts the wait loop
#   get_lag - coderef($cxn): returns the replica's lag in seconds, or
#             undef (treated below as "stopped")
#   sleep   - coderef($cxn, $lag): called between polls while lagging
#   max_lag - maximum acceptable lag, in seconds
#   slaves  - arrayref of Cxn objects to monitor
sub new {
   my ( $class, %args ) = @_;
   my @required_args = qw(oktorun get_lag sleep max_lag slaves);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless defined $args{$arg};
   }

   my $self = {
      %args,
   };

   return bless $self, $class;
}

# Block until every slave's lag is defined and <= max_lag, or until
# oktorun() returns false.  Optional arg Progress: a Progress object
# used to report the worst (most lagged or stopped) replica.
sub wait {
   my ( $self, %args ) = @_;
   my @required_args = qw();
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my $pr = $args{Progress};

   my $oktorun = $self->{oktorun};
   my $get_lag = $self->{get_lag};
   my $sleep   = $self->{sleep};
   my $slaves  = $self->{slaves};
   my $max_lag = $self->{max_lag};

   my $worst;  # most lagging slave; updated by the loop below and
               # read by the progress closures
   my $pr_callback;
   my $pr_first_report;
   if ( $pr ) {
      # Both closures capture $worst so each report reflects the
      # current worst replica, not the one seen at setup time.
      $pr_callback = sub {
         my ($fraction, $elapsed, $remaining, $eta, $completed) = @_;
         my $dsn_name = $worst->{cxn}->name();
         if ( defined $worst->{lag} ) {
            print STDERR "Replica lag is " . ($worst->{lag} || '?')
               . " seconds on $dsn_name. Waiting.\n";
         }
         else {
            print STDERR "Replica $dsn_name is stopped. Waiting.\n";
         }
         return;
      };
      $pr->set_callback($pr_callback);

      $pr_first_report = sub {
         my $dsn_name = $worst->{cxn}->name();
         if ( !defined $worst->{lag} ) {
            print STDERR "Replica $dsn_name is stopped. Waiting.\n";
         }
         return;
      };
   }

   # Poll every still-lagging slave; a slave leaves the list as soon
   # as its lag is defined and within max_lag.
   my @lagged_slaves = map { {cxn=>$_, lag=>undef} } @$slaves;
   while ( $oktorun->() && @lagged_slaves ) {
      PTDEBUG && _d('Checking slave lag');
      for my $i ( 0..$#lagged_slaves ) {
         my $lag = $get_lag->($lagged_slaves[$i]->{cxn});
         PTDEBUG && _d($lagged_slaves[$i]->{cxn}->name(),
            'slave lag:', $lag);
         if ( !defined $lag || $lag > $max_lag ) {
            $lagged_slaves[$i]->{lag} = $lag;
         }
         else {
            # delete leaves an undef hole in the array; the grep
            # below compacts it out.
            delete $lagged_slaves[$i];
         }
      }

      @lagged_slaves = grep { defined $_ } @lagged_slaves;
      if ( @lagged_slaves ) {
         # Ascending by lag with undef (stopped) sorted last, then
         # reversed: element 0 is the stopped or most-lagged replica.
         @lagged_slaves = reverse sort {
              defined $a->{lag} && defined $b->{lag} ? $a->{lag} <=> $b->{lag}
            : defined $a->{lag}                      ? -1
            :                                           1;
         } @lagged_slaves;
         $worst = $lagged_slaves[0];
         PTDEBUG && _d(scalar @lagged_slaves, 'slaves are lagging, worst:',
            $worst->{lag}, 'on', Dumper($worst->{cxn}->dsn()));

         if ( $pr ) {
            $pr->update(
               sub { return 0; },
               first_report => $pr_first_report,
            );
         }

         PTDEBUG && _d('Calling sleep callback');
         $sleep->($worst->{cxn}, $worst->{lag});
      }
   }

   PTDEBUG && _d('All slaves caught up');
   return;
}

# Debug printer: "# Package:line PID args..." on STDERR; undef args
# print as 'undef', embedded newlines continue as comment lines.
sub _d {
   my ($package, undef, $line) = caller 0;
   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
        map { defined $_ ? $_ : 'undef' }
        @_;
   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End ReplicaLagWaiter package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# MySQLStatusWaiter package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/MySQLStatusWaiter.pm
|
|
# t/lib/MySQLStatusWaiter.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# Pauses work while MySQL status variables (e.g. Threads_running)
# exceed configured thresholds; dies if a critical threshold is hit.
package MySQLStatusWaiter;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

# Create a MySQLStatusWaiter.  Required args:
#   max_spec   - arrayref of "var", "var:val", or "var=val" specs for
#                pause thresholds
#   get_status - coderef($var): returns the variable's current value
#   sleep      - coderef: called between rechecks while paused
#   oktorun    - coderef; a falsy return stops waiting
# Optional:
#   critical_spec - like max_spec, but reaching these is fatal
# Specs without an explicit value get a threshold derived from the
# variable's initial value: +20% for max, +100% for critical.
sub new {
   my ( $class, %args ) = @_;
   my @required_args = qw(max_spec get_status sleep oktorun);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless defined $args{$arg};
   }

   PTDEBUG && _d('Parsing spec for max thresholds');
   my $max_val_for = _parse_spec($args{max_spec});
   if ( $max_val_for ) {
      _check_and_set_vals(
         vars             => $max_val_for,
         get_status       => $args{get_status},
         threshold_factor => 0.2, # +20%
      );
   }

   PTDEBUG && _d('Parsing spec for critical thresholds');
   my $critical_val_for = _parse_spec($args{critical_spec} || []);
   if ( $critical_val_for ) {
      _check_and_set_vals(
         vars             => $critical_val_for,
         get_status       => $args{get_status},
         threshold_factor => 1.0, # double (x2; +100%)
      );
   }

   my $self = {
      get_status       => $args{get_status},
      sleep            => $args{sleep},
      oktorun          => $args{oktorun},
      max_val_for      => $max_val_for,
      critical_val_for => $critical_val_for,
   };

   return bless $self, $class;
}

# Parse spec strings into { var => value-or-undef }.  Returns nothing
# for an empty spec list; dies on malformed entries.  An undef value
# means "derive the threshold later" (see _check_and_set_vals).
sub _parse_spec {
   my ($spec) = @_;

   return unless $spec && scalar @$spec;

   my %max_val_for;
   foreach my $var_val ( @$spec ) {
      die "Empty or undefined spec\n" unless $var_val;
      $var_val =~ s/^\s+//;
      $var_val =~ s/\s+$//g;

      my ($var, $val) = split /[:=]/, $var_val;
      die "$var_val does not contain a variable\n" unless $var;
      die "$var is not a variable name\n" unless $var =~ m/^[a-zA-Z_]+$/;

      if ( !$val ) {
         PTDEBUG && _d('Will get intial value for', $var, 'later');
         $max_val_for{$var} = undef;
      }
      else {
         die "The value for $var must be a number\n"
            unless $val =~ m/^[\d\.]+$/;
         $max_val_for{$var} = $val;
      }
   }

   return \%max_val_for;
}

# Accessor: hashref of { var => max threshold }.
sub max_values {
   my ($self) = @_;
   return $self->{max_val_for};
}

# Accessor: hashref of { var => critical threshold }.
sub critical_values {
   my ($self) = @_;
   return $self->{critical_val_for};
}

# Block while any watched variable is >= its max threshold or cannot
# be read; die if any value reaches its critical threshold.  Optional
# arg Progress: a Progress object for reporting why we are paused.
sub wait {
   my ( $self, %args ) = @_;

   return unless $self->{max_val_for};

   my $pr = $args{Progress}; # optional

   my $oktorun    = $self->{oktorun};
   my $get_status = $self->{get_status};
   my $sleep      = $self->{sleep};

   # Start by assuming every watched var is too high; each iteration
   # re-checks only the vars still in this hash.
   my %vals_too_high = %{$self->{max_val_for}};
   my $pr_callback;
   if ( $pr ) {
      $pr_callback = sub {
         print STDERR "Pausing because "
            . join(', ',
                 map {
                    "$_="
                    . (defined $vals_too_high{$_} ? $vals_too_high{$_}
                                                  : 'unknown')
                 } sort keys %vals_too_high
              )
            . ".\n";
         return;
      };
      $pr->set_callback($pr_callback);
   }

   while ( $oktorun->() ) {
      PTDEBUG && _d('Checking status variables');
      foreach my $var ( sort keys %vals_too_high ) {
         my $val = $get_status->($var);
         PTDEBUG && _d($var, '=', $val);
         # Critical threshold is checked first and is fatal.
         if ( $val
              && exists $self->{critical_val_for}->{$var}
              && $val >= $self->{critical_val_for}->{$var} ) {
            die "$var=$val exceeds its critical threshold "
               . "$self->{critical_val_for}->{$var}\n";
         }
         # An unreadable (falsy) value also counts as too high.
         if ( !$val || $val >= $self->{max_val_for}->{$var} ) {
            $vals_too_high{$var} = $val;
         }
         else {
            delete $vals_too_high{$var};
         }
      }

      last unless scalar keys %vals_too_high;

      PTDEBUG && _d(scalar keys %vals_too_high, 'values are too high:',
         %vals_too_high);
      if ( $pr ) {
         $pr->update(sub { return 0; });
      }
      PTDEBUG && _d('Calling sleep callback');
      $sleep->();
      %vals_too_high = %{$self->{max_val_for}}; # recheck all vars
   }

   PTDEBUG && _d('All var vals are low enough');
   return;
}

# Verify each watched variable exists and fill in derived thresholds
# for specs that had no explicit value:
#   threshold = int(initial_value * (1 + threshold_factor))
sub _check_and_set_vals {
   my (%args) = @_;
   my @required_args = qw(vars get_status threshold_factor);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless defined $args{$arg};
   }
   my ($vars, $get_status, $threshold_factor) = @args{@required_args};

   PTDEBUG && _d('Checking and setting values');
   return unless $vars && scalar %$vars;

   foreach my $var ( keys %$vars ) {
      my $init_val = $get_status->($var);
      die "Variable $var does not exist or its value is undefined\n"
         unless defined $init_val;
      my $val;
      if ( defined $vars->{$var} ) {
         $val = $vars->{$var};
      }
      else {
         PTDEBUG && _d('Initial', $var, 'value:', $init_val);
         $val = int(($init_val * $threshold_factor) + $init_val);
         $vars->{$var} = $val;
      }
      PTDEBUG && _d('Wait if', $var, '>=', $val);
   }
}

# Debug printer: "# Package:line PID args..." on STDERR; undef args
# print as 'undef', embedded newlines continue as comment lines.
sub _d {
   my ($package, undef, $line) = caller 0;
   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
        map { defined $_ ? $_ : 'undef' }
        @_;
   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End MySQLStatusWaiter package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# WeightedAvgRate package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/WeightedAvgRate.pm
|
|
# t/lib/WeightedAvgRate.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# Tracks an exponentially weighted average rate (n per second) and
# suggests how many units to do next so each batch takes target_t
# seconds.
package WeightedAvgRate;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

# Create a WeightedAvgRate.  Required arg:
#   target_t - desired seconds per batch
# Optional:
#   weight   - decay factor for old samples (default 0.75)
sub new {
   my ( $class, %args ) = @_;
   foreach my $arg ( qw(target_t) ) {
      die "I need a $arg argument" unless defined $args{$arg};
   }

   my $self = {
      %args,
      avg_n  => 0,
      avg_t  => 0,
      weight => $args{weight} || 0.75,
   };

   return bless $self, $class;
}

# Record that $n units took $t seconds; returns the suggested unit
# count for the next batch so it should take about target_t seconds.
sub update {
   my ($self, $n, $t) = @_;
   PTDEBUG && _d('Master op time:', $n, 'n /', $t, 's');

   if ( !$self->{avg_n} || !$self->{avg_t} ) {
      # First sample: seed the running totals directly.
      $self->{avg_n}    = $n;
      $self->{avg_t}    = $t;
      $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
      PTDEBUG && _d('Initial avg rate:', $self->{avg_rate}, 'n/s');
   }
   else {
      # Decay the old totals by weight, then fold in the new sample.
      $self->{avg_n}    = ($self->{avg_n} * $self->{weight}) + $n;
      $self->{avg_t}    = ($self->{avg_t} * $self->{weight}) + $t;
      $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
      PTDEBUG && _d('Weighted avg rate:', $self->{avg_rate}, 'n/s');
   }

   my $next_n = int($self->{avg_rate} * $self->{target_t});
   PTDEBUG && _d('Adjust n to', $next_n);
   return $next_n;
}

# Debug printer: "# Package:line PID args..." on STDERR.
sub _d {
   my ($package, undef, $line) = caller 0;
   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
        map { defined $_ ? $_ : 'undef' }
        @_;
   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End WeightedAvgRate package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# NibbleIterator package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/NibbleIterator.pm
|
|
# t/lib/NibbleIterator.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package NibbleIterator;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
|
|
|
use Data::Dumper;
|
|
$Data::Dumper::Indent = 1;
|
|
$Data::Dumper::Sortkeys = 1;
|
|
$Data::Dumper::Quotekeys = 0;
|
|
|
|
# Create a NibbleIterator for the given table.  Required args: Cxn,
# tbl, chunk_size, OptionParser, Quoter, TableNibbler, TableParser.
# Optional args include dml, select, where (via OptionParser),
# comments, resume, order_by, chunk_index, one_nibble, callbacks.
# Small tables are handled as a single "bite"; larger ones are
# "nibbled" chunk-by-chunk along the chosen index.
sub new {
   my ( $class, %args ) = @_;
   my @required_args = qw(Cxn tbl chunk_size OptionParser Quoter TableNibbler TableParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($cxn, $tbl, $chunk_size, $o, $q) = @args{@required_args};

   # Decide up front whether the table fits in one nibble and which
   # index to ascend; dies if neither is possible.
   my $nibble_params = can_nibble(%args);

   my %comments = (
      bite   => "bite table",
      nibble => "nibble table",
   );
   if ( $args{comments} ) {
      # Let the caller override the default SQL comment markers.
      foreach my $key ( grep { defined $args{comments}->{$_} }
                        keys %{$args{comments}} ) {
         $comments{$key} = $args{comments}->{$key};
      }
   }

   my $where      = $o->has('where') ? $o->get('where') : '';
   my $tbl_struct = $tbl->{tbl_struct};
   my $ignore_col = $o->has('ignore-columns')
                  ? ($o->get('ignore-columns') || {})
                  : {};
   my $all_cols   = $o->has('columns')
                  ? ($o->get('columns') || $tbl_struct->{cols})
                  : $tbl_struct->{cols};
   my @cols = grep { !$ignore_col->{$_} } @$all_cols;
   my $self;
   if ( $nibble_params->{one_nibble} ) {
      # One-nibble case: a single statement covers the whole table.
      my $nibble_sql
         = ($args{dml} ? "$args{dml} " : "SELECT ")
         . ($args{select} ? $args{select}
                          : join(', ', map { $q->quote($_) } @cols))
         . " FROM $tbl->{name}"
         . ($where ? " WHERE $where" : '')
         . " /*$comments{bite}*/";
      PTDEBUG && _d('One nibble statement:', $nibble_sql);

      my $explain_nibble_sql
         = "EXPLAIN SELECT "
         . ($args{select} ? $args{select}
                          : join(', ', map { $q->quote($_) } @cols))
         . " FROM $tbl->{name}"
         . ($where ? " WHERE $where" : '')
         . " /*explain $comments{bite}*/";
      PTDEBUG && _d('Explain one nibble statement:', $explain_nibble_sql);

      $self = {
         %args,
         one_nibble         => 1,
         limit              => 0,
         nibble_sql         => $nibble_sql,
         explain_nibble_sql => $explain_nibble_sql,
      };
   }
   else {
      my $index      = $nibble_params->{index}; # brevity
      my $index_cols = $tbl->{tbl_struct}->{keys}->{$index}->{cols};

      # Build the ascend expressions (column lists and boundary
      # comparisons) for walking the table along the chosen index.
      my $asc = $args{TableNibbler}->generate_asc_stmt(
         %args,
         tbl_struct => $tbl->{tbl_struct},
         index      => $index,
         cols       => \@cols,
         asc_only   => 1,
      );
      PTDEBUG && _d('Ascend params:', Dumper($asc));

      my $from     = "$tbl->{name} FORCE INDEX(`$index`)";
      my $order_by = join(', ', map {$q->quote($_)} @{$index_cols});

      # First row of the table: the very first lower boundary.
      my $first_lb_sql
         = "SELECT /*!40001 SQL_NO_CACHE */ "
         . join(', ', map { $q->quote($_) } @{$asc->{scols}})
         . " FROM $from"
         . ($where ? " WHERE $where" : '')
         . " ORDER BY $order_by"
         . " LIMIT 1"
         . " /*first lower boundary*/";
      PTDEBUG && _d('First lower boundary statement:', $first_lb_sql);

      # When resuming, the lower boundary is the first row strictly
      # after the last processed upper boundary.
      my $resume_lb_sql;
      if ( $args{resume} ) {
         $resume_lb_sql
            = "SELECT /*!40001 SQL_NO_CACHE */ "
            . join(', ', map { $q->quote($_) } @{$asc->{scols}})
            . " FROM $from"
            . " WHERE " . $asc->{boundaries}->{'>'}
            . ($where ? " AND ($where)" : '')
            . " ORDER BY $order_by"
            . " LIMIT 1"
            . " /*resume lower boundary*/";
         PTDEBUG && _d('Resume lower boundary statement:', $resume_lb_sql);
      }

      # Last row of the table: the final upper boundary.
      my $last_ub_sql
         = "SELECT /*!40001 SQL_NO_CACHE */ "
         . join(', ', map { $q->quote($_) } @{$asc->{scols}})
         . " FROM $from"
         . ($where ? " WHERE $where" : '')
         . " ORDER BY "
         . join(' DESC, ', map {$q->quote($_)} @{$index_cols}) . ' DESC'
         . " LIMIT 1"
         . " /*last upper boundary*/";
      PTDEBUG && _d('Last upper boundary statement:', $last_ub_sql);

      # LIMIT ?, 2 returns this chunk's upper boundary and the next
      # chunk's lower boundary in one query.
      my $ub_sql
         = "SELECT /*!40001 SQL_NO_CACHE */ "
         . join(', ', map { $q->quote($_) } @{$asc->{scols}})
         . " FROM $from"
         . " WHERE " . $asc->{boundaries}->{'>='}
         . ($where ? " AND ($where)" : '')
         . " ORDER BY $order_by"
         . " LIMIT ?, 2"
         . " /*next chunk boundary*/";
      PTDEBUG && _d('Upper boundary statement:', $ub_sql);

      my $nibble_sql
         = ($args{dml} ? "$args{dml} " : "SELECT ")
         . ($args{select} ? $args{select}
                          : join(', ', map { $q->quote($_) } @{$asc->{cols}}))
         . " FROM $from"
         . " WHERE " . $asc->{boundaries}->{'>='}  # lower boundary
         . " AND " . $asc->{boundaries}->{'<='}    # upper boundary
         . ($where ? " AND ($where)" : '')
         . ($args{order_by} ? " ORDER BY $order_by" : "")
         . " /*$comments{nibble}*/";
      PTDEBUG && _d('Nibble statement:', $nibble_sql);

      my $explain_nibble_sql
         = "EXPLAIN SELECT "
         . ($args{select} ? $args{select}
                          : join(', ', map { $q->quote($_) } @{$asc->{cols}}))
         . " FROM $from"
         . " WHERE " . $asc->{boundaries}->{'>='}  # lower boundary
         . " AND " . $asc->{boundaries}->{'<='}    # upper boundary
         . ($where ? " AND ($where)" : '')
         . ($args{order_by} ? " ORDER BY $order_by" : "")
         . " /*explain $comments{nibble}*/";
      PTDEBUG && _d('Explain nibble statement:', $explain_nibble_sql);

      # LIMIT offset is zero-based, so chunk_size rows = LIMIT size-1.
      my $limit = $chunk_size - 1;
      PTDEBUG && _d('Initial chunk size (LIMIT):', $limit);

      $self = {
         %args,
         index              => $index,
         limit              => $limit,
         first_lb_sql       => $first_lb_sql,
         last_ub_sql        => $last_ub_sql,
         ub_sql             => $ub_sql,
         nibble_sql         => $nibble_sql,
         explain_ub_sql     => "EXPLAIN $ub_sql",
         explain_nibble_sql => $explain_nibble_sql,
         resume_lb_sql      => $resume_lb_sql,
         sql                => {
            columns    => $asc->{scols},
            from       => $from,
            where      => $where,
            boundaries => $asc->{boundaries},
            order_by   => $order_by,
         },
      };
   }

   # Fixed: a stray ',' here joined this assignment to the next
   # statement via the comma operator; use ';' as intended.
   $self->{row_est}    = $nibble_params->{row_est};
   $self->{nibbleno}   = 0;
   $self->{have_rows}  = 0;
   $self->{rowno}      = 0;
   $self->{oktonibble} = 1;

   return bless $self, $class;
}
|
|
|
|
# Return the next row (arrayref) of the current nibble, executing new
# nibbles as needed; returns nothing when all nibbles are done or the
# init callback vetoed iteration.  On the first call, prepares the
# statement handles, fetches the table boundaries, and runs the
# optional init callback.  Other optional callbacks: exec_nibble,
# after_nibble, done.
sub next {
   my ($self) = @_;

   if ( !$self->{oktonibble} ) {
      PTDEBUG && _d('Not ok to nibble');
      return;
   }

   my %callback_args = (
      Cxn            => $self->{Cxn},
      tbl            => $self->{tbl},
      NibbleIterator => $self,
   );

   # One-time setup before the first nibble.
   if ($self->{nibbleno} == 0) {
      $self->_prepare_sths();
      $self->_get_bounds();
      if ( my $callback = $self->{callbacks}->{init} ) {
         # A falsy init return permanently stops iteration.
         $self->{oktonibble} = $callback->(%callback_args);
         PTDEBUG && _d('init callback returned', $self->{oktonibble});
         if ( !$self->{oktonibble} ) {
            $self->{no_more_boundaries} = 1;
            return;
         }
      }
   }

   NIBBLE:
   while ( $self->{have_rows} || $self->_next_boundaries() ) {
      # No rows pending from a previous call: execute the next nibble.
      if ( !$self->{have_rows} ) {
         $self->{nibbleno}++;
         PTDEBUG && _d('Nibble:', $self->{nibble_sth}->{Statement}, 'params:',
            join(', ', (@{$self->{lower} || []}, @{$self->{upper} || []})));
         if ( my $callback = $self->{callbacks}->{exec_nibble} ) {
            # The callback may execute the nibble itself (or skip it)
            # and reports whether there are rows to fetch.
            $self->{have_rows} = $callback->(%callback_args);
         }
         else {
            $self->{nibble_sth}->execute(@{$self->{lower}}, @{$self->{upper}});
            $self->{have_rows} = $self->{nibble_sth}->rows();
         }
         PTDEBUG && _d($self->{have_rows}, 'rows in nibble', $self->{nibbleno});
      }

      # Drain the current nibble one row per call.
      if ( $self->{have_rows} ) {
         my $row = $self->{nibble_sth}->fetchrow_arrayref();
         if ( $row ) {
            $self->{rowno}++;
            PTDEBUG && _d('Row', $self->{rowno}, 'in nibble',$self->{nibbleno});
            # Return a copy; fetchrow_arrayref reuses its buffer.
            return [ @$row ];
         }
      }

      # Current nibble exhausted (or skipped); loop to the next one.
      PTDEBUG && _d('No rows in nibble or nibble skipped');
      if ( my $callback = $self->{callbacks}->{after_nibble} ) {
         $callback->(%callback_args);
      }
      $self->{rowno}     = 0;
      $self->{have_rows} = 0;
   }

   PTDEBUG && _d('Done nibbling');
   if ( my $callback = $self->{callbacks}->{done} ) {
      $callback->(%callback_args);
   }

   return;
}
|
|
|
|
sub nibble_number {
   my ($self) = @_;
   # Number of nibbles executed so far (0 before the first).
   return $self->{nibbleno};
}
|
|
|
|
sub set_nibble_number {
   my ($self, $new_nibbleno) = @_;
   # Used when resuming so nibble numbering continues where it left off.
   die "I need a number" unless $new_nibbleno;
   $self->{nibbleno} = $new_nibbleno;
   PTDEBUG && _d('Set new nibble number:', $new_nibbleno);
   return;
}
|
|
|
|
sub nibble_index {
   my ($self) = @_;
   # Name of the index used to ascend the table; unset for a
   # one-nibble table.
   return $self->{index};
}
|
|
|
|
sub statements {
   my ($self) = @_;
   # The prepared statement handles used for nibbling, keyed by role.
   my %sths = (
      nibble                 => $self->{nibble_sth},
      explain_nibble         => $self->{explain_nibble_sth},
      upper_boundary         => $self->{ub_sth},
      explain_upper_boundary => $self->{explain_ub_sth},
   );
   return \%sths;
}
|
|
|
|
sub boundaries {
   my ($self) = @_;
   # Current boundary values, each an arrayref of index-column values.
   return {
      map { $_ => $self->{$_} }
      qw(first_lower lower upper next_lower last_upper)
   };
}
|
|
|
|
sub set_boundary {
   my ($self, $boundary, $values) = @_;
   # Validate the boundary name and the values arrayref, then install.
   die "I need a boundary parameter" unless $boundary;
   die "Invalid boundary: $boundary"
      unless $boundary =~ m/^(?:lower|upper|next_lower|last_upper)$/;
   die "I need a values arrayref parameter"
      unless $values && ref $values eq 'ARRAY';
   $self->{$boundary} = $values;
   PTDEBUG && _d('Set new', $boundary, 'boundary:', Dumper($values));
   return;
}
|
|
|
|
sub one_nibble {
   my ($self) = @_;
   # True when the whole table is handled as a single chunk.
   return $self->{one_nibble};
}
|
|
|
|
sub chunk_size {
   my ($self) = @_;
   # limit stores LIMIT, which is chunk size minus one (zero-based).
   return $self->{limit} + 1;
}
|
|
|
|
sub set_chunk_size {
   my ($self, $new_size) = @_;
   # A one-nibble table has no chunking, so there is nothing to set.
   return if $self->{one_nibble};
   die "Chunk size must be > 0" unless $new_size;
   # Stored as the SQL LIMIT value, which is chunk size minus one.
   $self->{limit} = $new_size - 1;
   PTDEBUG && _d('Set new chunk size (LIMIT):', $new_size);
   return;
}
|
|
|
|
sub sql {
   my ($self) = @_;
   # The parts (columns, from, where, boundaries, order_by) used to
   # build the nibble statements; unset for a one-nibble table.
   return $self->{sql};
}
|
|
|
|
sub more_boundaries {
   my ($self) = @_;
   # True while there are still chunks left to nibble.
   return !$self->{no_more_boundaries};
}
|
|
|
|
sub row_estimate {
   my ($self) = @_;
   # EXPLAIN's estimate of how many rows will be nibbled in total.
   return $self->{row_est};
}
|
|
|
|
sub can_nibble {
   my (%args) = @_;
   my @required_args = qw(Cxn tbl chunk_size OptionParser TableParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($cxn, $tbl, $chunk_size, $o) = @args{@required_args};

   my $where = $o->has('where') ? $o->get('where') : '';

   # EXPLAIN gives a rough matching-row count and the index MySQL
   # would pick for the WHERE clause.
   my ($row_est, $mysql_index) = get_row_estimate(
      Cxn   => $cxn,
      tbl   => $tbl,
      where => $where,
   );

   # Without a WHERE clause, MySQL's index choice is not meaningful.
   $mysql_index = undef if !$where;

   # Small enough tables are processed in a single nibble, unless the
   # caller explicitly disabled one-nibble mode.
   my $chunk_size_limit = $o->get('chunk-size-limit') || 1;
   my $one_nibble;
   if ( !defined $args{one_nibble} || $args{one_nibble} ) {
      $one_nibble = $row_est <= $chunk_size * $chunk_size_limit;
   }
   else {
      $one_nibble = 0;
   }
   PTDEBUG && _d('One nibble:', $one_nibble ? 'yes' : 'no');

   # A resume spec with no boundaries means the previous run treated
   # this as a one-nibble table; keep doing so.
   if ( $args{resume}
        && !defined $args{resume}->{lower_boundary}
        && !defined $args{resume}->{upper_boundary} ) {
      PTDEBUG && _d('Resuming from one nibble table');
      $one_nibble = 1;
   }

   my $index = _find_best_index(%args, mysql_index => $mysql_index);
   if ( !$index && !$one_nibble ) {
      die "There is no good index and the table is oversized.";
   }

   return {
      row_est    => $row_est,    # nibble about this many rows
      index      => $index,      # using this index
      one_nibble => $one_nibble, # if the table fits in one nibble/chunk
   };
}
|
|
|
|
# Choose the index to ascend the table with.  Preference order:
#   1. the caller's chunk_index, if it exists (used outright if unique)
#   2. the index MySQL chose in EXPLAIN (mysql_index)
#   3. PRIMARY or the first unique index from sort_indexes()
#   4. among the remaining candidates, the one with the highest
#      cardinality (more columns wins ties)
# Returns the index name, or nothing if the table has no usable index.
sub _find_best_index {
   my (%args) = @_;
   my @required_args = qw(Cxn tbl TableParser);
   my ($cxn, $tbl, $tp) = @args{@required_args};
   my $tbl_struct = $tbl->{tbl_struct};
   my $indexes    = $tbl_struct->{keys};

   my $want_index = $args{chunk_index};
   if ( $want_index ) {
      PTDEBUG && _d('User wants to use index', $want_index);
      if ( !exists $indexes->{$want_index} ) {
         PTDEBUG && _d('Cannot use user index because it does not exist');
         $want_index = undef;
      }
   }

   if ( !$want_index && $args{mysql_index} ) {
      PTDEBUG && _d('MySQL wants to use index', $args{mysql_index});
      $want_index = $args{mysql_index};
   }

   my $best_index;
   my @possible_indexes;
   if ( $want_index ) {
      if ( $indexes->{$want_index}->{is_unique} ) {
         PTDEBUG && _d('Will use wanted index');
         $best_index = $want_index;
      }
      else {
         PTDEBUG && _d('Wanted index is a possible index');
         push @possible_indexes, $want_index;
      }
   }
   else {
      PTDEBUG && _d('Auto-selecting best index');
      foreach my $index ( $tp->sort_indexes($tbl_struct) ) {
         if ( $index eq 'PRIMARY' || $indexes->{$index}->{is_unique} ) {
            $best_index = $index;
            last;
         }
         else {
            push @possible_indexes, $index;
         }
      }
   }

   if ( !$best_index && @possible_indexes ) {
      PTDEBUG && _d('No PRIMARY or unique indexes;',
         'will use index with highest cardinality');
      foreach my $index ( @possible_indexes ) {
         $indexes->{$index}->{cardinality} = _get_index_cardinality(
            %args,
            index => $index,
         );
      }
      @possible_indexes = sort {
         # Fixed: this compared $b's cardinality with itself ($b vs $b),
         # so cardinality never affected the ordering.  Sort descending
         # by cardinality so the highest-cardinality index is first.
         my $cmp
            = $indexes->{$b}->{cardinality} <=> $indexes->{$a}->{cardinality};
         if ( $cmp == 0 ) {
            # Tie-break: prefer the index with more columns.
            $cmp = scalar @{$indexes->{$b}->{cols}}
               <=> scalar @{$indexes->{$a}->{cols}};
         }
         $cmp;
      } @possible_indexes;
      $best_index = $possible_indexes[0];
   }

   PTDEBUG && _d('Best index:', $best_index);
   return $best_index;
}
|
|
|
|
# Return the cardinality of the given index on the table, as reported
# by SHOW INDEXES.  Rows without a cardinality value are skipped.
sub _get_index_cardinality {
   my (%args) = @_;
   my @required_args = qw(Cxn tbl index);
   my ($cxn, $tbl, $index) = @args{@required_args};

   my $sql = "SHOW INDEXES FROM $tbl->{name} "
           . "WHERE Key_name = '$index'";
   PTDEBUG && _d($sql);

   my $dbh = $cxn->dbh();

   # DBI lower-cases column names when FetchHashKeyName is NAME_lc,
   # so pick the hash key matching the handle's setting.
   my $key_col = 'Key_name';
   if ( $dbh && ($dbh->{FetchHashKeyName} || '') eq 'NAME_lc' ) {
      $key_col = 'key_name';
   }

   # NOTE(review): selectall_hashref keys rows by $key_col, and every row
   # for this index shares the same key name, so duplicate rows collapse;
   # in practice the product below reduces to a single row's cardinality.
   my $cardinality = 1;
   my $result_rows = $dbh->selectall_hashref($sql, $key_col);
   for my $row ( values %$result_rows ) {
      $cardinality *= $row->{cardinality} if $row->{cardinality};
   }

   PTDEBUG && _d('Index', $index, 'cardinality:', $cardinality);
   return $cardinality;
}
|
|
|
|
# EXPLAIN the nibble's WHERE clause (default "1=1") to estimate how many
# rows it covers and which index MySQL would choose.  Returns a two-item
# list: (row estimate, index name).  The index name is lower-cased unless
# it is PRIMARY; it is '' when EXPLAIN reports no key.
sub get_row_estimate {
   my (%args) = @_;
   my @required_args = qw(Cxn tbl);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($cxn, $tbl) = @args{@required_args};

   my $where = $args{where} || '1=1';
   my $sql   = "EXPLAIN SELECT * FROM $tbl->{name} " . "WHERE " . $where;
   PTDEBUG && _d($sql);

   my $expl = $cxn->dbh()->selectrow_hashref($sql);
   PTDEBUG && _d(Dumper($expl));

   my $mysql_index = $expl->{key} || '';
   $mysql_index = lc($mysql_index) unless $mysql_index eq 'PRIMARY';

   return ($expl->{rows} || 0), $mysql_index;
}
|
|
|
|
# Prepare the statement handles used while nibbling.  The nibble and its
# EXPLAIN counterpart are always prepared; the upper-boundary statements
# are only needed when the table is nibbled in multiple chunks.
sub _prepare_sths {
   my ($self) = @_;
   PTDEBUG && _d('Preparing statement handles');

   my $dbh = $self->{Cxn}->dbh();

   $self->{nibble_sth}         = $dbh->prepare($self->{nibble_sql});
   $self->{explain_nibble_sth} = $dbh->prepare($self->{explain_nibble_sql});

   unless ( $self->{one_nibble} ) {
      $self->{ub_sth}         = $dbh->prepare($self->{ub_sql});
      $self->{explain_ub_sth} = $dbh->prepare($self->{explain_ub_sql});
   }

   return;
}
|
|
|
|
# Find the boundaries to start nibbling from.  Fetches the table's first
# lower boundary and, when resuming a previous run, the lower boundary of
# the nibble after the last completed one.  Sets no_more_boundaries when
# there is nothing (left) to nibble.
sub _get_bounds {
   my ($self) = @_;

   if ( $self->{one_nibble} ) {
      # Single-chunk table: there are no boundaries to walk.  If we're
      # resuming, the one-and-only nibble was already done.
      if ( $self->{resume} ) {
         $self->{no_more_boundaries} = 1;
      }
      return;
   }

   my $dbh = $self->{Cxn}->dbh();

   $self->{first_lower} = $dbh->selectrow_arrayref($self->{first_lb_sql});
   PTDEBUG && _d('First lower boundary:', Dumper($self->{first_lower}));

   if ( my $nibble = $self->{resume} ) {
      # Resuming: the next lower boundary is the first row beyond the
      # last completed nibble's upper boundary.
      if ( defined $nibble->{lower_boundary}
           && defined $nibble->{upper_boundary} ) {
         my $sth = $dbh->prepare($self->{resume_lb_sql});
         # NOTE(review): assumes boundary values contain no embedded
         # commas -- the saved upper boundary is a comma-separated string;
         # confirm against the code that stores it.
         my @ub = split ',', $nibble->{upper_boundary};
         PTDEBUG && _d($sth->{Statement}, 'params:', @ub);
         $sth->execute(@ub);
         $self->{next_lower} = $sth->fetchrow_arrayref();
         $sth->finish();
      }
   }
   else {
      # Fresh run: start nibbling at the very first row.
      $self->{next_lower} = $self->{first_lower};
   }
   PTDEBUG && _d('Next lower boundary:', Dumper($self->{next_lower}));

   if ( !$self->{next_lower} ) {
      # Either the table is empty, or the resumed run had already
      # finished.  Record the last upper boundary so callers can still
      # see where the table ends.
      PTDEBUG && _d('At end of table, or no more boundaries to resume');
      $self->{no_more_boundaries} = 1;

      $self->{last_upper} = $dbh->selectrow_arrayref($self->{last_ub_sql});
      PTDEBUG && _d('Last upper boundary:', Dumper($self->{last_upper}));
   }

   return;
}
|
|
|
|
# Advance to the next nibble's boundaries.  Returns 1 to continue
# nibbling, or nothing to stop.  Updates lower/upper (this nibble) and
# next_lower (the following nibble's starting point), and flags
# no_more_boundaries when the end of the table is reached.
sub _next_boundaries {
   my ($self) = @_;

   if ( $self->{no_more_boundaries} ) {
      PTDEBUG && _d('No more boundaries');
      return; # stop nibbling
   }

   if ( $self->{one_nibble} ) {
      # Whole table in one chunk: empty boundary lists mean "no WHERE
      # boundary clause"; the single nibble covers everything.
      $self->{lower} = $self->{upper} = [];
      $self->{no_more_boundaries} = 1; # for next call
      return 1; # continue nibbling
   }

   # If the next lower boundary equals the current one, the iterator
   # isn't advancing.  This can happen with a non-unique chunk index:
   # the same boundary row keeps being selected.  Die rather than loop
   # forever.
   if ( $self->identical_boundaries($self->{lower}, $self->{next_lower}) ) {
      PTDEBUG && _d('Infinite loop detected');
      my $tbl = $self->{tbl};
      my $index = $tbl->{tbl_struct}->{keys}->{$self->{index}};
      my $n_cols = scalar @{$index->{cols}};
      my $chunkno = $self->{nibbleno};

      die "Possible infinite loop detected! "
         . "The lower boundary for chunk $chunkno is "
         . "<" . join(', ', @{$self->{lower}}) . "> and the lower "
         . "boundary for chunk " . ($chunkno + 1) . " is also "
         . "<" . join(', ', @{$self->{next_lower}}) . ">. "
         . "This usually happens when using a non-unique single "
         . "column index. The current chunk index for table "
         . "$tbl->{db}.$tbl->{tbl} is $self->{index} which is"
         . ($index->{is_unique} ? '' : ' not') . " unique and covers "
         . ($n_cols > 1 ? "$n_cols columns" : "1 column") . ".\n";
   }
   $self->{lower} = $self->{next_lower};

   # Give the caller a chance to veto this nibble (e.g. to pause or
   # abort) before the boundary query runs.
   if ( my $callback = $self->{callbacks}->{next_boundaries} ) {
      my $oktonibble = $callback->(
         Cxn            => $self->{Cxn},
         tbl            => $self->{tbl},
         NibbleIterator => $self,
      );
      PTDEBUG && _d('next_boundaries callback returned', $oktonibble);
      if ( !$oktonibble ) {
         $self->{no_more_boundaries} = 1;
         return; # stop nibbling
      }
   }

   # The upper-boundary query returns up to two rows: this nibble's
   # upper boundary, and (if the table continues) the next nibble's
   # lower boundary.
   PTDEBUG && _d($self->{ub_sth}->{Statement}, 'params:',
      join(', ', @{$self->{lower}}), $self->{limit});
   $self->{ub_sth}->execute(@{$self->{lower}}, $self->{limit});
   my $boundary = $self->{ub_sth}->fetchall_arrayref();
   PTDEBUG && _d('Next boundary:', Dumper($boundary));
   if ( $boundary && @$boundary ) {
      $self->{upper} = $boundary->[0];

      if ( $boundary->[1] ) {
         $self->{next_lower} = $boundary->[1];
      }
      else {
         # Only one row back: this nibble reaches the end of the table.
         PTDEBUG && _d('End of table boundary:', Dumper($boundary->[0]));
         $self->{no_more_boundaries} = 1; # for next call

         $self->{last_upper} = $boundary->[0];
      }
   }
   else {
      # No rows at all: fall back to the table's last row as the final
      # upper boundary.
      my $dbh = $self->{Cxn}->dbh();
      $self->{upper} = $dbh->selectrow_arrayref($self->{last_ub_sql});
      PTDEBUG && _d('Last upper boundary:', Dumper($self->{upper}));
      $self->{no_more_boundaries} = 1; # for next call

      $self->{last_upper} = $self->{upper};
   }
   $self->{ub_sth}->finish();

   return 1; # continue nibbling
}
|
|
|
|
# Compare two boundary arrayrefs element-by-element (as strings).
# Returns 1 if they are identical (or both absent), 0 otherwise.
# Dies if both are present but have different lengths.
sub identical_boundaries {
   my ($self, $left, $right) = @_;

   # Exactly one boundary present: not identical.
   return 0 if ($left && !$right) || (!$left && $right);

   # Neither present: identical.
   return 1 if !$left && !$right;

   die "Boundaries have different numbers of values"
      if scalar @$left != scalar @$right;  # shouldn't happen

   foreach my $i ( 0 .. $#$left ) {
      return 0 if $left->[$i] ne $right->[$i];  # values differ
   }

   return 1;
}
|
|
|
|
# Destructor: finish any prepared statement handles (attributes whose
# names end in "_sth") so the database handle can be released cleanly.
sub DESTROY {
   my ( $self ) = @_;
   my @sth_keys = grep { m/_sth$/ } keys %$self;
   foreach my $key ( @sth_keys ) {
      PTDEBUG && _d('Finish', $key);
      $self->{$key}->finish();
   }
   return;
}
|
|
|
|
# Debug print: prefix each message with "# package:line PID" and write
# it to STDERR.  undef args print as 'undef'; embedded newlines are
# continued with "# " so multi-line values stay comment-formatted.
sub _d {
   my ($package, undef, $line) = caller 0;
   my @parts = map { my $s = $_; $s =~ s/\n/\n# /g; $s }
               map { defined $_ ? $_ : 'undef' }
               @_;
   print STDERR "# $package:$line $PID ", join(' ', @parts), "\n";
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End NibbleIterator package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Transformers package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Transformers.pm
|
|
# t/lib/Transformers.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# Transformers: a grab-bag of exported helper functions for formatting
# and converting values (durations, timestamps, sizes, checksums).
package Transformers;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

use Time::Local qw(timegm timelocal);
use Digest::MD5 qw(md5_hex);

require Exporter;
our @ISA = qw(Exporter);
our %EXPORT_TAGS = ();
our @EXPORT = ();
# Everything is exported on request only; nothing by default.
our @EXPORT_OK = qw(
   micro_t
   percentage_of
   secs_to_time
   time_to_secs
   shorten
   ts
   parse_timestamp
   unix_timestamp
   any_unix_timestamp
   make_checksum
   crc32
);

# YYMMDD H:M:S[.frac], the timestamp format printed in MySQL slow logs.
our $mysql_ts = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/;
# YYYY-MM-DD[T ]HH:MM:SS[.frac], a "proper" timestamp.
our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/;
our $n_ts = qr/(\d{1,5})([shmd]?)/; # Limit \d{1,5} because \d{6} looks like a YYMMDD date
|
|
|
|
# Format a duration in seconds with an appropriate unit suffix:
# microseconds ('us') below 1ms, milliseconds ('ms') below 1s, else
# seconds ('s').  Optional args p_ms / p_s set the printf precision for
# the ms and s ranges (default 0).  Negative input is clamped to 0.
sub micro_t {
   my ( $t, %args ) = @_;
   my $ms_precision = defined $args{p_ms} ? $args{p_ms} : 0;
   my $s_precision  = defined $args{p_s}  ? $args{p_s}  : 0;

   $t = 0 if $t < 0;                        # clamp negatives
   $t = sprintf('%.17f', $t) if $t =~ /e/;  # expand scientific notation
   $t =~ s/\.(\d{1,6})\d*/\.$1/;            # truncate past microseconds

   my $formatted;
   if ( $t > 0 && $t <= 0.000999 ) {
      $formatted = ($t * 1000000) . 'us';
   }
   elsif ( $t >= 0.001000 && $t <= 0.999999 ) {
      $formatted = sprintf("%.${ms_precision}f", $t * 1000);
      $formatted = ($formatted * 1) . 'ms'; # * 1 strips insignificant zeros
   }
   elsif ( $t >= 1 ) {
      $formatted = sprintf("%.${s_precision}f", $t);
      $formatted = ($formatted * 1) . 's';  # * 1 strips insignificant zeros
   }
   else {
      $formatted = 0;  # $t should be exactly 0 here
   }

   return $formatted;
}
|
|
|
|
# Return $is as a percentage of $of, formatted as an integer by default
# or with p decimal places when args{p} is given.  A zero/false $of is
# treated as 1 to avoid division by zero.
sub percentage_of {
   my ( $is, $of, %args ) = @_;
   my $precision = $args{p} || 0;
   my $format    = $precision ? "%.${precision}f" : "%d";
   $of ||= 1;  # guard against division by zero
   return sprintf $format, ($is * 100) / $of;
}
|
|
|
|
# Format a number of seconds as a clock-style duration.  The format is
# chosen by magnitude unless $fmt is given: 'd' => D+HH:MM:SS,
# 'h' => HH:MM:SS, else MM:SS.  Zero or false input returns '00:00'.
sub secs_to_time {
   my ( $secs, $fmt ) = @_;
   $secs ||= 0;
   return '00:00' unless $secs;

   # Pick the widest format needed unless the caller chose one.
   if ( !$fmt ) {
      $fmt = $secs >= 86_400 ? 'd'
           : $secs >= 3_600  ? 'h'
           :                   'm';
   }

   my $days    = int($secs / 86_400);
   my $hours   = int(($secs % 86_400) / 3_600);
   my $minutes = int(($secs % 3_600) / 60);
   my $seconds = $secs % 60;

   return sprintf("%d+%02d:%02d:%02d", $days, $hours, $minutes, $seconds)
      if $fmt eq 'd';
   return sprintf("%02d:%02d:%02d", $hours, $minutes, $seconds)
      if $fmt eq 'h';
   return sprintf("%02d:%02d", $minutes, $seconds);
}
|
|
|
|
# Convert a value like "10", "+5m", "-2h", "3d" into seconds.  The
# suffix (s, m, h, d) defaults to $default_suffix, then to 's'.  A
# leading '-' negates the result.  Dies on an unrecognized suffix.
sub time_to_secs {
   my ( $val, $default_suffix ) = @_;
   die "I need a val argument" unless defined $val;

   my ( $sign, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/;
   $suffix ||= $default_suffix || 's';

   die "Invalid suffix for $val: $suffix" unless $suffix =~ m/[smhd]/;

   my %secs_per = ( s => 1, m => 60, h => 3600, d => 86400 );
   my $secs = $num * $secs_per{$suffix};
   $secs *= -1 if $sign && $sign eq '-';

   return $secs;
}
|
|
|
|
# Shorten a number with a metric-style unit suffix (k, M, G, ...),
# dividing by args{d} (default 1024) until it fits.  args{p} sets the
# decimal precision (default 2).  Integers that need no unit are
# printed without decimals.
sub shorten {
   my ( $num, %args ) = @_;
   my $precision = defined $args{p} ? $args{p} : 2;     # float precision
   my $divisor   = defined $args{d} ? $args{d} : 1_024; # divisor

   my @units = ('', qw(k M G T P E Z Y));
   my $unit  = 0;
   while ( $num >= $divisor && $unit < @units - 1 ) {
      $num /= $divisor;
      ++$unit;
   }

   my $format = ($num =~ m/\./ || $unit) ? "%.${precision}f%s" : '%d';
   return sprintf($format, $num, $units[$unit]);
}
|
|
|
|
# Format a unix timestamp as "YYYY-MM-DDTHH:MM:SS", using gmtime when
# $gmt is true, localtime otherwise.  Fractional seconds in the input
# are preserved as a ".ffffff" suffix.
sub ts {
   my ( $time, $gmt ) = @_;
   my ( $sec, $min, $hour, $mday, $mon, $year )
      = $gmt ? gmtime($time) : localtime($time);
   my $val = sprintf("%d-%02d-%02dT%02d:%02d:%02d",
      $year + 1900, $mon + 1, $mday, $hour, $min, $sec);

   # Carry over fractional seconds, e.g. ".250000".
   if ( my ($frac) = $time =~ m/(\.\d+)$/ ) {
      $frac = sprintf("%.6f", $frac);
      $frac =~ s/^0\././;
      $val .= $frac;
   }

   return $val;
}
|
|
|
|
# Convert a MySQL slow log timestamp (YYMMDD H:M:S[.frac]) into a
# proper "YYYY-MM-DD HH:MM:SS[.ffffff]" timestamp.  The two-digit year
# is assumed to be 2000+.  Values that don't match are returned as-is.
sub parse_timestamp {
   my ( $val ) = @_;
   if ( my ($y, $m, $d, $h, $i, $s, $f) = $val =~ m/^$mysql_ts$/ ) {
      # Use a fractional seconds format only when a fraction was captured.
      my $sec_fmt = defined $f ? '%09.6f' : '%02d';
      return sprintf("%d-%02d-%02d %02d:%02d:$sec_fmt",
         $y + 2000, $m, $d, $h, $i, (defined $f ? $s + $f : $s));
   }
   return $val;
}
|
|
|
|
# Convert a proper "YYYY-MM-DD HH:MM:SS[.frac]" timestamp into a unix
# timestamp, using UTC when $gmt is true, else local time.  Fractional
# seconds are re-appended as ".ffffff".  Unparseable values are
# returned unchanged.
sub unix_timestamp {
   my ( $val, $gmt ) = @_;
   my ($y, $m, $d, $h, $i, $s, $us) = $val =~ m/^$proper_ts$/;
   return $val unless defined $y;  # not a proper timestamp: pass through

   my @tm = ($s, $i, $h, $d, $m - 1, $y);
   my $ts = $gmt ? timegm(@tm) : timelocal(@tm);

   if ( defined $us ) {
      # Keep the fractional part, normalized to six digits.
      $us = sprintf('%.6f', $us);
      $us =~ s/^0\././;
      $ts .= $us;
   }

   return $ts;
}
|
|
|
|
# Convert $val into a unix timestamp.  Accepts: N[shmd] "ago" notation
# (relative to now), an existing unix timestamp (9+ digits), a MySQL
# slow log timestamp (YYMMDD [HH:MM:SS]), or a proper
# YYYY-MM-DD[T ]HH:MM:SS timestamp.  Anything else is treated as a
# MySQL expression and handed to $callback, if given.  Returns nothing
# when the value cannot be interpreted.
sub any_unix_timestamp {
   my ( $val, $callback ) = @_;

   if ( my ($n, $suffix) = $val =~ m/^$n_ts$/ ) {
      # N seconds/minutes/hours/days ago.
      $n = $suffix eq 's' ? $n # Seconds
         : $suffix eq 'm' ? $n * 60 # Minutes
         : $suffix eq 'h' ? $n * 3600 # Hours
         : $suffix eq 'd' ? $n * 86400 # Days
         : $n; # default: Seconds
      PTDEBUG && _d('ts is now - N[shmd]:', $n);
      return time - $n;
   }
   elsif ( $val =~ m/^\d{9,}/ ) {
      PTDEBUG && _d('ts is already a unix timestamp');
      return $val;
   }
   elsif ( my ($ymd, $hms) = $val =~ m/^(\d{6})(?:\s+(\d+:\d+:\d+))?/ ) {
      PTDEBUG && _d('ts is MySQL slow log timestamp');
      # Default to midnight when no time-of-day was given.
      $val .= ' 00:00:00' unless $hms;
      return unix_timestamp(parse_timestamp($val));
   }
   # NOTE: $ymd and $hms below deliberately reuse the "my" variables
   # declared in the previous elsif condition; their lexical scope
   # extends to the end of this if/elsif chain.
   elsif ( ($ymd, $hms) = $val =~ m/^(\d{4}-\d\d-\d\d)(?:[T ](\d+:\d+:\d+))?/) {
      PTDEBUG && _d('ts is properly formatted timestamp');
      $val .= ' 00:00:00' unless $hms;
      return unix_timestamp($val);
   }
   else {
      PTDEBUG && _d('ts is MySQL expression');
      return $callback->($val) if $callback && ref $callback eq 'CODE';
   }

   PTDEBUG && _d('Unknown ts type:', $val);
   return;
}
|
|
|
|
# Return a 16-hex-digit (64-bit) checksum: the upper-cased last 16
# characters of the value's MD5 hex digest.
sub make_checksum {
   my ( $val ) = @_;
   my $digest   = md5_hex($val);
   my $checksum = uc(substr($digest, -16));
   PTDEBUG && _d($checksum, 'checksum for', $val);
   return $checksum;
}
|
|
|
|
# Compute the standard CRC-32 (reflected, polynomial 0xEDB88320,
# initial value and final XOR 0xFFFFFFFF) of the string.  Returns
# nothing for a false string (empty, undef, or '0').
sub crc32 {
   my ( $string ) = @_;
   return unless $string;

   my $polynomial = 0xEDB88320;
   my $crc        = 0xFFFFFFFF;
   foreach my $char ( split(//, $string) ) {
      my $byte = ($crc ^ ord($char)) & 0xFF;
      for ( 1 .. 8 ) {
         $byte = ($byte & 1) ? ($polynomial ^ ($byte >> 1)) : ($byte >> 1);
      }
      $crc = (($crc >> 8) & 0x00FFFFFF) ^ $byte;
   }

   return $crc ^ 0xFFFFFFFF;
}
|
|
|
|
# Debug print: prefix each message with "# package:line PID" and write
# it to STDERR.  undef args print as 'undef'; embedded newlines are
# continued with "# " so multi-line values stay comment-formatted.
sub _d {
   my ($package, undef, $line) = caller 0;
   my @parts = map { my $s = $_; $s =~ s/\n/\n# /g; $s }
               map { defined $_ ? $_ : 'undef' }
               @_;
   print STDERR "# $package:$line $PID ", join(' ', @parts), "\n";
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End Transformers package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# CleanupTask package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/CleanupTask.pm
|
|
# t/lib/CleanupTask.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
# CleanupTask: wraps a coderef that runs automatically when the object
# is destroyed -- when it goes out of scope, or when the program dies.
# Used to guarantee cleanup (e.g. dropping triggers) on any exit path.
package CleanupTask;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

# Constructor.  $task must be a coderef; it is stored and invoked once
# from DESTROY.
sub new {
   my ( $class, $task ) = @_;
   die "I need a task parameter" unless $task;
   die "The task parameter must be a coderef" unless ref $task eq 'CODE';
   my $self = bless { task => $task }, $class;
   PTDEBUG && _d('Created cleanup task', $task);
   return $self;
}

# Destructor: run the stored task.  Warn if the coderef has somehow
# been lost (e.g. overwritten with a non-ref).
sub DESTROY {
   my ($self) = @_;
   my $task = $self->{task};
   if ( ref $task ) {
      PTDEBUG && _d('Calling cleanup task', $task);
      $task->();
   }
   else {
      warn "Lost cleanup task";
   }
   return;
}

# Debug print: "# package:line PID message" to STDERR; undef args print
# as 'undef' and embedded newlines are continued with "# ".
sub _d {
   my ($package, undef, $line) = caller 0;
   my @parts = map { my $s = $_; $s =~ s/\n/\n# /g; $s }
               map { defined $_ ? $_ : 'undef' }
               @_;
   print STDERR "# $package:$line $PID ", join(' ', @parts), "\n";
}

1;
}
|
|
# ###########################################################################
|
|
# End CleanupTask package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# This is a combination of modules and programs in one -- a runnable module.
|
|
# http://www.perl.com/pub/a/2006/07/13/lightning-articles.html?page=last
|
|
# Or, look it up in the Camel book on pages 642 and 643 in the 3rd edition.
|
|
#
|
|
# Check at the end of this package for the call to main() which actually runs
|
|
# the program.
|
|
# ###########################################################################
|
|
package pt_online_schema_change;

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use constant PTDEBUG => $ENV{PTDEBUG} || 0;

use Time::HiRes qw(time sleep);
use Data::Dumper;
$Data::Dumper::Indent = 1;
$Data::Dumper::Sortkeys = 1;
$Data::Dumper::Quotekeys = 0;

# Install sig_int() (defined elsewhere in this file) as the handler for
# the normal termination signals.
use sigtrap 'handler', \&sig_int, 'normal-signals';

# Globals; main() resets these so repeated in-process calls (e.g. from
# the test suite) start from a clean slate.
my $exit_status = 0;   # process exit code
my $oktorun = 1;       # cleared to stop work loops / flag cleanup tasks
my @drop_trigger_sqls; # DROP TRIGGER statements for cleanup

$OUTPUT_AUTOFLUSH = 1; # $|: don't buffer STDOUT
|
|
|
|
sub main {
|
|
# Reset global vars else tests will fail.
|
|
@ARGV = @_;
|
|
$oktorun = 1;
|
|
@drop_trigger_sqls = ();
|
|
|
|
$exit_status = 0;
|
|
|
|
# ########################################################################
|
|
# Get configuration information.
|
|
# ########################################################################
|
|
my $q = new Quoter();
|
|
my $o = new OptionParser();
|
|
$o->get_specs();
|
|
$o->get_opts();
|
|
|
|
my $dp = $o->DSNParser();
|
|
$dp->prop('set-vars', $o->get('set-vars'));
|
|
|
|
# The original table, i.e. the one being altered, must be specified
|
|
# on the command line via the DSN.
|
|
my ($db, $tbl);
|
|
my $dsn = shift @ARGV;
|
|
if ( !$dsn ) {
|
|
$o->save_error('A DSN must be specified');
|
|
}
|
|
else {
|
|
# Parse DSN string and convert it to a DSN data struct.
|
|
$dsn = $dp->parse($dsn, $dp->parse_options($o));
|
|
$db = $dsn->{D};
|
|
$tbl = $dsn->{t};
|
|
}
|
|
|
|
my $alter_fk_method = $o->get('alter-foreign-keys-method') || '';
|
|
if ( $alter_fk_method eq 'drop_swap' ) {
|
|
$o->set('swap-tables', 0);
|
|
$o->set('drop-old-table', 0);
|
|
}
|
|
|
|
# Explicit --chunk-size disable auto chunk sizing.
|
|
$o->set('chunk-time', 0) if $o->got('chunk-size');
|
|
|
|
foreach my $opt ( qw(max-load critical-load) ) {
|
|
next unless $o->has($opt);
|
|
my $spec = $o->get($opt);
|
|
eval {
|
|
MySQLStatusWaiter::_parse_spec($o->get($opt));
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
chomp $EVAL_ERROR;
|
|
$o->save_error("Invalid --$opt: $EVAL_ERROR");
|
|
}
|
|
}
|
|
|
|
if ( !$o->get('help') ) {
|
|
if ( @ARGV ) {
|
|
$o->save_error('Specify only one DSN on the command line');
|
|
}
|
|
|
|
if ( !$db || !$tbl ) {
|
|
$o->save_error("The DSN must specify a database (D) and a table (t)");
|
|
}
|
|
|
|
if ( $o->get('progress') ) {
|
|
eval { Progress->validate_spec($o->get('progress')) };
|
|
if ( $EVAL_ERROR ) {
|
|
chomp $EVAL_ERROR;
|
|
$o->save_error("--progress $EVAL_ERROR");
|
|
}
|
|
}
|
|
|
|
# See the "pod-based-option-value-validation" spec for how this may
|
|
# be automagically validated.
|
|
if ( $alter_fk_method
|
|
&& $alter_fk_method ne 'auto'
|
|
&& $alter_fk_method ne 'rebuild_constraints'
|
|
&& $alter_fk_method ne 'drop_swap'
|
|
&& $alter_fk_method ne 'none' )
|
|
{
|
|
$o->save_error("Invalid --alter-foreign-keys-method value: $alter_fk_method");
|
|
}
|
|
|
|
if ( $alter_fk_method eq 'drop_swap' && !$o->get('drop-new-table') ) {
|
|
$o->save_error("--alter-foreign-keys-method=drop_swap does not work with --no-drop-new-table.");
|
|
}
|
|
}
|
|
|
|
$o->usage_or_errors();
|
|
|
|
if ( $o->get('quiet') ) {
|
|
# BARON: this will fail on Windows, where there is no /dev/null. I feel
|
|
# it's a hack, like ignoring a problem instead of fixing it somehow. We
|
|
# should take a look at the things that get printed in a "normal"
|
|
# non-quiet run, and "if !quiet" them, and then do some kind of Logger.pm
|
|
# or Messager.pm module for a future release.
|
|
close STDOUT;
|
|
open STDOUT, '>', '/dev/null'
|
|
or warn "Cannot reopen STDOUT to /dev/null: $OS_ERROR";
|
|
}
|
|
|
|
# ########################################################################
|
|
# Connect to MySQL.
|
|
# ########################################################################
|
|
my $set_on_connect = sub {
|
|
my ($dbh) = @_;
|
|
|
|
# See the same code in pt-table-checksum.
|
|
my $lock_wait_timeout = $o->get('lock-wait-timeout');
|
|
my $set_lwt = "SET SESSION innodb_lock_wait_timeout=$lock_wait_timeout";
|
|
PTDEBUG && _d($set_lwt);
|
|
eval {
|
|
$dbh->do($set_lwt);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
PTDEBUG && _d($EVAL_ERROR);
|
|
# Get the server's current value.
|
|
my $sql = "SHOW SESSION VARIABLES LIKE 'innodb_lock_wait_timeout'";
|
|
PTDEBUG && _d($dbh, $sql);
|
|
my (undef, $curr_lwt) = $dbh->selectrow_array($sql);
|
|
PTDEBUG && _d('innodb_lock_wait_timeout on server:', $curr_lwt);
|
|
if ( !defined $curr_lwt ) {
|
|
PTDEBUG && _d('innodb_lock_wait_timeout does not exist;',
|
|
'InnoDB is probably disabled');
|
|
}
|
|
elsif ( $curr_lwt > $lock_wait_timeout ) {
|
|
warn "Failed to $set_lwt: $EVAL_ERROR\n"
|
|
. "The current innodb_lock_wait_timeout value "
|
|
. "$curr_lwt is greater than the --lock-wait-timeout "
|
|
. "value $lock_wait_timeout and the variable cannot be "
|
|
. "changed. innodb_lock_wait_timeout is only dynamic when "
|
|
. "using the InnoDB plugin. To prevent this warning, either "
|
|
. "specify --lock-wait-time=$curr_lwt, or manually set "
|
|
. "innodb_lock_wait_timeout to a value less than or equal "
|
|
. "to $lock_wait_timeout and restart MySQL.\n";
|
|
}
|
|
}
|
|
};
|
|
|
|
# Do not call "new Cxn(" directly; use this sub so that set_on_connect
|
|
# is applied to every cxn.
|
|
# BARON: why not make this a subroutine instead of a subroutine variable? I
|
|
# think that can be less confusing. Also, the $set_on_connect variable can be
|
|
# inlined into this subroutine. Many of our tools have a get_dbh() subroutine
|
|
# and it might be good to just make a convention of it.
|
|
my $make_cxn = sub {
|
|
my (%args) = @_;
|
|
my $cxn = new Cxn(
|
|
%args,
|
|
DSNParser => $dp,
|
|
OptionParser => $o,
|
|
set => $set_on_connect,
|
|
);
|
|
eval { $cxn->connect() }; # connect or die trying
|
|
if ( $EVAL_ERROR ) {
|
|
die "Cannot connect to MySQL: $EVAL_ERROR\n";
|
|
}
|
|
return $cxn;
|
|
};
|
|
|
|
my $cxn = $make_cxn->(dsn => $dsn);
|
|
|
|
# ########################################################################
|
|
# Check if MySQL is new enough to have the triggers we need.
|
|
# Although triggers were introduced in 5.0.2, "Prior to MySQL 5.0.10,
|
|
# triggers cannot contain direct references to tables by name."
|
|
# ########################################################################
|
|
my $vp = new VersionParser();
|
|
if ( !$vp->version_ge($cxn->dbh(), '5.0.10') ) {
|
|
die "This tool requires MySQL 5.0.10 or newer.\n";
|
|
}
|
|
|
|
# ########################################################################
|
|
# Setup lag and load monitors.
|
|
# ########################################################################
|
|
my $slaves; # all slaves that are found or specified
|
|
my $slave_lag_cxns; # slaves whose lag we'll check
|
|
my $replica_lag; # ReplicaLagWaiter object
|
|
my $replica_lag_pr; # Progress for ReplicaLagWaiter
|
|
my $sys_load; # MySQLStatusWaiter object
|
|
my $sys_load_pr; # Progress for MySQLStatusWaiter object
|
|
|
|
if ( $o->get('execute') ) {
|
|
# #####################################################################
|
|
# Find and connect to slaves.
|
|
# #####################################################################
|
|
my $ms = new MasterSlave();
|
|
$slaves = $ms->get_slaves(
|
|
dbh => $cxn->dbh(),
|
|
dsn => $cxn->dsn(),
|
|
OptionParser => $o,
|
|
DSNParser => $dp,
|
|
Quoter => $q,
|
|
make_cxn => sub {
|
|
return $make_cxn->(@_, prev_dsn => $cxn->dsn());
|
|
},
|
|
);
|
|
PTDEBUG && _d(scalar @$slaves, 'slaves found');
|
|
|
|
if ( $o->get('check-slave-lag') ) {
|
|
PTDEBUG && _d('Will use --check-slave-lag to check for slave lag');
|
|
my $cxn = $make_cxn->(
|
|
dsn_string => $o->get('check-slave-lag'),
|
|
prev_dsn => $cxn->dsn(),
|
|
);
|
|
$slave_lag_cxns = [ $cxn ];
|
|
}
|
|
else {
|
|
PTDEBUG && _d('Will check slave lag on all slaves');
|
|
$slave_lag_cxns = $slaves;
|
|
}
|
|
|
|
# #####################################################################
|
|
# Check for replication filters.
|
|
# #####################################################################
|
|
if ( $o->get('check-replication-filters') ) {
|
|
PTDEBUG && _d("Checking slave replication filters");
|
|
my @all_repl_filters;
|
|
foreach my $slave ( @$slaves ) {
|
|
my $repl_filters = $ms->get_replication_filters(
|
|
dbh => $slave->dbh(),
|
|
);
|
|
if ( keys %$repl_filters ) {
|
|
push @all_repl_filters,
|
|
{ name => $slave->name(),
|
|
filters => $repl_filters,
|
|
};
|
|
}
|
|
}
|
|
if ( @all_repl_filters ) {
|
|
my $msg = "Replication filters are set on these hosts:\n";
|
|
foreach my $host ( @all_repl_filters ) {
|
|
my $filters = $host->{filters};
|
|
$msg .= " $host->{name}\n"
|
|
. join("\n", map { " $_ = $host->{filters}->{$_}" }
|
|
keys %{$host->{filters}})
|
|
. "\n";
|
|
}
|
|
$msg .= "Please read the --check-replication-filters documentation "
|
|
. "to learn how to solve this problem.";
|
|
die $msg;
|
|
}
|
|
}
|
|
|
|
# #####################################################################
|
|
# Make a ReplicaLagWaiter to help wait for slaves after each chunk.
|
|
# #####################################################################
|
|
my $sleep = sub {
|
|
# Don't let the master dbh die while waiting for slaves because we
|
|
# may wait a very long time for slaves.
|
|
my $dbh = $cxn->dbh();
|
|
if ( !$dbh || !$dbh->ping() ) {
|
|
eval { $dbh = $cxn->connect() }; # connect or die trying
|
|
if ( $EVAL_ERROR ) {
|
|
$oktorun = 0; # flag for cleanup tasks
|
|
chomp $EVAL_ERROR;
|
|
die "Lost connection to " . $cxn->name() . " while waiting for "
|
|
. "replica lag ($EVAL_ERROR)\n";
|
|
}
|
|
}
|
|
$dbh->do("SELECT 'pt-online-schema-change keepalive'");
|
|
sleep $o->get('check-interval');
|
|
return;
|
|
};
|
|
|
|
my $get_lag = sub {
|
|
my ($cxn) = @_;
|
|
my $dbh = $cxn->dbh();
|
|
if ( !$dbh || !$dbh->ping() ) {
|
|
eval { $dbh = $cxn->connect() }; # connect or die trying
|
|
if ( $EVAL_ERROR ) {
|
|
$oktorun = 0; # flag for cleanup tasks
|
|
chomp $EVAL_ERROR;
|
|
die "Lost connection to replica " . $cxn->name()
|
|
. " while attempting to get its lag ($EVAL_ERROR)\n";
|
|
}
|
|
}
|
|
return $ms->get_slave_lag($dbh);
|
|
};
|
|
|
|
$replica_lag = new ReplicaLagWaiter(
|
|
slaves => $slave_lag_cxns,
|
|
max_lag => $o->get('max-lag'),
|
|
oktorun => sub { return $oktorun },
|
|
get_lag => $get_lag,
|
|
sleep => $sleep,
|
|
);
|
|
|
|
my $get_status;
|
|
{
|
|
my $sql = "SHOW GLOBAL STATUS LIKE ?";
|
|
my $sth = $cxn->dbh()->prepare($sql);
|
|
|
|
$get_status = sub {
|
|
my ($var) = @_;
|
|
PTDEBUG && _d($sth->{Statement}, $var);
|
|
$sth->execute($var);
|
|
my (undef, $val) = $sth->fetchrow_array();
|
|
return $val;
|
|
};
|
|
}
|
|
|
|
eval {
|
|
$sys_load = new MySQLStatusWaiter(
|
|
max_spec => $o->get('max-load'),
|
|
critical_spec => $o->get('critical-load'),
|
|
get_status => $get_status,
|
|
oktorun => sub { return $oktorun },
|
|
sleep => $sleep,
|
|
);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
chomp $EVAL_ERROR;
|
|
die "Error checking --max-load or --critial-load: $EVAL_ERROR. "
|
|
. "Check that the variables specified for --max-load and "
|
|
. "--critical-load are spelled correctly and exist in "
|
|
. "SHOW GLOBAL STATUS. Current values for these options are:\n"
|
|
. " --max-load " . (join(',', @{$o->get('max-load')})) . "\n"
|
|
. " --critial-load " . (join(',', @{$o->get('critical-load')}))
|
|
. "\n";
|
|
}
|
|
|
|
if ( $o->get('progress') ) {
|
|
$replica_lag_pr = new Progress(
|
|
jobsize => scalar @$slaves,
|
|
spec => $o->get('progress'),
|
|
name => "Waiting for replicas to catch up", # not used
|
|
);
|
|
|
|
$sys_load_pr = new Progress(
|
|
jobsize => scalar @{$o->get('max-load')},
|
|
spec => $o->get('progress'),
|
|
name => "Waiting for --max-load", # not used
|
|
);
|
|
}
|
|
}
|
|
|
|
# ########################################################################
|
|
# Setup and check the original table.
|
|
# ########################################################################
|
|
my $tp = new TableParser(Quoter => $q);
|
|
|
|
# Common table data struct (that modules like NibbleIterator expect).
|
|
my $orig_tbl = {
|
|
db => $db,
|
|
tbl => $tbl,
|
|
name => $q->quote($db, $tbl),
|
|
};
|
|
|
|
check_orig_table(
|
|
orig_tbl => $orig_tbl,
|
|
Cxn => $cxn,
|
|
OptionParser => $o,
|
|
TableParser => $tp,
|
|
Quoter => $q,
|
|
);
|
|
|
|
# ########################################################################
|
|
# Get child tables of the original table, if necessary.
|
|
# ########################################################################
|
|
my $child_tables;
|
|
if ( ($alter_fk_method || '') eq 'none' ) {
|
|
print "Not updating foreign keys because "
|
|
. "--alter-foreign-keys-method=none. Foreign keys "
|
|
. "that reference the table will no longer work.\n";
|
|
}
|
|
else {
|
|
$child_tables = find_child_tables(
|
|
tbl => $orig_tbl,
|
|
Cxn => $cxn,
|
|
Quoter => $q,
|
|
);
|
|
if ( !$child_tables ) {
|
|
if ( $alter_fk_method ) {
|
|
warn "No foreign keys reference $orig_tbl->{name}; ignoring "
|
|
. "--alter-foreign-keys-method.\n";
|
|
|
|
if ( $alter_fk_method eq 'drop_swap' ) {
|
|
# These opts are disabled at the start if the user specifies
|
|
# the drop_swap method, but now that we know there are no
|
|
# child tables, we must re-enable these to make the alter work.
|
|
$o->set('swap-tables', 1);
|
|
$o->set('drop-old-table', 1);
|
|
}
|
|
|
|
$alter_fk_method = '';
|
|
}
|
|
# No child tables and --alter-fk-method wasn't specified,
|
|
# so nothing to do.
|
|
}
|
|
else {
|
|
print "Child tables:\n";
|
|
foreach my $child_table ( @$child_tables ) {
|
|
printf " %s (approx. %s rows)\n",
|
|
$child_table->{name},
|
|
$child_table->{row_est} || '?';
|
|
}
|
|
|
|
if ( $alter_fk_method ) {
|
|
# Let the user know how we're going to update the child table
|
|
# fk refs.
|
|
my $choice
|
|
= $alter_fk_method eq 'none' ? "not"
|
|
: $alter_fk_method eq 'auto' ? "automatically choose the method to"
|
|
: "use the $alter_fk_method method to";
|
|
print "Will $choice update foreign keys.\n";
|
|
}
|
|
else {
|
|
print "You did not specify --alter-foreign-keys-method, but there "
|
|
. "are foreign keys that reference the table. "
|
|
. "Please read the tool's documentation carefully.\n";
|
|
return 1;
|
|
}
|
|
}
|
|
}
|
|
|
|
# ########################################################################
|
|
# XXX
|
|
# Ready to begin the alter! Nothing has been changed on the server at
|
|
# this point; we've just checked and looked for things. Past this point,
|
|
# the code is live if --execute, else it's doing a --dry-run. Or, if
|
|
# the user didn't read the docs, we may bail out here.
|
|
# XXX
|
|
# ########################################################################
|
|
if ( $o->get('dry-run') ) {
|
|
print "Starting a dry run. $orig_tbl->{name} will not be altered. "
|
|
. "Specify --execute instead of --dry-run to alter the table.\n";
|
|
}
|
|
elsif ( $o->get('execute') ) {
|
|
print "Altering $orig_tbl->{name}...\n";
|
|
}
|
|
else {
|
|
print "Exiting without altering $orig_tbl->{name} because neither "
|
|
. "--dry-run nor --execute was specified. Please read the tool's "
|
|
. "documentation carefully before using this tool.\n";
|
|
return 1;
|
|
}
|
|
|
|
# ########################################################################
|
|
# Create a cleanup task object to undo changes (i.e. clean up) if the
|
|
# code dies, or we may call this explicitly at the end if all goes well.
|
|
# ########################################################################
|
|
my @cleanup_tasks;
|
|
my $cleanup = new CleanupTask(
|
|
sub {
|
|
# XXX We shouldn't copy $EVAL_ERROR here, but I found that
|
|
# errors are not re-thrown in tests. If you comment out this
|
|
# line and the die below, an error fails:
|
|
# not ok 5 - Doesn't try forever to find a new table name
|
|
# Failed test 'Doesn't try forever to find a new table name'
|
|
# at /Users/daniel/p/pt-osc-2.1.1/lib/PerconaTest.pm line 559.
|
|
# ''
|
|
# doesn't match '(?-xism:Failed to find a unique new table name)'
|
|
my $original_error = $EVAL_ERROR;
|
|
foreach my $task ( reverse @cleanup_tasks ) {
|
|
eval {
|
|
$task->();
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
warn "Error cleaning up: $EVAL_ERROR\n";
|
|
}
|
|
}
|
|
die $original_error if $original_error; # rethrow original error
|
|
return;
|
|
}
|
|
);
|
|
|
|
# The last cleanup task is to report whether or not the orig table
|
|
# was altered.
|
|
push @cleanup_tasks, sub {
|
|
PTDEBUG && _d('Clean up done, report if orig table was altered');
|
|
if ( $o->get('dry-run') ) {
|
|
print "Dry run complete. $orig_tbl->{name} was not altered.\n";
|
|
}
|
|
else {
|
|
if ( $orig_tbl->{swapped} ) {
|
|
if ( $orig_tbl->{success} ) {
|
|
print "Successfully altered $orig_tbl->{name}.\n";
|
|
}
|
|
else {
|
|
print "Altered $orig_tbl->{name} but there were errors "
|
|
. "or warnings.\n";
|
|
}
|
|
}
|
|
else {
|
|
print "$orig_tbl->{name} was not altered.\n";
|
|
}
|
|
}
|
|
return;
|
|
};
|
|
|
|
# ########################################################################
|
|
# Check and create PID file if user specified --pid.
|
|
# ########################################################################
|
|
my $daemon;
|
|
if ( $o->get('execute') && $o->get('pid') ) {
|
|
# We're not daemoninzing, it just handles PID stuff.
|
|
$daemon = new Daemon(o=>$o);
|
|
$daemon->make_PID_file();
|
|
}
|
|
|
|
# #####################################################################
|
|
# Step 1: Create the new table.
|
|
# #####################################################################
|
|
my $new_tbl;
|
|
eval {
|
|
$new_tbl = create_new_table(
|
|
orig_tbl => $orig_tbl,
|
|
suffix => '_new',
|
|
Cxn => $cxn,
|
|
Quoter => $q,
|
|
OptionParser => $o,
|
|
TableParser => $tp,
|
|
);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
die "Error creating new table: $EVAL_ERROR\n";
|
|
}
|
|
|
|
# If the new table still exists, drop it unless the tool was interrupted.
|
|
push @cleanup_tasks, sub {
|
|
PTDEBUG && _d('Clean up new table');
|
|
my $new_tbl_exists = $tp->check_table(
|
|
dbh => $cxn->dbh(),
|
|
db => $new_tbl->{db},
|
|
tbl => $new_tbl->{tbl},
|
|
);
|
|
PTDEBUG && _d('New table exists:', $new_tbl_exists ? 'yes' : 'no');
|
|
return unless $new_tbl_exists;
|
|
|
|
my $sql = "DROP TABLE IF EXISTS $new_tbl->{name};";
|
|
if ( !$oktorun ) {
|
|
# The tool was interrupted, so do not drop the new table
|
|
# in case the user wants to resume (once resume capability
|
|
# is implemented).
|
|
print "Not dropping the new table $new_tbl->{name} because "
|
|
. "the tool was interrupted. To drop the new table, "
|
|
. "execute:\n$sql\n";
|
|
}
|
|
elsif ( $orig_tbl->{copied} && !$orig_tbl->{swapped} ) {
|
|
print "Not dropping the new table $new_tbl->{name} because "
|
|
. "--swap-tables failed. To drop the new table, "
|
|
. "execute:\n$sql\n";
|
|
}
|
|
elsif ( !$o->get('drop-new-table') ) {
|
|
# https://bugs.launchpad.net/percona-toolkit/+bug/998831
|
|
print "Not dropping the new table $new_tbl->{name} because "
|
|
. "--no-drop-new-table was specified. To drop the new table, "
|
|
. "execute:\n$sql\n";
|
|
}
|
|
else {
|
|
print "Dropping new table...\n";
|
|
print $sql, "\n" if $o->get('print');
|
|
PTDEBUG && _d($sql);
|
|
eval {
|
|
$cxn->dbh()->do($sql);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
warn "Error dropping new table $new_tbl->{name}: $EVAL_ERROR\n"
|
|
. "To try dropping the new table again, execute:\n$sql\n";
|
|
}
|
|
print "Dropped new table OK.\n";
|
|
}
|
|
};
|
|
|
|
# #####################################################################
|
|
# Step 2: Alter the new, empty table. This should be very quick,
|
|
# or die if the user specified a bad alter statement.
|
|
# #####################################################################
|
|
if ( my $alter = $o->get('alter') ) {
|
|
print "Altering new table...\n";
|
|
my $sql = "ALTER TABLE $new_tbl->{name} $alter";
|
|
print $sql, "\n" if $o->get('print');
|
|
PTDEBUG && _d($sql);
|
|
eval {
|
|
$cxn->dbh()->do($sql);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
die "Error altering new table $new_tbl->{name}: $EVAL_ERROR\n"
|
|
}
|
|
print "Altered $new_tbl->{name} OK.\n"
|
|
}
|
|
|
|
# Get the new table struct. This shouldn't die because
|
|
# we just created the table successfully so we know it's
|
|
# there. But the ghost of Ryan is everywhere.
|
|
my $ddl = $tp->get_create_table(
|
|
$cxn->dbh(),
|
|
$new_tbl->{db},
|
|
$new_tbl->{tbl},
|
|
);
|
|
$new_tbl->{tbl_struct} = $tp->parse($ddl);
|
|
|
|
# Determine what columns the original and new table share.
|
|
# If the user drops a col, that's easy: just don't copy it. If they
|
|
# add a column, it must have a default value. Other alterations
|
|
# may or may not affect the copy process--we'll know when we try!
|
|
# Note: we don't want to examine the --alter statement to see if the
|
|
# cols have changed because that's messy and prone to parsing errors.
|
|
# Col posn (position) is just for looks because user's like
|
|
# to see columns listed in their original order, not Perl's
|
|
# random hash key sorting.
|
|
my $col_posn = $orig_tbl->{tbl_struct}->{col_posn};
|
|
my $orig_cols = $orig_tbl->{tbl_struct}->{is_col};
|
|
my $new_cols = $new_tbl->{tbl_struct}->{is_col};
|
|
my @common_cols = sort { $col_posn->{$a} <=> $col_posn->{$b} }
|
|
grep { $new_cols->{$_} }
|
|
keys %$orig_cols;
|
|
PTDEBUG && _d('Common columns', @common_cols);
|
|
|
|
# ########################################################################
|
|
# Step 3: Create the triggers to capture changes on the original table and
|
|
# apply them to the new table.
|
|
# ########################################################################
|
|
|
|
# Drop the triggers. We can save this cleanup task before
|
|
# adding the triggers because if adding them fails, this will be
|
|
# called which will drop whichever triggers were created.
|
|
push @cleanup_tasks, sub {
|
|
PTDEBUG && _d('Clean up triggers');
|
|
if ( $oktorun ) {
|
|
drop_triggers(
|
|
tbl => $orig_tbl,
|
|
Cxn => $cxn,
|
|
Quoter => $q,
|
|
OptionParser => $o,
|
|
);
|
|
}
|
|
else {
|
|
print "Not dropping triggers because the tool was interrupted. "
|
|
. "To drop the triggers, execute:\n"
|
|
. join("\n", @drop_trigger_sqls) . "\n";
|
|
}
|
|
};
|
|
|
|
eval {
|
|
create_triggers(
|
|
orig_tbl => $orig_tbl,
|
|
new_tbl => $new_tbl,
|
|
columns => \@common_cols,
|
|
Cxn => $cxn,
|
|
Quoter => $q,
|
|
OptionParser => $o,
|
|
);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
die "Error creating triggers: $EVAL_ERROR\n";
|
|
};
|
|
|
|
# #####################################################################
|
|
# Step 4: Copy rows.
|
|
# #####################################################################
|
|
|
|
# The hashref of callbacks below is what NibbleIterator calls internally
|
|
# to do all the copy work. The callbacks do not need to eval their work
|
|
# because the higher call to $nibble_iter->next() is eval'ed which will
|
|
# catch any errors in the callbacks.
|
|
my $total_rows = 0;
|
|
my $total_time = 0;
|
|
my $avg_rate = 0; # rows/second
|
|
my $retry = new Retry(); # for retrying to exec the copy statement
|
|
my $limit = $o->get('chunk-size-limit'); # brevity
|
|
my $chunk_time = $o->get('chunk-time'); # brevity
|
|
|
|
my $callbacks = {
|
|
init => sub {
|
|
my (%args) = @_;
|
|
my $tbl = $args{tbl};
|
|
my $nibble_iter = $args{NibbleIterator};
|
|
|
|
if ( $o->get('dry-run') ) {
|
|
print "Not copying rows because this is a dry run.\n";
|
|
}
|
|
else {
|
|
print "Copying approximately ", $nibble_iter->row_estimate(),
|
|
" rows...\n";
|
|
}
|
|
|
|
if ( $o->get('print') ) {
|
|
# Print the checksum and next boundary statements.
|
|
my $statements = $nibble_iter->statements();
|
|
foreach my $sth ( sort keys %$statements ) {
|
|
next if $sth =~ m/^explain/;
|
|
if ( $statements->{$sth} ) {
|
|
print $statements->{$sth}->{Statement}, "\n";
|
|
}
|
|
}
|
|
}
|
|
|
|
return unless $o->get('execute');
|
|
|
|
# If table is a single chunk on the master, make sure it's also
|
|
# a single chunk on all slaves. E.g. if a slave is out of sync
|
|
# and has a lot more rows than the master, single chunking on the
|
|
# master could cause the slave to choke.
|
|
if ( $nibble_iter->one_nibble() ) {
|
|
PTDEBUG && _d('Getting table row estimate on replicas');
|
|
my @too_large;
|
|
foreach my $slave ( @$slaves ) {
|
|
my ($n_rows) = NibbleIterator::get_row_estimate(
|
|
Cxn => $slave,
|
|
tbl => $tbl,
|
|
);
|
|
PTDEBUG && _d('Table on',$slave->name(),'has', $n_rows, 'rows');
|
|
if ( $n_rows && $n_rows > ($tbl->{chunk_size} * $limit) ) {
|
|
PTDEBUG && _d('Table too large on', $slave->name());
|
|
push @too_large, [$slave->name(), $n_rows || 0];
|
|
}
|
|
}
|
|
if ( @too_large ) {
|
|
my $msg
|
|
= "Cannot copy table $tbl->{name} because"
|
|
. " on the master it would be checksummed in one chunk"
|
|
. " but on these replicas it has too many rows:\n";
|
|
foreach my $info ( @too_large ) {
|
|
$msg .= " $info->[1] rows on $info->[0]\n";
|
|
}
|
|
$msg .= "The current chunk size limit is "
|
|
. ($tbl->{chunk_size} * $limit)
|
|
. " rows (chunk size=$tbl->{chunk_size}"
|
|
. " * chunk size limit=$limit).\n";
|
|
die $msg;
|
|
}
|
|
}
|
|
|
|
return 1; # continue nibbling table
|
|
},
|
|
next_boundaries => sub {
|
|
my (%args) = @_;
|
|
my $tbl = $args{tbl};
|
|
my $nibble_iter = $args{NibbleIterator};
|
|
my $sth = $nibble_iter->statements();
|
|
my $boundary = $nibble_iter->boundaries();
|
|
|
|
return 0 if $o->get('dry-run');
|
|
return 1 if $nibble_iter->one_nibble();
|
|
|
|
# Check that MySQL will use the nibble index for the next upper
|
|
# boundary sql. This check applies to the next nibble. So if
|
|
# the current nibble number is 5, then nibble 5 is already done
|
|
# and we're checking nibble number 6.
|
|
my $expl = explain_statement(
|
|
tbl => $tbl,
|
|
sth => $sth->{explain_upper_boundary},
|
|
vals => [ @{$boundary->{lower}}, $nibble_iter->chunk_size() ],
|
|
);
|
|
if (lc($expl->{key} || '') ne lc($nibble_iter->nibble_index() || '')) {
|
|
my $msg
|
|
= "Aborting copying table $tbl->{name} at chunk "
|
|
. ($nibble_iter->nibble_number() + 1)
|
|
. " because it is not safe to ascend. Chunking should "
|
|
. "use the "
|
|
. ($nibble_iter->nibble_index() || '?')
|
|
. " index, but MySQL EXPLAIN reports that "
|
|
. ($expl->{key} ? "the $expl->{key}" : "no")
|
|
. " index will be used for "
|
|
. $sth->{upper_boundary}->{Statement}
|
|
. " with values "
|
|
. join(", ", map { defined $_ ? $_ : "NULL" }
|
|
(@{$boundary->{lower}}, $nibble_iter->chunk_size()))
|
|
. "\n";
|
|
die $msg;
|
|
}
|
|
|
|
# Once nibbling begins for a table, control does not return to this
|
|
# tool until nibbling is done because, as noted above, all work is
|
|
# done in these callbacks. This callback is the only place where we
|
|
# can prematurely stop nibbling by returning false. This allows
|
|
# Ctrl-C to stop the tool between nibbles instead of between tables.
|
|
return $oktorun; # continue nibbling table?
|
|
},
|
|
exec_nibble => sub {
|
|
my (%args) = @_;
|
|
my $tbl = $args{tbl};
|
|
my $nibble_iter = $args{NibbleIterator};
|
|
my $sth = $nibble_iter->statements();
|
|
my $boundary = $nibble_iter->boundaries();
|
|
|
|
return if $o->get('dry-run');
|
|
|
|
# Count every chunk, even if it's ultimately skipped, etc.
|
|
$tbl->{results}->{n_chunks}++;
|
|
|
|
# If the table is being chunk (i.e., it's not small enough to be
|
|
# consumed by one nibble), then check index usage and chunk size.
|
|
if ( !$nibble_iter->one_nibble() ) {
|
|
my $expl = explain_statement(
|
|
tbl => $tbl,
|
|
sth => $sth->{explain_nibble},
|
|
vals => [ @{$boundary->{lower}}, @{$boundary->{upper}} ],
|
|
);
|
|
|
|
# Ensure that MySQL is using the chunk index.
|
|
if ( lc($expl->{key} || '')
|
|
ne lc($nibble_iter->nibble_index() || '') ) {
|
|
my $msg
|
|
= "Aborting copying table $tbl->{name} at chunk "
|
|
. $nibble_iter->nibble_number()
|
|
. " because it is not safe to chunk. Chunking should "
|
|
. "use the "
|
|
. ($nibble_iter->nibble_index() || '?')
|
|
. " index, but MySQL EXPLAIN reports that "
|
|
. ($expl->{key} ? "the $expl->{key}" : "no")
|
|
. " index will be used for "
|
|
. $sth->{explain_nibble}->{Statement}
|
|
. " with values "
|
|
. join(", ", map { defined $_ ? $_ : "NULL" }
|
|
(@{$boundary->{lower}}, @{$boundary->{upper}}))
|
|
. "\n";
|
|
die $msg;
|
|
}
|
|
|
|
# Check chunk size limit if the upper boundary and next lower
|
|
# boundary are identical.
|
|
if ( $limit ) {
|
|
my $boundary = $nibble_iter->boundaries();
|
|
my $oversize_chunk
|
|
= $limit ? ($expl->{rows} || 0) >= $tbl->{chunk_size} * $limit
|
|
: 0;
|
|
if ( $oversize_chunk
|
|
&& $nibble_iter->identical_boundaries(
|
|
$boundary->{upper}, $boundary->{next_lower}) )
|
|
{
|
|
my $msg
|
|
= "Aborting copying table $tbl->{name} at chunk "
|
|
. $nibble_iter->nibble_number()
|
|
. " because the chunk is too large: MySQL estimates "
|
|
. ($expl->{rows} || 0) . "rows. The current chunk "
|
|
. "size limit is " . ($tbl->{chunk_size} * $limit)
|
|
. " rows (chunk size=$tbl->{chunk_size}"
|
|
. " * chunk size limit=$limit).\n";
|
|
die $msg;
|
|
}
|
|
}
|
|
}
|
|
|
|
# Exec and time the chunk checksum query.
|
|
$tbl->{nibble_time} = exec_nibble(
|
|
%args,
|
|
Retry => $retry,
|
|
Quoter => $q,
|
|
OptionParser => $o,
|
|
);
|
|
PTDEBUG && _d('Nibble time:', $tbl->{nibble_time});
|
|
|
|
# We're executing REPLACE queries which don't return rows.
|
|
# Returning 0 from this callback causes the nibble iter to
|
|
# get the next boundaries/nibble.
|
|
return 0;
|
|
},
|
|
after_nibble => sub {
|
|
my (%args) = @_;
|
|
my $tbl = $args{tbl};
|
|
my $nibble_iter = $args{NibbleIterator};
|
|
|
|
return unless $o->get('execute');
|
|
|
|
# Update rate, chunk size, and progress if the nibble actually
|
|
# selected some rows.
|
|
my $cnt = $tbl->{row_cnt};
|
|
if ( ($cnt || 0) > 0 ) {
|
|
# Update the rate of rows per second for the entire server.
|
|
# This is used for the initial chunk size of the next table.
|
|
$total_rows += $cnt;
|
|
$total_time += $tbl->{nibble_time};
|
|
$avg_rate = int($total_rows / $total_time);
|
|
PTDEBUG && _d('Average copy rate (rows/s):', $avg_rate);
|
|
|
|
# Adjust chunk size. This affects the next chunk.
|
|
if ( $chunk_time ) {
|
|
# Calcuate a new chunk-size based on the rate of rows/s.
|
|
$tbl->{chunk_size} = $tbl->{rate}->update(
|
|
$cnt, # processed this many rows
|
|
$tbl->{nibble_time}, # is this amount of time
|
|
);
|
|
|
|
if ( $tbl->{chunk_size} < 1 ) {
|
|
# This shouldn't happen. WeightedAvgRate::update() may
|
|
# return a value < 1, but minimum chunk size is 1.
|
|
$tbl->{chunk_size} = 1;
|
|
|
|
# This warning is printed once per table.
|
|
if ( !$tbl->{warned_slow} ) {
|
|
warn "Rows are copying very slowly. "
|
|
. "--chunk-size has been automatically reduced to 1. "
|
|
. "Check that the server is not being overloaded, "
|
|
. "or increase --chunk-time. The last chunk "
|
|
. "selected $cnt rows and took "
|
|
. sprintf('%.3f', $tbl->{nibble_time})
|
|
. " seconds to execute.\n";
|
|
$tbl->{warned_slow} = 1;
|
|
}
|
|
}
|
|
|
|
# Update chunk-size based on the rate of rows/s.
|
|
$nibble_iter->set_chunk_size($tbl->{chunk_size});
|
|
}
|
|
|
|
# Every table should have a Progress obj; update it.
|
|
if ( my $tbl_pr = $tbl->{progress} ) {
|
|
$tbl_pr->update( sub { return $total_rows } );
|
|
}
|
|
}
|
|
|
|
# Wait forever for slaves to catch up.
|
|
$replica_lag_pr->start() if $replica_lag_pr;
|
|
$replica_lag->wait(Progress => $replica_lag_pr);
|
|
|
|
# Wait forever for system load to abate. wait() will die if
|
|
# --critical load is reached.
|
|
$sys_load_pr->start() if $sys_load_pr;
|
|
$sys_load->wait(Progress => $sys_load_pr);
|
|
|
|
return;
|
|
},
|
|
done => sub {
|
|
if ( $o->get('execute') ) {
|
|
print "Copied rows OK.\n";
|
|
}
|
|
},
|
|
};
|
|
|
|
# NibbleIterator combines these two statements and adds
|
|
# "FROM $orig_table->{name} WHERE <nibble stuff>".
|
|
my $dml = "INSERT LOW_PRIORITY IGNORE INTO $new_tbl->{name} "
|
|
. "(" . join(', ', map { $q->quote($_) } @common_cols) . ") "
|
|
. "SELECT";
|
|
my $select = join(', ', map { $q->quote($_) } @common_cols);
|
|
|
|
# The chunk size is auto-adjusted, so use --chunk-size as
|
|
# the initial value, but then save and update the adjusted
|
|
# chunk size in the table data struct.
|
|
$orig_tbl->{chunk_size} = $o->get('chunk-size');
|
|
|
|
# This won't (shouldn't) fail because we already verified in
|
|
# check_orig_table() table we can NibbleIterator::can_nibble().
|
|
my $nibble_iter = new NibbleIterator(
|
|
Cxn => $cxn,
|
|
tbl => $orig_tbl,
|
|
chunk_size => $orig_tbl->{chunk_size},
|
|
chunk_index => $o->get('chunk-index'),
|
|
dml => $dml,
|
|
select => $select,
|
|
callbacks => $callbacks,
|
|
OptionParser => $o,
|
|
Quoter => $q,
|
|
TableParser => $tp,
|
|
TableNibbler => new TableNibbler(TableParser => $tp, Quoter => $q),
|
|
comments => {
|
|
bite => "pt-online-schema-change $PID copy table",
|
|
nibble => "pt-online-schema-change $PID copy nibble",
|
|
},
|
|
);
|
|
|
|
# Init a new weighted avg rate calculator for the table.
|
|
$orig_tbl->{rate} = new WeightedAvgRate(target_t => $chunk_time);
|
|
|
|
# Make a Progress obj for this table. It may not be used;
|
|
# depends on how many rows, chunk size, how fast the server
|
|
# is, etc. But just in case, all tables have a Progress obj.
|
|
if ( $o->get('progress')
|
|
&& !$nibble_iter->one_nibble()
|
|
&& $nibble_iter->row_estimate() )
|
|
{
|
|
$orig_tbl->{progress} = new Progress(
|
|
jobsize => $nibble_iter->row_estimate(),
|
|
spec => $o->get('progress'),
|
|
name => "Copying $orig_tbl->{name}",
|
|
);
|
|
}
|
|
|
|
# Start copying rows. This may take awhile, but --progress is on
|
|
# by default so there will be progress updates to stderr.
|
|
eval {
|
|
1 while $nibble_iter->next();
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
die "Error copying rows from $orig_tbl->{name} to "
|
|
. "$new_tbl->{name}: $EVAL_ERROR\n";
|
|
}
|
|
$orig_tbl->{copied} = 1; # flag for cleanup tasks
|
|
|
|
|
|
# XXX Auto-choose the alter fk method BEFORE swapping/renaming tables
|
|
# else everything will break because if drop_swap is chosen, then we
|
|
# most NOT rename tables or drop the old table.
|
|
if ( $alter_fk_method eq 'auto' ) {
|
|
# If chunk time is set, then use the average rate of rows/s
|
|
# from copying the orig table to determine the max size of
|
|
# a child table that can be altered within one chunk time.
|
|
# The limit is a fudge factor. Chunk time won't be set if
|
|
# the user specified --chunk-size=N on the cmd line, in which
|
|
# case the max child table size is their specified chunk size
|
|
# times the fudge factor.
|
|
my $max_rows
|
|
= $o->get('dry-run') ? $o->get('chunk-size') * $limit
|
|
: $chunk_time ? $avg_rate * $chunk_time * $limit
|
|
: $o->get('chunk-size') * $limit;
|
|
PTDEBUG && _d('Max allowed child table size:', $max_rows);
|
|
|
|
$alter_fk_method = determine_alter_fk_method(
|
|
child_tables => $child_tables,
|
|
max_rows => $max_rows,
|
|
Cxn => $cxn,
|
|
OptionParser => $o,
|
|
);
|
|
|
|
if ( $alter_fk_method eq 'drop_swap' ) {
|
|
$o->set('swap-tables', 0);
|
|
$o->set('drop-old-table', 0);
|
|
}
|
|
}
|
|
|
|
# #####################################################################
|
|
# XXX
|
|
# Step 5: Rename tables: orig -> old, new -> orig
|
|
# Past this step, the original table has been altered. This shouldn't
|
|
# fail, but if it does, the failure could be serious depending on what
|
|
# state the tables are left in.
|
|
# XXX
|
|
# #####################################################################
|
|
my $old_tbl;
|
|
if ( $o->get('swap-tables') ) {
|
|
eval {
|
|
$old_tbl = swap_tables(
|
|
orig_tbl => $orig_tbl,
|
|
new_tbl => $new_tbl,
|
|
suffix => '_old',
|
|
Cxn => $cxn,
|
|
Quoter => $q,
|
|
OptionParser => $o,
|
|
);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
die "Error swapping the tables: $EVAL_ERROR\n"
|
|
. "Verify that the original table $orig_tbl->{name} has not "
|
|
. "been modified or renamed to the old table $old_tbl->{name}. "
|
|
. "Then drop the new table $new_tbl->{name} if it exists.\n";
|
|
}
|
|
}
|
|
$orig_tbl->{swapped} = 1; # flag for cleanup tasks
|
|
PTDEBUG && _d('Old table:', Dumper($old_tbl));
|
|
|
|
# #####################################################################
|
|
# Step 6: Update foreign key constraints if there are child tables.
|
|
# #####################################################################
|
|
if ( $child_tables ) {
|
|
eval {
|
|
if ( $alter_fk_method eq 'none' ) {
|
|
# This shouldn't happen, but in case it does we should know.
|
|
warn "The tool detected child tables but "
|
|
. "--alter-foreign-keys-method=none";
|
|
}
|
|
elsif ( $alter_fk_method eq 'rebuild_constraints' ) {
|
|
rebuild_constraints(
|
|
orig_tbl => $orig_tbl,
|
|
old_tbl => $old_tbl,
|
|
child_tables => $child_tables,
|
|
OptionParser => $o,
|
|
Quoter => $q,
|
|
Cxn => $cxn,
|
|
TableParser => $tp,
|
|
);
|
|
}
|
|
elsif ( $alter_fk_method eq 'drop_swap' ) {
|
|
drop_swap(
|
|
orig_tbl => $orig_tbl,
|
|
new_tbl => $new_tbl,
|
|
Cxn => $cxn,
|
|
OptionParser => $o,
|
|
);
|
|
}
|
|
elsif ( !$alter_fk_method
|
|
&& $o->has('alter-foreign-keys-method')
|
|
&& ($o->get('alter-foreign-keys-method') || '') eq 'auto' ) {
|
|
# If --alter-foreign-keys-method is 'auto' and we are on a dry run,
|
|
# $alter_fk_method is left as an empty string.
|
|
print "Not updating foreign key constraints because this is a dry run.\n";
|
|
}
|
|
else {
|
|
# This should "never" happen because we check this var earlier.
|
|
die "Invalid --alter-foreign-keys-method: $alter_fk_method\n";
|
|
}
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
# TODO: improve error message and handling.
|
|
die "Error updating foreign key constraints: $EVAL_ERROR\n";
|
|
}
|
|
}
|
|
|
|
# ########################################################################
|
|
# Step 7: Drop the old table.
|
|
# ########################################################################
|
|
if ( $o->get('drop-old-table') ) {
|
|
if ( $o->get('dry-run') ) {
|
|
print "Not dropping old table because this is a dry run.\n";
|
|
}
|
|
elsif ( !$old_tbl ) {
|
|
print "Not dropping old table because --no-swap-tables was specified.\n";
|
|
}
|
|
else {
|
|
print "Dropping old table...\n";
|
|
|
|
if ( $alter_fk_method eq 'none' ) {
|
|
# Child tables still reference the old table, but the user
|
|
# has chosen to break fks, so we need to disable fk checks
|
|
# in order to drop the old table.
|
|
my $sql = "SET foreign_key_checks=0";
|
|
PTDEBUG && _d($sql);
|
|
print $sql, "\n" if $o->get('print');
|
|
$cxn->dbh()->do($sql);
|
|
}
|
|
|
|
my $sql = "DROP TABLE IF EXISTS $old_tbl->{name}";
|
|
print $sql, "\n" if $o->get('print');
|
|
PTDEBUG && _d($sql);
|
|
eval {
|
|
$cxn->dbh()->do($sql);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
die "Error dropping the old table: $EVAL_ERROR\n";
|
|
}
|
|
print "Dropped old table $old_tbl->{name} OK.\n";
|
|
}
|
|
}
|
|
|
|
# ########################################################################
|
|
# Done.
|
|
# ########################################################################
|
|
$orig_tbl->{success} = 1; # flag for cleanup tasks
|
|
$cleanup = undef; # exec cleanup tasks
|
|
|
|
return $exit_status;
|
|
}
|
|
|
|
# ############################################################################
|
|
# Subroutines.
|
|
# ############################################################################
|
|
# Create the empty "new" table that will later be altered and swapped in
# place of the original table.  The new table's DDL is the original
# table's SHOW CREATE TABLE with the table name replaced and every FK
# constraint name prefixed with "_" (InnoDB stores constraints as
# <database>.<constraint>, so reusing the original names would raise
# error 121, duplicate key violation).
#
# Required args: orig_tbl, Cxn, Quoter, OptionParser, TableParser.
# Optional args: tries (default 10), prefix (default '_'),
#                suffix (default '_new').
#
# Returns a hashref { db, tbl, name } for the created table, or dies if
# CREATE TABLE fails for any reason other than a name collision, or if
# no unique name is found within $tries attempts.
sub create_new_table{
   my (%args) = @_;
   my @required_args = qw(orig_tbl Cxn Quoter OptionParser TableParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($orig_tbl, $cxn, $q, $o, $tp) = @args{@required_args};

   # Get the original table struct.
   my $ddl = $tp->get_create_table(
      $cxn->dbh(),
      $orig_tbl->{db},
      $orig_tbl->{tbl},
   );

   my $tries      = $args{tries}  || 10; # don't try forever
   my $prefix     = $args{prefix} || '_';
   my $suffix     = $args{suffix} || '_new';
   my $table_name = $orig_tbl->{tbl} . $suffix;

   print "Creating new table...\n";
   my @old_tables;
   # Each attempt prepends one more $prefix char to the candidate name:
   # _tbl_new, __tbl_new, ___tbl_new, ...
   # BUGFIX: the previous loop, "while ( $tryno++ < $tries )" with $tryno
   # starting at 1, only made $tries - 1 attempts although the error
   # message below reports $tries.  This loop makes exactly $tries
   # attempts.
   foreach my $attempt ( 1 .. $tries ) {
      $table_name = $prefix . $table_name;
      my $quoted = $q->quote($orig_tbl->{db}, $table_name);

      # Generate SQL to create the new table. We do not use CREATE TABLE LIKE
      # because it doesn't preserve foreign key constraints. Here we need to
      # rename the FK constraints, too. This is because FK constraints are
      # internally stored as <database>.<constraint> and there cannot be
      # duplicates. If we don't rename the constraints, then InnoDB will throw
      # error 121 (duplicate key violation) when we try to execute the CREATE
      # TABLE. TODO: this code isn't perfect. If we rename a constraint from
      # foo to _foo and there is already a constraint with that name in this
      # or another table, we can still have a collision. But if there are
      # multiple FKs on this table, it's hard to know which one is causing the
      # trouble. Should we generate random/UUID FK names or something instead?
      my $sql = $ddl;
      $sql =~ s/\ACREATE TABLE .*?\($/CREATE TABLE $quoted (/m;
      $sql =~ s/^ CONSTRAINT `/ CONSTRAINT `_/gm;
      PTDEBUG && _d($sql);
      eval {
         $cxn->dbh()->do($sql);
      };
      if ( $EVAL_ERROR ) {
         # Ignore this error because if multiple instances of the tool
         # are running, or previous runs failed and weren't cleaned up,
         # then there will be other similarly named tables with fewer
         # leading prefix chars. Or, in rarer cases, the db just happens
         # to have a similarly named table created by the user for other
         # purposes.
         if ( $EVAL_ERROR =~ m/table.+?already exists/i ) {
            push @old_tables, $q->quote($orig_tbl->{db}, $table_name);
            next;
         }

         # Some other error happened. Let the caller catch it.
         die $EVAL_ERROR;
      }
      print $sql, "\n" if $o->get('print'); # the SQL that worked
      print "Created new table $orig_tbl->{db}.$table_name OK.\n";
      return { # success
         db   => $orig_tbl->{db},
         tbl  => $table_name,
         name => $q->quote($orig_tbl->{db}, $table_name),
      };
   }

   # Typos fixed in this message: "attemps" -> "attempts",
   # "no longer need" -> "no longer needed".
   die "Failed to find a unique new table name after $tries attempts. "
      . "The following tables exist which may be left over from previous "
      . "failed runs of the tool:\n"
      . join("\n", map { " $_" } @old_tables)
      . "\nExamine these tables and drop some or all of them if they are "
      . "no longer needed, then re-run the tool.\n";
}
|
|
|
|
# Atomically swap the original and new tables with a single RENAME TABLE:
# orig -> old (a prefixed name, optionally with $args{suffix}), and
# new -> orig.  Under --dry-run nothing is renamed and the original table
# struct is returned so rebuild_constraints() can still show its SQL.
#
# Required args: orig_tbl, new_tbl, Cxn, Quoter, OptionParser.
# Optional args: suffix (appended to the old table's base name).
#
# Returns a hashref { db, tbl, name } for the old (renamed original)
# table on success; dies if the RENAME fails for any reason other than a
# name collision, or if no unique old-table name is found.
sub swap_tables {
   my (%args) = @_;
   my @required_args = qw(orig_tbl new_tbl Cxn Quoter OptionParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($orig_tbl, $new_tbl, $cxn, $q, $o) = @args{@required_args};

   my $prefix     = '_';
   my $table_name = $orig_tbl->{tbl} . ($args{suffix} || '');
   my $tries      = 10; # don't try forever

   # This sub only works for --execute. Since the options are
   # mutually exclusive and we return in the if case, the elsif
   # is just a paranoid check because swapping the tables is one
   # of the most sensitive/dangerous operations.
   if ( $o->get('dry-run') ) {
      print "Not swapping tables because this is a dry run.\n";

      # A return value really isn't needed, but this trick allows
      # rebuild_constraints() to parse and show the sql statements
      # it would use. Otherwise, this has no effect.
      return $orig_tbl;
   }
   elsif ( $o->get('execute') ) {
      print "Swapping tables...\n";

      while ( $tries-- ) {
         $table_name = $prefix . $table_name;
         my $sql = "RENAME TABLE $orig_tbl->{name} "
                 . "TO " . $q->quote($orig_tbl->{db}, $table_name)
                 . ", $new_tbl->{name} TO $orig_tbl->{name}";
         PTDEBUG && _d($sql);
         eval {
            $cxn->dbh()->do($sql);
         };
         if ( $EVAL_ERROR ) {
            # Ignore this error because if multiple instances of the tool
            # are running, or previous runs failed and weren't cleaned up,
            # then there will be other similarly named tables with fewer
            # leading prefix chars. Or, in rarer cases, the db just happens
            # to have a similarly named table created by the user for other
            # purposes.
            next if $EVAL_ERROR =~ m/table.+?already exists/i;

            # Some other error happened. Let caller catch it.
            die $EVAL_ERROR;
         }
         print $sql, "\n" if $o->get('print');
         print "Swapped original and new tables OK.\n";
         return { # success
            db   => $orig_tbl->{db},
            tbl  => $table_name,
            name => $q->quote($orig_tbl->{db}, $table_name),
         };
      }

      # This shouldn't happen.
      # Here and in the attempt to find a new table name we probably ought to
      # use --retries (and maybe a Retry object?)
      # Typo fixed in this message: "serveral" -> "several".
      die "Failed to find a unique old table name after several attempts.\n";
   }
}
|
|
|
|
# Sanity-check the original table before any work begins.  Verifies that
# the table exists, has no triggers (the tool must create its own), can
# be chunked by NibbleIterator, and has a PRIMARY KEY or unique index
# for the DELETE trigger.  Also caches the parsed table struct in
# $orig_tbl->{tbl_struct} as a side effect.
#
# Required args: orig_tbl, Cxn, TableParser, OptionParser, Quoter.
# Returns nothing on success; dies with a descriptive message on any
# failed check.
sub check_orig_table {
   my ( %args ) = @_;
   my @required_args = qw(orig_tbl Cxn TableParser OptionParser Quoter);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($orig_tbl, $cxn, $tp, $o, $q) = @args{@required_args};

   my $dbh = $cxn->dbh();

   # The original table must exist, of course.
   if (!$tp->check_table(dbh=>$dbh,db=>$orig_tbl->{db},tbl=>$orig_tbl->{tbl})) {
      die "The original table $orig_tbl->{name} does not exist.\n";
   }

   # There cannot be any triggers on the original table, because MySQL
   # (at least at the versions this tool targets) allows only one trigger
   # per action/timing and this tool needs to install its own.
   my $sql = 'SHOW TRIGGERS FROM ' . $q->quote($orig_tbl->{db})
           . ' LIKE ' . $q->literal_like($orig_tbl->{tbl});
   PTDEBUG && _d($sql);
   my $triggers = $dbh->selectall_arrayref($sql);
   if ( $triggers && @$triggers ) {
      die "The table $orig_tbl->{name} has triggers. This tool "
         . "needs to create its own triggers, so the table cannot "
         . "already have triggers.\n";
   }

   # Get the table struct. NibbleIterator needs this, and so do we.
   my $ddl = $tp->get_create_table(
      $cxn->dbh(),
      $orig_tbl->{db},
      $orig_tbl->{tbl},
   );
   $orig_tbl->{tbl_struct} = $tp->parse($ddl);

   # Must be able to nibble the original table (to copy rows to the new table).
   eval {
      NibbleIterator::can_nibble(
         Cxn          => $cxn,
         tbl          => $orig_tbl,
         chunk_size   => $o->get('chunk-size'),
         # NOTE(review): this key is spelled "chunk_indx", while the
         # NibbleIterator constructor call elsewhere in this file uses
         # "chunk_index".  If can_nibble() expects "chunk_index", the
         # user's --chunk-index would be silently ignored here — verify
         # against NibbleIterator::can_nibble()'s argument list.
         chunk_indx   => $o->get('chunk-index'),
         OptionParser => $o,
         TableParser  => $tp,
      );
   };
   if ( $EVAL_ERROR ) {
      die "Cannot chunk the original table $orig_tbl->{name}: $EVAL_ERROR\n";
   }

   # Find a pk or unique index to use for the delete trigger. can_nibble()
   # above returns an index, but NibbleIterator will use non-unique indexes,
   # so we have to do this again here.  sort_indexes() puts PRIMARY and
   # unique indexes first, so the first match is the best candidate.
   my $indexes = $orig_tbl->{tbl_struct}->{keys}; # brevity
   foreach my $index ( $tp->sort_indexes($orig_tbl->{tbl_struct}) ) {
      if ( $index eq 'PRIMARY' || $indexes->{$index}->{is_unique} ) {
         PTDEBUG && _d('Delete trigger index:', Dumper($index));
         $orig_tbl->{del_index} = $index;
         last;
      }
   }
   if ( !$orig_tbl->{del_index} ) {
      die "The original table $orig_tbl->{name} does not have a PRIMARY KEY "
         . "or a unique index which is required for the DELETE trigger.\n";
   }

   return; # success
}
|
|
|
|
# Find every table whose foreign keys reference the given table, by
# querying information_schema.key_column_usage, and annotate each child
# with a row estimate so the caller can advise on (or auto-pick) an
# --alter-foreign-keys-method.
#
# Required args: tbl, Cxn, Quoter.
# Returns an arrayref of { db, tbl, name, row_est } hashrefs, or nothing
# (empty list/undef) if the table has no child tables.
sub find_child_tables {
   my ( %args ) = @_;
   foreach my $arg ( qw(tbl Cxn Quoter) ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($parent_tbl, $cxn, $quoter) = @args{qw(tbl Cxn Quoter)};

   PTDEBUG && _d('Finding child tables');

   # NOTE: the db/table names are interpolated directly into the SQL;
   # they originate from the tool's own arguments, not arbitrary user
   # data, but quoting/escaping them would still be safer.
   my $sql
      = "SELECT table_schema, table_name "
      . "FROM information_schema.key_column_usage "
      . "WHERE constraint_schema='$parent_tbl->{db}' "
      . "AND referenced_table_name='$parent_tbl->{tbl}'";
   PTDEBUG && _d($sql);

   my $rows = $cxn->dbh()->selectall_arrayref($sql);
   if ( !$rows || !@$rows ) {
      PTDEBUG && _d('No child tables found');
      return;
   }

   # Build one child-table struct per result row.  The row estimate gives
   # the user some input on choosing an --alter-foreign-keys-method if
   # they don't use "auto".
   my @child_tables = map {
      my ($child_db, $child_name) = @$_;
      my $child = {
         db   => $child_db,
         tbl  => $child_name,
         name => $quoter->quote($child_db, $child_name),
      };
      my ($row_estimate) = NibbleIterator::get_row_estimate(
         Cxn => $cxn,
         tbl => $child,
      );
      $child->{row_est} = $row_estimate;
      $child;
   } @$rows;

   PTDEBUG && _d('Child tables:', Dumper(\@child_tables));
   return \@child_tables;
}
|
|
|
|
# Decide how child-table foreign keys will be repointed at the new
# table: 'rebuild_constraints' (safer) unless any child table's row
# estimate exceeds $max_rows, in which case 'drop_swap'.
#
# Required args: child_tables (arrayref), max_rows, Cxn, OptionParser.
# Returns: method name, or '' on --dry-run (never undef).
sub determine_alter_fk_method {
   my ( %args ) = @_;
   my @required_args = qw(child_tables max_rows Cxn OptionParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($child_tables, $max_rows, $cxn, $o) = @args{@required_args};

   if ( $o->get('dry-run') ) {
      print "Not determining the method to update foreign keys "
          . "because this is a dry run.\n";
      return ''; # $alter_fk_method can't be undef
   }

   # rebuild_constraints is the default because it's safer and doesn't
   # cause the orig table to go missing for a moment.
   my $method = 'rebuild_constraints';

   print "Max rows for the rebuild_constraints method: $max_rows\n",
         "Determining the method to update foreign keys...\n";

   CHILD:
   for my $child_tbl ( @$child_tables ) {
      print "  $child_tbl->{name}: ";
      my ($n_rows) = NibbleIterator::get_row_estimate(
         Cxn => $cxn,
         tbl => $child_tbl,
      );
      if ( $n_rows <= $max_rows ) {
         print "$n_rows rows; can use rebuild_constraints\n";
         next CHILD;
      }
      # One oversized child is enough to force drop_swap for all.
      print "too many rows: $n_rows; must use drop_swap\n";
      $method = 'drop_swap';
      last CHILD;
   }

   return $method || ''; # $alter_fk_method can't be undef
}
|
|
|
|
# Sub: rebuild_constraints
#   Repoint each child table's foreign keys at the new table by dropping
#   and re-adding the constraints with one ALTER TABLE per child table.
#   Called after the original table has been renamed to $old_tbl, so the
#   children's constraints currently reference the renamed table.
#
# Required Arguments:
#   * orig_tbl     - Standard tbl hashref of the table that was altered
#   * old_tbl      - Standard tbl hashref of the renamed original table
#   * child_tables - Arrayref of child tbl hashrefs (see find_child_tables)
#   * Cxn, Quoter, OptionParser, TableParser - Common modules
sub rebuild_constraints {
   my ( %args ) = @_;
   my @required_args = qw(orig_tbl old_tbl child_tables
                          Cxn Quoter OptionParser TableParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($orig_tbl, $old_tbl, $child_tables, $cxn, $q, $o, $tp)
      = @args{@required_args};

   # MySQL has a "feature" where if the parent tbl is in the same db,
   # then the child tbl ref is simply `parent_tbl`, but if the parent tbl
   # is in another db, then the child tbl ref is `other_db`.`parent_tbl`.
   # When we recreate the ref below, we use the db-qualified form, and
   # MySQL will automatically trim the db if the tables are in the same db.
   # Hence the regex must match both the bare and db-qualified forms.
   my $quoted_old_table = $q->quote($old_tbl->{tbl});
   my $constraint = qr/
      ^\s+
      (
         CONSTRAINT.+?
         REFERENCES\s(?:$quoted_old_table|$old_tbl->{name})
         .+
      )$
   /xm;
   PTDEBUG && _d('Rebuilding fk constraint matching', $constraint);

   if ( $o->get('dry-run') ) {
      print "Not rebuilding foreign key constraints because this is a dry run.\n";
   }
   else {
      print "Rebuilding foreign key constraints...\n";
   }

   CHILD_TABLE:
   foreach my $child_tbl ( @$child_tables ) {
      # Extract the matching constraint lines from SHOW CREATE TABLE.
      my $table_def = $tp->get_create_table(
         $cxn->dbh(),
         $child_tbl->{db},
         $child_tbl->{tbl},
      );
      my @constraints = $table_def =~ m/$constraint/g;
      if ( !@constraints ) {
         warn "$child_tbl->{name} has no foreign key "
            . "constraints referencing $old_tbl->{name}.\n";
         next CHILD_TABLE;
      }

      my @rebuilt_constraints;
      foreach my $constraint ( @constraints ) {
         PTDEBUG && _d('Rebuilding fk constraint:', $constraint);

         # Remove trailing commas in case there are multiple constraints on
         # the table.
         $constraint =~ s/,$//;

         # Find the constraint name.  It will be quoted already.
         my ($fk) = $constraint =~ m/CONSTRAINT\s+`([^`]+)`/;

         # Drop the reference to the old table/renamed orig table, and add a
         # new reference to the new table.  InnoDB will throw an error if the
         # new constraint has the same name as the old one, so we must rename
         # it.  Example: after renaming sakila.actor to sakila.actor_old (for
         # example), the foreign key on film_actor looks like this:
         #   CONSTRAINT `fk_film_actor_actor` FOREIGN KEY (`actor_id`)
         #     REFERENCES `actor_old` (`actor_id`) ON UPDATE CASCADE
         # We need it to look like this instead:
         #   CONSTRAINT `_fk_film_actor_actor` FOREIGN KEY (`actor_id`)
         #     REFERENCES `actor` (`actor_id`) ON UPDATE CASCADE
         # Reference the correct table name...
         $constraint =~ s/REFERENCES[^\(]+/REFERENCES $orig_tbl->{name} /;
         # And rename the constraint to avoid conflict
         $constraint =~ s/CONSTRAINT `$fk`/CONSTRAINT `_$fk`/;

         my $sql = "DROP FOREIGN KEY `$fk`, "
                 . "ADD $constraint";
         push @rebuilt_constraints, $sql;
      }

      # One ALTER per child table, dropping and re-adding every matched
      # constraint in a single statement.
      my $sql = "ALTER TABLE $child_tbl->{name} "
              . join(', ', @rebuilt_constraints);
      print $sql, "\n" if $o->get('print');
      if ( $o->get('execute') ) {
         PTDEBUG && _d($sql);
         $cxn->dbh()->do($sql);
      }
   }

   if ( $o->get('execute') ) {
      print "Rebuilt foreign key constraints OK.\n";
   }

   return;
}
|
|
|
|
# Swap tables the risky-but-fast way: disable FK checks, DROP the
# original table, then RENAME the new table into its place.  Honors
# --dry-run / --print / --execute like the rest of the tool.
#
# Required args: orig_tbl, new_tbl, Cxn, OptionParser.
sub drop_swap {
   my ( %args ) = @_;
   my @required_args = qw(orig_tbl new_tbl Cxn OptionParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($orig_tbl, $new_tbl, $cxn, $o) = @args{@required_args};

   print $o->get('dry-run')
      ? "Not drop-swapping tables because this is a dry run.\n"
      : "Drop-swapping tables...\n";

   # foreign_key_checks must be off or the DROP fails while child
   # tables still reference the original table.
   for my $sql (
      "SET foreign_key_checks=0",
      "DROP TABLE IF EXISTS $orig_tbl->{name}",
      "RENAME TABLE $new_tbl->{name} TO $orig_tbl->{name}",
   ) {
      PTDEBUG && _d($sql);
      print $sql, "\n" if $o->get('print');
      $cxn->dbh()->do($sql) if $o->get('execute');
   }

   print "Dropped and swapped tables OK.\n" if $o->get('execute');

   return;
}
|
|
|
|
# Sub: create_triggers
#   Create the AFTER DELETE/UPDATE/INSERT triggers on the original table
#   that keep the new table in sync while rows are being copied.  Also
#   records a DROP TRIGGER statement for each created trigger in the
#   file-level global @drop_trigger_sqls so drop_triggers() can undo
#   exactly what was created.
#
# Required Arguments:
#   * orig_tbl - Standard tbl hashref of the table being altered
#   * new_tbl  - Standard tbl hashref of the new (altered) table
#   * columns  - Arrayref of column names present in both tables
#   * Cxn, Quoter, OptionParser - Common modules
sub create_triggers {
   my ( %args ) = @_;
   my @required_args = qw(orig_tbl new_tbl columns Cxn Quoter OptionParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($orig_tbl, $new_tbl, $cols, $cxn, $q, $o) = @args{@required_args};

   # This sub works for --dry-run and --execute.  With --dry-run it's
   # only interesting if --print is specified, too; then the user can
   # see the create triggers statements for --execute.
   if ( $o->get('dry-run') ) {
      print "Not creating triggers because this is a dry run.\n";
   }
   else {
      print "Creating triggers...\n";
   }

   # Create a unique trigger name prefix based on the orig table name
   # so multiple instances of the tool can run on different tables.
   my $prefix = 'pt_osc_' . $orig_tbl->{db} . '_' . $orig_tbl->{tbl};
   $prefix =~ s/\W/_/g;

   # To be safe, the delete trigger must specify all the columns of the
   # primary key/unique index.  We use null-safe equals (<=>), because
   # unique indexes can be nullable.
   my $tbl_struct = $orig_tbl->{tbl_struct};
   my $del_index = $orig_tbl->{del_index};
   my $del_index_cols = join(" AND ",
      map {
         my $col = $q->quote($_);
         "$new_tbl->{name}.$col <=> OLD.$col"
      } @{$tbl_struct->{keys}->{$del_index}->{cols}} );
   my $delete_trigger
      = "CREATE TRIGGER `${prefix}_del` AFTER DELETE ON $orig_tbl->{name} "
      . "FOR EACH ROW "
      . "DELETE IGNORE FROM $new_tbl->{name} "
      . "WHERE $del_index_cols";

   # The insert and update triggers should only use values for columns
   # that exist in both tables.
   my $qcols    = join(', ', map { $q->quote($_) } @$cols);
   my $new_vals = join(', ', map { "NEW.".$q->quote($_) } @$cols);
   my $insert_trigger
      = "CREATE TRIGGER `${prefix}_ins` AFTER INSERT ON $orig_tbl->{name} "
      . "FOR EACH ROW "
      . "REPLACE INTO $new_tbl->{name} ($qcols) VALUES ($new_vals)";
   my $update_trigger
      = "CREATE TRIGGER `${prefix}_upd` AFTER UPDATE ON $orig_tbl->{name} "
      . "FOR EACH ROW "
      . "REPLACE INTO $new_tbl->{name} ($qcols) VALUES ($new_vals)";

   my @triggers = (
      ['del', $delete_trigger],
      ['upd', $update_trigger],
      ['ins', $insert_trigger],
   );

   # NOTE(review): @drop_trigger_sqls is a file-level global declared
   # outside this sub; it is (re)initialized here before being filled.
   @drop_trigger_sqls = ();
   foreach my $trg ( @triggers ) {
      my ($name, $sql) = @$trg;
      print $sql, "\n" if $o->get('print');
      if ( $o->get('execute') ) {
         # Let caller catch errors.
         PTDEBUG && _d($sql);
         $cxn->dbh()->do($sql);
      }
      # Only save the trigger once it has been created
      # (or faked to be created) so if the 2nd trigger
      # fails to create, we know to only drop the 1st.
      push @drop_trigger_sqls,
         "DROP TRIGGER IF EXISTS "
         . $q->quote($orig_tbl->{db}, "${prefix}_$name") . ";";
   }

   if ( $o->get('execute') ) {
      print "Created triggers OK.\n";
   }

   return;
}
|
|
|
|
# Drop the triggers recorded in the file-level global @drop_trigger_sqls
# (populated by create_triggers).  Failures are warned about, collected,
# and reported at the end with the statements to re-run by hand; each
# failure also sets the global $exit_status to 1.
#
# Required args: tbl, Cxn, Quoter, OptionParser.
sub drop_triggers {
   my ( %args ) = @_;
   my @required_args = qw(tbl Cxn Quoter OptionParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($tbl, $cxn, $q, $o) = @args{@required_args};

   # Works for --dry-run and --execute; --dry-run is only interesting
   # with --print so the user can see the DROP TRIGGER statements.
   print $o->get('dry-run')
      ? "Not dropping triggers because this is a dry run.\n"
      : "Dropping triggers...\n";

   my @not_dropped;
   SQL:
   foreach my $sql ( @drop_trigger_sqls ) {
      print $sql, "\n" if $o->get('print');
      next SQL unless $o->get('execute');
      PTDEBUG && _d($sql);
      eval {
         $cxn->dbh()->do($sql);
      };
      next SQL unless $EVAL_ERROR;
      warn "Error dropping trigger: $EVAL_ERROR\n";
      push @not_dropped, $sql;
      $exit_status = 1;
   }

   if ( $o->get('execute') ) {
      if ( @not_dropped ) {
         warn "To try dropping the triggers again, execute:\n"
            . join("\n", @not_dropped) . "\n";
      }
      else {
         print "Dropped triggers OK.\n";
      }
   }

   return;
}
|
|
|
|
# Sub: exec_nibble
#   Execute one INSERT..SELECT "nibble" that copies a chunk of rows from
#   the original table to the new table, inspecting SHOW WARNINGS and
#   retrying transient errors via the Retry module.
#
# Required Arguments:
#   * Cxn, tbl, NibbleIterator, Retry, Quoter, OptionParser
#
# Returns:
#   Whatever $retry->retry() returns; on success that is the nibble's
#   wall-clock time in seconds (the try callback's return value).
#   Unrecoverable errors die via final_fail for the caller to catch.
sub exec_nibble {
   my (%args) = @_;
   my @required_args = qw(Cxn tbl NibbleIterator Retry Quoter OptionParser);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless $args{$arg};
   }
   my ($cxn, $tbl, $nibble_iter, $retry, $q, $o)= @args{@required_args};

   my $dbh         = $cxn->dbh();
   my $sth         = $nibble_iter->statements();
   my $boundary    = $nibble_iter->boundaries();
   my $lb_quoted   = $q->serialize_list(@{$boundary->{lower}});
   my $ub_quoted   = $q->serialize_list(@{$boundary->{upper}});
   my $chunk       = $nibble_iter->nibble_number();
   my $chunk_index = $nibble_iter->nibble_index();

   # Completely ignore these error codes.
   my %ignore_code = (
      # Error: 1592 SQLSTATE: HY000 (ER_BINLOG_UNSAFE_STATEMENT)
      # Message: Statement may not be safe to log in statement format.
      # Ignore this warning because we have purposely set statement-based
      # replication.
      1592 => 1,
   );

   # Warn once per-table for these error codes if the error message
   # matches the pattern.
   my %warn_code = (
      # Error: 1265 SQLSTATE: 01000 (WARN_DATA_TRUNCATED)
      # Message: Data truncated for column '%s' at row %ld
      1265 => {
         # any pattern
         # use MySQL's message for this warning
      },
   );

   return $retry->retry(
      tries => $o->get('retries'),
      # NOTE(review): fractional sleep requires Time::HiRes's sleep to be
      # imported at file scope; Perl's core sleep truncates 0.25 to 0 —
      # confirm the file's preamble imports Time::HiRes.
      wait  => sub { sleep 0.25; return; },
      try   => sub {
         # ###################################################################
         # Start timing the query.
         # ###################################################################
         my $t_start = time;

         # Execute the INSERT..SELECT query.
         PTDEBUG && _d($sth->{nibble}->{Statement},
            'lower boundary:', @{$boundary->{lower}},
            'upper boundary:', @{$boundary->{upper}});
         $sth->{nibble}->execute(
            # WHERE placeholders: lower boundary first, then upper.
            @{$boundary->{lower}}, # lower boundary values
            @{$boundary->{upper}}, # upper boundary values
         );

         my $t_end = time;
         # ###################################################################
         # End timing the query.
         # ###################################################################

         # How many rows were inserted this time.  Used for auto chunk sizing.
         $tbl->{row_cnt} = $sth->{nibble}->rows();

         # Check if query caused any warnings.
         my $sql_warn = 'SHOW WARNINGS';
         PTDEBUG && _d($sql_warn);
         my $warnings = $dbh->selectall_arrayref($sql_warn, { Slice => {} } );
         foreach my $warning ( @$warnings ) {
            my $code    = ($warning->{code} || 0);
            my $message = $warning->{message};
            if ( $ignore_code{$code} ) {
               PTDEBUG && _d('Ignoring warning:', $code, $message);
               next;
            }
            elsif ( $warn_code{$code}
                    && (!$warn_code{$code}->{pattern}
                        || $message =~ m/$warn_code{$code}->{pattern}/) )
            {
               if ( !$tbl->{"warned_code_$code"} ) { # warn once per table
                  warn "Copying rows caused a MySQL error $code: "
                     . ($warn_code{$code}->{message}
                        ? $warn_code{$code}->{message}
                        : $message)
                     . "\nThis MySQL error is being ignored and further "
                     . "occurrences of it will not be reported.\n";
                  $tbl->{"warned_code_$code"} = 1;
               }
            }
            else {
               # This die will propagate to fail which will return 0
               # and propagate it to final_fail which will die with
               # this error message.
               die "Copying rows caused a MySQL error $code:\n"
                  . " Level: " . ($warning->{level} || '') . "\n"
                  . " Code: " . ($warning->{code} || '') . "\n"
                  . " Message: " . ($warning->{message} || '') . "\n"
                  . " Query: " . $sth->{nibble}->{Statement} . "\n";
            }
         }

         # Success: no warnings, no errors.  Return nibble time.
         return $t_end - $t_start;
      },
      fail => sub {
         my (%args) = @_;
         my $error = $args{error};

         # The query failed/caused an error.  If the error is one of these,
         # then we can possibly retry.
         if ( $error =~ m/Lock wait timeout exceeded/
              || $error =~ m/Deadlock found/
              || $error =~ m/Query execution was interrupted/
         ) {
            # These errors/warnings can be retried, so don't print
            # a warning yet; do that in final_fail.
            return 1; # try again
         }
         elsif ( $error =~ m/MySQL server has gone away/
                 || $error =~ m/Lost connection to MySQL server/
         ) {
            # "gone away" typically means MySQL itself died or was stopped;
            # "Lost connection" means our cxn was killed (KILL <id>).
            $dbh = $cxn->connect(); # connect or die trying
            return 1; # reconnected, try again
         }

         # At this point, either the error/warning cannot be retried,
         # or we failed to reconnect.  Don't retry; call final_fail.
         return 0;
      },
      final_fail => sub {
         my (%args) = @_;
         # This die should be caught by the caller.  Copying rows and
         # the tool will stop, which is probably good because by this
         # point the error or warning indicates that something is wrong.
         die $args{error};
      }
   );
}
|
|
|
|
# Sub: explain_statement
#   Execute a prepared EXPLAIN statement handle and return its first row.
#
# Required Arguments:
#   * tbl  - Standard tbl hashref
#   * sth  - Sth with EXPLAIN <statement>
#   * vals - Values for sth, if any
#
# Returns:
#   Hashref with the EXPLAIN plan (first row of the result set)
sub explain_statement {
   my ( %args ) = @_;
   my @required_args = qw(tbl sth vals);
   foreach my $arg ( @required_args ) {
      die "I need a $arg argument" unless defined $args{$arg};
   }
   my ($tbl, $sth, $vals) = @args{@required_args};

   my $explain_plan;
   eval {
      PTDEBUG && _d($sth->{Statement}, 'params:', @$vals);
      $sth->execute(@$vals);
      $explain_plan = $sth->fetchrow_hashref();
      $sth->finish();
   };
   # This shouldn't happen: EXPLAIN of an already-prepared statement
   # is expected to succeed.
   die "Error executing " . $sth->{Statement} . ": $EVAL_ERROR\n"
      if $EVAL_ERROR;

   PTDEBUG && _d('EXPLAIN plan:', Dumper($explain_plan));
   return $explain_plan;
}
|
|
|
|
# Signal handler: catches signals so we can exit gracefully.
# Announces the signal on STDERR, flags $oktorun so cleanup code knows
# we are shutting down, and exits with status 1.
sub sig_int {
   my ( $signal ) = @_;
   print STDERR "# Exiting on SIG$signal.\n";
   $oktorun = 0; # global flag checked by cleanup tasks
   exit 1;
}
|
|
|
|
# Debug printer: writes "# <package>:<line> <pid> <args...>" to STDERR.
# undef args are rendered as 'undef'; embedded newlines are continued
# with "# " so multi-line values stay comment-prefixed.
sub _d {
   my ($pkg, undef, $line) = caller 0;
   my @parts = map {
      my $text = defined $_ ? $_ : 'undef';
      $text =~ s/\n/\n# /g;
      $text;
   } @_;
   print STDERR "# $pkg:$line $PID ", join(' ', @parts), "\n";
}
|
|
|
|
# ############################################################################
# Run the program.
# ############################################################################
# Only invoke main() when this file is executed directly; when it is
# loaded with require/do, caller() is true and nothing runs, so the
# subs can be used as a library.
if ( !caller ) { exit main(@ARGV); }

1; # Because this is a module as well as a script.
|
|
|
|
# ############################################################################
|
|
# Documentation
|
|
# ############################################################################
|
|
=pod
|
|
|
|
=head1 NAME
|
|
|
|
pt-online-schema-change - ALTER tables without locking them.
|
|
|
|
=head1 SYNOPSIS
|
|
|
|
Usage: pt-online-schema-change [OPTIONS] DSN
|
|
|
|
pt-online-schema-change alters a table's structure without blocking reads or
|
|
writes. Specify the database and table in the DSN. Do not use this tool before
|
|
reading its documentation and checking your backups carefully.
|
|
|
|
Add a column to sakila.actor:
|
|
|
|
pt-online-schema-change --alter "ADD COLUMN c1 INT" D=sakila,t=actor
|
|
|
|
Change sakila.actor to InnoDB, effectively performing OPTIMIZE TABLE in a
|
|
non-blocking fashion because it is already an InnoDB table:
|
|
|
|
pt-online-schema-change --alter "ENGINE=InnoDB" D=sakila,t=actor
|
|
|
|
=head1 RISKS
|
|
|
|
The following section is included to inform users about the potential risks,
|
|
whether known or unknown, of using this tool. The two main categories of risks
|
|
are those created by the nature of the tool (e.g. read-only tools vs. read-write
|
|
tools) and those created by bugs.
|
|
|
|
pt-online-schema-change modifies data and structures. You should be careful with
|
|
it, and test it before using it in production. You should also ensure that you
|
|
have recoverable backups before using this tool.
|
|
|
|
At the time of this release, we know of no bugs that could cause harm to users.
|
|
|
|
The authoritative source for updated information is always the online issue
|
|
tracking system. Issues that affect this tool will be marked as such. You can
|
|
see a list of such issues at the following URL:
|
|
L<http://www.percona.com/bugs/pt-online-schema-change>.
|
|
|
|
See also L<"BUGS"> for more information on filing bugs and getting help.
|
|
|
|
=head1 DESCRIPTION
|
|
|
|
pt-online-schema-change emulates the way that MySQL alters tables internally,
|
|
but it works on a copy of the table you wish to alter. This means that the
|
|
original table is not locked, and clients may continue to read and change data
|
|
in it.
|
|
|
|
pt-online-schema-change works by creating an empty copy of the table to alter,
|
|
modifying it as desired, and then copying rows from the original table into the
|
|
new table. When the copy is complete, it moves away the original table and
|
|
replaces it with the new one. By default, it also drops the original table.
|
|
|
|
The data copy process is performed in small chunks of data, which are varied to
|
|
attempt to make them execute in a specific amount of time (see
|
|
L<"--chunk-time">). This process is very similar to how other tools, such as
|
|
pt-table-checksum, work. Any modifications to data in the original tables
|
|
during the copy will be reflected in the new table, because the tool creates
|
|
triggers on the original table to update the corresponding rows in the new
|
|
table. The use of triggers means that the tool will not work if any triggers
|
|
are already defined on the table.
|
|
|
|
When the tool finishes copying data into the new table, it uses an atomic
|
|
C<RENAME TABLE> operation to simultaneously rename the original and new tables.
|
|
After this is complete, the tool drops the original table.
|
|
|
|
Foreign keys complicate the tool's operation and introduce additional risk. The
|
|
technique of atomically renaming the original and new tables does not work when
|
|
foreign keys refer to the table. The tool must update foreign keys to refer to
|
|
the new table after the schema change is complete. The tool supports two methods
|
|
for accomplishing this. You can read more about this in the documentation for
|
|
L<"--alter-foreign-keys-method">.
|
|
|
|
Foreign keys also cause some side effects. The final table will have the same
|
|
foreign keys and indexes as the original table (unless you specify differently
|
|
in your ALTER statement), but the names of the objects may be changed slightly
|
|
to avoid object name collisions in MySQL and InnoDB.
|
|
|
|
For safety, the tool does not modify the table unless you specify the
|
|
L<"--execute"> option, which is not enabled by default. The tool supports a
|
|
variety of other measures to prevent unwanted load or other problems, including
|
|
automatically detecting replicas, connecting to them, and using the following
|
|
safety checks:
|
|
|
|
=over
|
|
|
|
=item *
|
|
|
|
The tool refuses to operate if it detects replication filters. See
|
|
L<"--[no]check-replication-filters"> for details.
|
|
|
|
=item *
|
|
|
|
The tool pauses the data copy operation if it observes any replicas that are
|
|
delayed in replication. See L<"--max-lag"> for details.
|
|
|
|
=item *
|
|
|
|
The tool pauses or aborts its operation if it detects too much load on the
|
|
server. See L<"--max-load"> and L<"--critical-load"> for details.
|
|
|
|
=item *
|
|
|
|
The tool sets its lock wait timeout to 1 second so that it is more likely to be
|
|
the victim of any lock contention, and less likely to disrupt other
|
|
transactions. See L<"--lock-wait-timeout"> for details.
|
|
|
|
=item *
|
|
|
|
The tool refuses to alter the table if foreign key constraints reference it,
|
|
unless you specify L<"--alter-foreign-keys-method">.
|
|
|
|
=back
|
|
|
|
=head1 OUTPUT
|
|
|
|
The tool prints information about its activities to STDOUT so that you can see
|
|
what it is doing. During the data copy phase, it prints progress reports to
|
|
STDERR. You can get additional information with the L<"--print"> option.
|
|
|
|
=head1 OPTIONS
|
|
|
|
L<"--dry-run"> and L<"--execute"> are mutually exclusive.
|
|
|
|
This tool accepts additional command-line arguments. Refer to the
|
|
L<"SYNOPSIS"> and usage information for details.
|
|
|
|
=over
|
|
|
|
=item --alter
|
|
|
|
type: string
|
|
|
|
The schema modification, without the ALTER TABLE keywords. You can perform
|
|
multiple modifications to the table by specifying them with commas. Please refer
|
|
to the MySQL manual for the syntax of ALTER TABLE.
|
|
|
|
The following limitations apply which, if attempted, will cause the tool
|
|
to fail in unpredictable ways:
|
|
|
|
=over
|
|
|
|
=item *
|
|
|
|
The C<RENAME> clause cannot be used to rename the table.
|
|
|
|
=item *
|
|
|
|
Columns cannot be renamed by dropping and re-adding with the new name.
|
|
The tool will not copy the original column's data to the new column.
|
|
|
|
=back
|
|
|
|
=item --alter-foreign-keys-method
|
|
|
|
type: string
|
|
|
|
How to modify foreign keys so they reference the new table. Foreign keys that
|
|
reference the table to be altered must be treated specially to ensure that they
|
|
continue to reference the correct table. When the tool renames the original
|
|
table to let the new one take its place, the foreign keys "follow" the renamed
|
|
table, and must be changed to reference the new table instead.
|
|
|
|
The tool supports two techniques to achieve this. It automatically finds "child
|
|
tables" that reference the table to be altered.
|
|
|
|
=over
|
|
|
|
=item auto
|
|
|
|
Automatically determine which method is best. The tool uses
|
|
C<rebuild_constraints> if possible (see the description of that method for
|
|
details), and if not, then it uses C<drop_swap>.
|
|
|
|
=item rebuild_constraints
|
|
|
|
This method uses C<ALTER TABLE> to drop and re-add foreign key constraints that
|
|
reference the new table. This is the preferred technique, unless one or more of
|
|
the "child" tables is so large that the C<ALTER> would take too long. The tool
|
|
determines that by comparing the number of rows in the child table to the rate
|
|
at which the tool is able to copy rows from the old table to the new table. If
|
|
the tool estimates that the child table can be altered in less time than the
|
|
L<"--chunk-time">, then it will use this technique. For purposes of estimating
|
|
the time required to alter the child table, the tool multiplies the row-copying
|
|
rate by L<"--chunk-size-limit">, because MySQL's C<ALTER TABLE> is typically
|
|
much faster than the external process of copying rows.
|
|
|
|
Due to a limitation in MySQL, foreign keys will not have the same names after
|
|
the ALTER that they did prior to it. The tool has to rename the foreign key
|
|
when it redefines it, which adds a leading underscore to the name. In some
|
|
cases, MySQL also automatically renames indexes required for the foreign key.
|
|
|
|
=item drop_swap
|
|
|
|
Disable foreign key checks (FOREIGN_KEY_CHECKS=0), then drop the original table
|
|
before renaming the new table into its place. This is different from the normal
|
|
method of swapping the old and new table, which uses an atomic C<RENAME> that is
|
|
undetectable to client applications.
|
|
|
|
This method is faster and does not block, but it is riskier for two reasons.
|
|
First, for a short time between dropping the original table and renaming the
|
|
temporary table, the table to be altered simply does not exist, and queries
|
|
against it will result in an error. Secondly, if there is an error and the new
|
|
table cannot be renamed into the place of the old one, then it is too late to
|
|
abort, because the old table is gone permanently.
|
|
|
|
This method forces C<--no-swap-tables> and C<--no-drop-old-table>.
|
|
|
|
=item none
|
|
|
|
This method is like C<drop_swap> without the "swap". Any foreign keys that
|
|
referenced the original table will now reference a nonexistent table. This will
|
|
typically cause foreign key violations that are visible in C<SHOW ENGINE INNODB
|
|
STATUS>, similar to the following:
|
|
|
|
Trying to add to index `idx_fk_staff_id` tuple:
|
|
DATA TUPLE: 2 fields;
|
|
0: len 1; hex 05; asc ;;
|
|
1: len 4; hex 80000001; asc ;;
|
|
But the parent table `sakila`.`staff_old`
|
|
or its .ibd file does not currently exist!
|
|
|
|
This is because the original table (in this case, sakila.staff) was renamed to
|
|
sakila.staff_old and then dropped. This method of handling foreign key
|
|
constraints is provided so that the database administrator can disable the
|
|
tool's built-in functionality if desired.
|
|
|
|
=back
|
|
|
|
=item --ask-pass
|
|
|
|
Prompt for a password when connecting to MySQL.
|
|
|
|
=item --charset
|
|
|
|
short form: -A; type: string
|
|
|
|
Default character set. If the value is utf8, sets Perl's binmode on
|
|
STDOUT to utf8, passes the mysql_enable_utf8 option to DBD::mysql, and runs SET
|
|
NAMES UTF8 after connecting to MySQL. Any other value sets binmode on STDOUT
|
|
without the utf8 layer, and runs SET NAMES after connecting to MySQL.
|
|
|
|
=item --check-interval
|
|
|
|
type: time; default: 1
|
|
|
|
Sleep time between checks for L<"--max-lag">.
|
|
|
|
=item --[no]check-replication-filters
|
|
|
|
default: yes
|
|
|
|
Abort if any replication filter is set on any server. The tool looks for
|
|
server options that filter replication, such as binlog_ignore_db and
|
|
replicate_do_db. If it finds any such filters, it aborts with an error.
|
|
|
|
If the replicas are configured with any filtering options, you should be careful
|
|
not to modify any databases or tables that exist on the master and not the
|
|
replicas, because it could cause replication to fail. For more information on
|
|
replication rules, see L<http://dev.mysql.com/doc/en/replication-rules.html>.
|
|
|
|
=item --check-slave-lag
|
|
|
|
type: string
|
|
|
|
Pause the data copy until this replica's lag is less than L<"--max-lag">. The
|
|
value is a DSN that inherits properties from the connection options
|
|
(L<"--port">, L<"--user">, etc.). This option overrides the normal behavior of
|
|
finding and continually monitoring replication lag on ALL connected replicas.
|
|
If you don't want to monitor ALL replicas, but you want more than just one
|
|
replica to be monitored, then use the DSN option to the L<"--recursion-method">
|
|
option instead of this option.
|
|
|
|
=item --chunk-index
|
|
|
|
type: string
|
|
|
|
Prefer this index for chunking tables. By default, the tool chooses the most
|
|
appropriate index for chunking. This option lets you specify the index that you
|
|
prefer. If the index doesn't exist, then the tool will fall back to its default
|
|
behavior of choosing an index. The tool adds the index to the SQL statements in
|
|
a C<FORCE INDEX> clause. Be careful when using this option; a poor choice of
|
|
index could cause bad performance.
|
|
|
|
=item --chunk-size
|
|
|
|
type: size; default: 1000
|
|
|
|
Number of rows to select for each chunk copied. Allowable suffixes are
|
|
k, M, G.
|
|
|
|
This option can override the default behavior, which is to adjust chunk size
|
|
dynamically to try to make chunks run in exactly L<"--chunk-time"> seconds.
|
|
When this option isn't set explicitly, its default value is used as a starting
|
|
point, but after that, the tool ignores this option's value. If you set this
|
|
option explicitly, however, then it disables the dynamic adjustment behavior and
|
|
tries to make all chunks exactly the specified number of rows.
|
|
|
|
There is a subtlety: if the chunk index is not unique, then it's possible that
|
|
chunks will be larger than desired. For example, if a table is chunked by an
|
|
index that contains 10,000 of a given value, there is no way to write a WHERE
|
|
clause that matches only 1,000 of the values, and that chunk will be at least
|
|
10,000 rows large. Such a chunk will probably be skipped because of
|
|
L<"--chunk-size-limit">.
|
|
|
|
=item --chunk-size-limit
|
|
|
|
type: float; default: 4.0
|
|
|
|
Do not copy chunks this much larger than the desired chunk size.
|
|
|
|
When a table has no unique indexes, chunk sizes can be inaccurate. This option
|
|
specifies a maximum tolerable limit to the inaccuracy. The tool uses C<EXPLAIN>
|
|
to estimate how many rows are in the chunk. If that estimate exceeds the
|
|
desired chunk size times the limit, then the tool skips the chunk.
|
|
|
|
The minimum value for this option is 1, which means that no chunk can be larger
|
|
than L<"--chunk-size">. You probably don't want to specify 1, because rows
|
|
reported by EXPLAIN are estimates, which can be different from the real number
|
|
of rows in the chunk. You can disable oversized chunk checking by specifying a
|
|
value of 0.
|
|
|
|
The tool also uses this option to determine how to handle foreign keys that
|
|
reference the table to be altered. See L<"--alter-foreign-keys-method"> for
|
|
details.
|
|
|
|
=item --chunk-time
|
|
|
|
type: float; default: 0.5
|
|
|
|
Adjust the chunk size dynamically so each data-copy query takes this long to
|
|
execute. The tool tracks the copy rate (rows per second) and adjusts the chunk
|
|
size after each data-copy query, so that the next query takes this amount of
|
|
time (in seconds) to execute. It keeps an exponentially decaying moving average
|
|
of queries per second, so that if the server's performance changes due to
|
|
changes in server load, the tool adapts quickly.
|
|
|
|
If this option is set to zero, the chunk size doesn't auto-adjust, so query
|
|
times will vary, but query chunk sizes will not. Another way to do the same
|
|
thing is to specify a value for L<"--chunk-size"> explicitly, instead of leaving
|
|
it at the default.
|
|
|
|
=item --config
|
|
|
|
type: Array
|
|
|
|
Read this comma-separated list of config files; if specified, this must be the
|
|
first option on the command line.
|
|
|
|
=item --critical-load
|
|
|
|
type: Array; default: Threads_running=50
|
|
|
|
Examine SHOW GLOBAL STATUS after every chunk, and abort if the load is too high.
|
|
The option accepts a comma-separated list of MySQL status variables and
|
|
thresholds. An optional C<=MAX_VALUE> (or C<:MAX_VALUE>) can follow each
|
|
variable. If not given, the tool determines a threshold by examining the
|
|
current value at startup and doubling it.
|
|
|
|
See L<"--max-load"> for further details. These options work similarly, except
|
|
that this option will abort the tool's operation instead of pausing it, and the
|
|
default value is computed differently if you specify no threshold. The reason
|
|
for this option is as a safety check in case the triggers on the original table
|
|
add so much load to the server that it causes downtime. There is probably no
|
|
single value of Threads_running that is wrong for every server, but a default of
|
|
50 seems likely to be unacceptably high for most servers, indicating that the
|
|
operation should be canceled immediately.
|
|
|
|
=item --defaults-file
|
|
|
|
short form: -F; type: string
|
|
|
|
Only read mysql options from the given file. You must give an absolute
|
|
pathname.
|
|
|
|
=item --[no]drop-new-table
|
|
|
|
default: yes
|
|
|
|
Drop the new table if copying the original table fails.
|
|
|
|
Specifying C<--no-drop-new-table> and C<--no-swap-tables> leaves the new,
|
|
altered copy of the table without modifying the original table. The new
|
|
table name is like C<_TBL_new> where C<TBL> is the table name.
|
|
|
|
C<--no-drop-new-table> does not work with
|
|
C<--alter-foreign-keys-method drop_swap>.
|
|
|
|
=item --[no]drop-old-table
|
|
|
|
default: yes
|
|
|
|
Drop the original table after renaming it. After the original table has been
|
|
successfully renamed to let the new table take its place, and if there are no
|
|
errors, the tool drops the original table by default. If there are any errors,
|
|
the tool leaves the original table in place.
|
|
|
|
If C<--no-swap-tables> is specified, then there is no old table to drop.
|
|
|
|
=item --dry-run
|
|
|
|
Create and alter the new table, but do not create triggers, copy data, or
|
|
replace the original table.
|
|
|
|
=item --execute
|
|
|
|
Indicate that you have read the documentation and want to alter the table. You
|
|
must specify this option to alter the table. If you do not, then the tool will
|
|
only perform some safety checks and exit. This helps ensure that you have read the
|
|
documentation and understand how to use this tool. If you have not read the
|
|
documentation, then do not specify this option.
|
|
|
|
=item --help
|
|
|
|
Show help and exit.
|
|
|
|
=item --host
|
|
|
|
short form: -h; type: string
|
|
|
|
Connect to host.
|
|
|
|
=item --lock-wait-timeout
|
|
|
|
type: int; default: 1
|
|
|
|
Set the session value of C<innodb_lock_wait_timeout>. This option helps guard
|
|
against long lock waits if the data-copy queries become slow for some reason.
|
|
Setting this option dynamically requires the InnoDB plugin, so this works only
|
|
on newer InnoDB and MySQL versions. If the setting's current value is greater
|
|
than the specified value, and the tool cannot set the value as desired, then it
|
|
prints a warning. If the tool cannot set the value but the current value is less
|
|
than or equal to the desired value, there is no error.
|
|
|
|
=item --max-lag
|
|
|
|
type: time; default: 1s
|
|
|
|
Pause the data copy until all replicas' lag is less than this value. After each
|
|
data-copy query (each chunk), the tool looks at the replication lag of
|
|
all replicas to which it connects, using Seconds_Behind_Master. If any replica
|
|
is lagging more than the value of this option, then the tool will sleep
|
|
for L<"--check-interval"> seconds, then check all replicas again. If you
|
|
specify L<"--check-slave-lag">, then the tool only examines that server for
|
|
lag, not all servers. If you want to control exactly which servers the tool
|
|
monitors, use the DSN value to L<"--recursion-method">.
|
|
|
|
The tool waits forever for replicas to stop lagging. If any replica is
|
|
stopped, the tool waits forever until the replica is started. The data copy
|
|
continues when all replicas are running and not lagging too much.
|
|
|
|
The tool prints progress reports while waiting. If a replica is stopped, it
|
|
prints a progress report immediately, then again at every progress report
|
|
interval.
|
|
|
|
=item --max-load
|
|
|
|
type: Array; default: Threads_running=25
|
|
|
|
Examine SHOW GLOBAL STATUS after every chunk, and pause if any status variables
|
|
are higher than their thresholds. The option accepts a comma-separated list of
|
|
MySQL status variables. An optional C<=MAX_VALUE> (or C<:MAX_VALUE>) can follow
|
|
each variable. If not given, the tool determines a threshold by examining the
|
|
current value and increasing it by 20%.
|
|
|
|
For example, if you want the tool to pause when Threads_connected gets too high,
|
|
you can specify "Threads_connected", and the tool will check the current value
|
|
when it starts working and add 20% to that value. If the current value is 100,
|
|
then the tool will pause when Threads_connected exceeds 120, and resume working
|
|
when it is below 120 again. If you want to specify an explicit threshold, such
|
|
as 110, you can use either "Threads_connected:110" or "Threads_connected=110".
|
|
|
|
The purpose of this option is to prevent the tool from adding too much load to
|
|
the server. If the data-copy queries are intrusive, or if they cause lock waits,
|
|
then other queries on the server will tend to block and queue. This will
|
|
typically cause Threads_running to increase, and the tool can detect that by
|
|
running SHOW GLOBAL STATUS immediately after each query finishes. If you
|
|
specify a threshold for this variable, then you can instruct the tool to wait
|
|
until queries are running normally again. This will not prevent queueing,
|
|
however; it will only give the server a chance to recover from the queueing. If
|
|
you notice queueing, it is best to decrease the chunk time.
|
|
|
|
=item --password
|
|
|
|
short form: -p; type: string
|
|
|
|
Password to use when connecting.
|
|
|
|
=item --pid
|
|
|
|
type: string
|
|
|
|
Create the given PID file. The file contains the process ID of the tool's
|
|
instance. The PID file is removed when the tool exits. The tool checks for
|
|
the existence of the PID file when starting; if it exists and the process with
|
|
the matching PID exists, the tool exits.
|
|
|
|
=item --port
|
|
|
|
short form: -P; type: int
|
|
|
|
Port number to use for connection.
|
|
|
|
=item --print
|
|
|
|
Print SQL statements to STDOUT. Specifying this option allows you to see most
|
|
of the statements that the tool executes. You can use this option with
|
|
L<"--dry-run">, for example.
|
|
|
|
=item --progress
|
|
|
|
type: array; default: time,30
|
|
|
|
Print progress reports to STDERR while copying rows. The value is a
|
|
comma-separated list with two parts. The first part can be percentage, time, or
|
|
iterations; the second part specifies how often an update should be printed, in
|
|
percentage, seconds, or number of iterations.
|
|
|
|
=item --quiet
|
|
|
|
short form: -q
|
|
|
|
Do not print messages to STDOUT. Errors and warnings are still printed to
|
|
STDERR.
|
|
|
|
=item --recurse
|
|
|
|
type: int
|
|
|
|
Number of levels to recurse in the hierarchy when discovering replicas.
|
|
Default is infinite. See also L<"--recursion-method">.
|
|
|
|
=item --recursion-method
|
|
|
|
type: string
|
|
|
|
Preferred recursion method for discovering replicas. Possible methods are:
|
|
|
|
METHOD USES
|
|
=========== ==================
|
|
processlist SHOW PROCESSLIST
|
|
hosts SHOW SLAVE HOSTS
|
|
dsn=DSN DSNs from a table
|
|
none Do not find slaves
|
|
|
|
The processlist method is the default, because SHOW SLAVE HOSTS is not
|
|
reliable. However, the hosts method can work better if the server uses a
|
|
non-standard port (not 3306). The tool usually does the right thing and
|
|
finds all replicas, but you may give a preferred method and it will be used
|
|
first.
|
|
|
|
The hosts method requires replicas to be configured with report_host,
|
|
report_port, etc.
|
|
|
|
The dsn method is special: it specifies a table from which other DSN strings
|
|
are read. The specified DSN must specify a D and t, or a database-qualified
|
|
t. The DSN table should have the following structure:
|
|
|
|
CREATE TABLE `dsns` (
|
|
`id` int(11) NOT NULL AUTO_INCREMENT,
|
|
`parent_id` int(11) DEFAULT NULL,
|
|
`dsn` varchar(255) NOT NULL,
|
|
PRIMARY KEY (`id`)
|
|
);
|
|
|
|
To make the tool monitor only the hosts 10.10.1.16 and 10.10.1.17 for
|
|
replication lag, insert the values C<h=10.10.1.16> and C<h=10.10.1.17> into the
|
|
table. Currently, the DSNs are ordered by id, but id and parent_id are otherwise
|
|
ignored.
|
|
|
|
=item --retries
|
|
|
|
type: int; default: 3
|
|
|
|
Retry a chunk this many times when there is a nonfatal error. Nonfatal errors
|
|
are problems such as a lock wait timeout or the query being killed. This option
|
|
applies to the data copy operation.
|
|
|
|
=item --set-vars
|
|
|
|
type: string; default: wait_timeout=10000
|
|
|
|
Set these MySQL variables. Immediately after connecting to MySQL, this string
|
|
will be appended to SET and executed.
|
|
|
|
=item --socket
|
|
|
|
short form: -S; type: string
|
|
|
|
Socket file to use for connection.
|
|
|
|
=item --[no]swap-tables
|
|
|
|
default: yes
|
|
|
|
Swap the original table and the new, altered table. This step completes the
|
|
online schema change process by making the table with the new schema take the
|
|
place of the original table. The original table becomes the "old table," and
|
|
the tool drops it unless you disable L<"--[no]drop-old-table">.
|
|
|
|
=item --user
|
|
|
|
short form: -u; type: string
|
|
|
|
User for login if not current user.
|
|
|
|
=item --version
|
|
|
|
Show version and exit.
|
|
|
|
=back
|
|
|
|
=head1 DSN OPTIONS
|
|
|
|
These DSN options are used to create a DSN. Each option is given like
|
|
C<option=value>. The options are case-sensitive, so P and p are not the
|
|
same option. There cannot be whitespace before or after the C<=> and
|
|
if the value contains whitespace it must be quoted. DSN options are
|
|
comma-separated. See the L<percona-toolkit> manpage for full details.
|
|
|
|
=over
|
|
|
|
=item * A
|
|
|
|
dsn: charset; copy: yes
|
|
|
|
Default character set.
|
|
|
|
=item * D
|
|
|
|
dsn: database; copy: yes
|
|
|
|
Database for the old and new table.
|
|
|
|
=item * F
|
|
|
|
dsn: mysql_read_default_file; copy: yes
|
|
|
|
Only read default options from the given file
|
|
|
|
=item * h
|
|
|
|
dsn: host; copy: yes
|
|
|
|
Connect to host.
|
|
|
|
=item * p
|
|
|
|
dsn: password; copy: yes
|
|
|
|
Password to use when connecting.
|
|
|
|
=item * P
|
|
|
|
dsn: port; copy: yes
|
|
|
|
Port number to use for connection.
|
|
|
|
=item * S
|
|
|
|
dsn: mysql_socket; copy: yes
|
|
|
|
Socket file to use for connection.
|
|
|
|
=item * t
|
|
|
|
dsn: table; copy: no
|
|
|
|
Table to alter.
|
|
|
|
=item * u
|
|
|
|
dsn: user; copy: yes
|
|
|
|
User for login if not current user.
|
|
|
|
=back
|
|
|
|
=head1 ENVIRONMENT
|
|
|
|
The environment variable C<PTDEBUG> enables verbose debugging output to STDERR.
|
|
To enable debugging and capture all output to a file, run the tool like:
|
|
|
|
PTDEBUG=1 pt-online-schema-change ... > FILE 2>&1
|
|
|
|
Be careful: debugging output is voluminous and can generate several megabytes
|
|
of output.
|
|
|
|
=head1 SYSTEM REQUIREMENTS
|
|
|
|
You need Perl, DBI, DBD::mysql, and some core packages that ought to be
|
|
installed in any reasonably new version of Perl.
|
|
|
|
This tool works only on MySQL 5.0.2 and newer versions, because earlier versions
|
|
do not support triggers.
|
|
|
|
=head1 BUGS
|
|
|
|
For a list of known bugs, see L<http://www.percona.com/bugs/pt-online-schema-change>.
|
|
|
|
Please report bugs at L<https://bugs.launchpad.net/percona-toolkit>.
|
|
Include the following information in your bug report:
|
|
|
|
=over
|
|
|
|
=item * Complete command-line used to run the tool
|
|
|
|
=item * Tool L<"--version">
|
|
|
|
=item * MySQL version of all servers involved
|
|
|
|
=item * Output from the tool including STDERR
|
|
|
|
=item * Input files (log/dump/config files, etc.)
|
|
|
|
=back
|
|
|
|
If possible, include debugging output by running the tool with C<PTDEBUG>;
|
|
see L<"ENVIRONMENT">.
|
|
|
|
=head1 DOWNLOADING
|
|
|
|
Visit L<http://www.percona.com/software/percona-toolkit/> to download the
|
|
latest release of Percona Toolkit. Or, get the latest release from the
|
|
command line:
|
|
|
|
wget percona.com/get/percona-toolkit.tar.gz
|
|
|
|
wget percona.com/get/percona-toolkit.rpm
|
|
|
|
wget percona.com/get/percona-toolkit.deb
|
|
|
|
You can also get individual tools from the latest release:
|
|
|
|
wget percona.com/get/TOOL
|
|
|
|
Replace C<TOOL> with the name of any tool.
|
|
|
|
=head1 AUTHORS
|
|
|
|
Daniel Nichter and Baron Schwartz
|
|
|
|
=head1 ACKNOWLEDGMENTS
|
|
|
|
The "online schema change" concept was first implemented by Shlomi Noach
|
|
in his tool C<oak-online-alter-table>, part of
|
|
L<http://code.google.com/p/openarkkit/>. Engineers at Facebook then built
|
|
another version called C<OnlineSchemaChange.php> as explained by their blog
|
|
post: L<http://tinyurl.com/32zeb86>. This tool is a hybrid of both approaches,
|
|
with additional features and functionality not present in either.
|
|
|
|
=head1 ABOUT PERCONA TOOLKIT
|
|
|
|
This tool is part of Percona Toolkit, a collection of advanced command-line
|
|
tools developed by Percona for MySQL support and consulting. Percona Toolkit
|
|
was forked from two projects in June, 2011: Maatkit and Aspersa. Those
|
|
projects were created by Baron Schwartz and developed primarily by him and
|
|
Daniel Nichter, both of whom are employed by Percona. Visit
|
|
L<http://www.percona.com/software/> for more software developed by Percona.
|
|
|
|
=head1 COPYRIGHT, LICENSE, AND WARRANTY
|
|
|
|
This program is copyright 2011-2012 Percona Inc.
|
|
Feedback and improvements are welcome.
|
|
|
|
THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
|
|
WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
|
|
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
|
|
|
|
This program is free software; you can redistribute it and/or modify it under
|
|
the terms of the GNU General Public License as published by the Free Software
|
|
Foundation, version 2; OR the Perl Artistic License. On UNIX and similar
|
|
systems, you can issue `man perlgpl' or `man perlartistic' to read these
|
|
licenses.
|
|
|
|
You should have received a copy of the GNU General Public License along with
|
|
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
|
|
Place, Suite 330, Boston, MA 02111-1307 USA.
|
|
|
|
=head1 VERSION
|
|
|
|
pt-online-schema-change 2.1.1
|
|
|
|
=cut
|