mirror of
https://github.com/percona/percona-toolkit.git
synced 2025-09-28 08:51:44 +00:00

Also added parens around the Transformer import list. Previously it was a bare qw() following the import, like this: Module->import qw( stuff ) However, that's actually ambiguous syntax, which happens to be deprecated in 5.14 (so it throws warnings by default), and will be removed in 5.18.
4312 lines
132 KiB
Perl
Executable File
4312 lines
132 KiB
Perl
Executable File
#!/usr/bin/env perl
|
|
|
|
# This program is part of Percona Toolkit: http://www.percona.com/software/
|
|
# See "COPYRIGHT, LICENSE, AND WARRANTY" at the end of this file for legal
|
|
# notices and disclaimers.
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
# ###########################################################################
|
|
# MasterSlave package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/MasterSlave.pm
|
|
# t/lib/MasterSlave.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package MasterSlave;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
sub new {
|
|
my ( $class, %args ) = @_;
|
|
my $self = {
|
|
%args,
|
|
replication_thread => {},
|
|
};
|
|
return bless $self, $class;
|
|
}
|
|
|
|
sub recurse_to_slaves {
|
|
my ( $self, $args, $level ) = @_;
|
|
$level ||= 0;
|
|
my $dp = $args->{dsn_parser};
|
|
my $dsn = $args->{dsn};
|
|
|
|
my $dbh;
|
|
eval {
|
|
$dbh = $args->{dbh} || $dp->get_dbh(
|
|
$dp->get_cxn_params($dsn), { AutoCommit => 1 });
|
|
MKDEBUG && _d('Connected to', $dp->as_string($dsn));
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
print STDERR "Cannot connect to ", $dp->as_string($dsn), "\n"
|
|
or die "Cannot print: $OS_ERROR";
|
|
return;
|
|
}
|
|
|
|
my $sql = 'SELECT @@SERVER_ID';
|
|
MKDEBUG && _d($sql);
|
|
my ($id) = $dbh->selectrow_array($sql);
|
|
MKDEBUG && _d('Working on server ID', $id);
|
|
my $master_thinks_i_am = $dsn->{server_id};
|
|
if ( !defined $id
|
|
|| ( defined $master_thinks_i_am && $master_thinks_i_am != $id )
|
|
|| $args->{server_ids_seen}->{$id}++
|
|
) {
|
|
MKDEBUG && _d('Server ID seen, or not what master said');
|
|
if ( $args->{skip_callback} ) {
|
|
$args->{skip_callback}->($dsn, $dbh, $level, $args->{parent});
|
|
}
|
|
return;
|
|
}
|
|
|
|
$args->{callback}->($dsn, $dbh, $level, $args->{parent});
|
|
|
|
if ( !defined $args->{recurse} || $level < $args->{recurse} ) {
|
|
|
|
my @slaves =
|
|
grep { !$_->{master_id} || $_->{master_id} == $id } # Only my slaves.
|
|
$self->find_slave_hosts($dp, $dbh, $dsn, $args->{method});
|
|
|
|
foreach my $slave ( @slaves ) {
|
|
MKDEBUG && _d('Recursing from',
|
|
$dp->as_string($dsn), 'to', $dp->as_string($slave));
|
|
$self->recurse_to_slaves(
|
|
{ %$args, dsn => $slave, dbh => undef, parent => $dsn }, $level + 1 );
|
|
}
|
|
}
|
|
}
|
|
|
|
sub find_slave_hosts {
|
|
my ( $self, $dsn_parser, $dbh, $dsn, $method ) = @_;
|
|
|
|
my @methods = qw(processlist hosts);
|
|
if ( $method ) {
|
|
@methods = grep { $_ ne $method } @methods;
|
|
unshift @methods, $method;
|
|
}
|
|
else {
|
|
if ( ($dsn->{P} || 3306) != 3306 ) {
|
|
MKDEBUG && _d('Port number is non-standard; using only hosts method');
|
|
@methods = qw(hosts);
|
|
}
|
|
}
|
|
MKDEBUG && _d('Looking for slaves on', $dsn_parser->as_string($dsn),
|
|
'using methods', @methods);
|
|
|
|
my @slaves;
|
|
METHOD:
|
|
foreach my $method ( @methods ) {
|
|
my $find_slaves = "_find_slaves_by_$method";
|
|
MKDEBUG && _d('Finding slaves with', $find_slaves);
|
|
@slaves = $self->$find_slaves($dsn_parser, $dbh, $dsn);
|
|
last METHOD if @slaves;
|
|
}
|
|
|
|
MKDEBUG && _d('Found', scalar(@slaves), 'slaves');
|
|
return @slaves;
|
|
}
|
|
|
|
sub _find_slaves_by_processlist {
|
|
my ( $self, $dsn_parser, $dbh, $dsn ) = @_;
|
|
|
|
my @slaves = map {
|
|
my $slave = $dsn_parser->parse("h=$_", $dsn);
|
|
$slave->{source} = 'processlist';
|
|
$slave;
|
|
}
|
|
grep { $_ }
|
|
map {
|
|
my ( $host ) = $_->{host} =~ m/^([^:]+):/;
|
|
if ( $host eq 'localhost' ) {
|
|
$host = '127.0.0.1'; # Replication never uses sockets.
|
|
}
|
|
$host;
|
|
} $self->get_connected_slaves($dbh);
|
|
|
|
return @slaves;
|
|
}
|
|
|
|
sub _find_slaves_by_hosts {
|
|
my ( $self, $dsn_parser, $dbh, $dsn ) = @_;
|
|
|
|
my @slaves;
|
|
my $sql = 'SHOW SLAVE HOSTS';
|
|
MKDEBUG && _d($dbh, $sql);
|
|
@slaves = @{$dbh->selectall_arrayref($sql, { Slice => {} })};
|
|
|
|
if ( @slaves ) {
|
|
MKDEBUG && _d('Found some SHOW SLAVE HOSTS info');
|
|
@slaves = map {
|
|
my %hash;
|
|
@hash{ map { lc $_ } keys %$_ } = values %$_;
|
|
my $spec = "h=$hash{host},P=$hash{port}"
|
|
. ( $hash{user} ? ",u=$hash{user}" : '')
|
|
. ( $hash{password} ? ",p=$hash{password}" : '');
|
|
my $dsn = $dsn_parser->parse($spec, $dsn);
|
|
$dsn->{server_id} = $hash{server_id};
|
|
$dsn->{master_id} = $hash{master_id};
|
|
$dsn->{source} = 'hosts';
|
|
$dsn;
|
|
} @slaves;
|
|
}
|
|
|
|
return @slaves;
|
|
}
|
|
|
|
sub get_connected_slaves {
|
|
my ( $self, $dbh ) = @_;
|
|
|
|
my $show = "SHOW GRANTS FOR ";
|
|
my $user = 'CURRENT_USER()';
|
|
my $vp = $self->{VersionParser};
|
|
if ( $vp && !$vp->version_ge($dbh, '4.1.2') ) {
|
|
$user = $dbh->selectrow_arrayref('SELECT USER()')->[0];
|
|
$user =~ s/([^@]+)@(.+)/'$1'\@'$2'/;
|
|
}
|
|
my $sql = $show . $user;
|
|
MKDEBUG && _d($dbh, $sql);
|
|
|
|
my $proc;
|
|
eval {
|
|
$proc = grep {
|
|
m/ALL PRIVILEGES.*?\*\.\*|PROCESS/
|
|
} @{$dbh->selectcol_arrayref($sql)};
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
|
|
if ( $EVAL_ERROR =~ m/no such grant defined for user/ ) {
|
|
MKDEBUG && _d('Retrying SHOW GRANTS without host; error:',
|
|
$EVAL_ERROR);
|
|
($user) = split('@', $user);
|
|
$sql = $show . $user;
|
|
MKDEBUG && _d($sql);
|
|
eval {
|
|
$proc = grep {
|
|
m/ALL PRIVILEGES.*?\*\.\*|PROCESS/
|
|
} @{$dbh->selectcol_arrayref($sql)};
|
|
};
|
|
}
|
|
|
|
die "Failed to $sql: $EVAL_ERROR" if $EVAL_ERROR;
|
|
}
|
|
if ( !$proc ) {
|
|
die "You do not have the PROCESS privilege";
|
|
}
|
|
|
|
$sql = 'SHOW PROCESSLIST';
|
|
MKDEBUG && _d($dbh, $sql);
|
|
grep { $_->{command} =~ m/Binlog Dump/i }
|
|
map { # Lowercase the column names
|
|
my %hash;
|
|
@hash{ map { lc $_ } keys %$_ } = values %$_;
|
|
\%hash;
|
|
}
|
|
@{$dbh->selectall_arrayref($sql, { Slice => {} })};
|
|
}
|
|
|
|
sub is_master_of {
|
|
my ( $self, $master, $slave ) = @_;
|
|
my $master_status = $self->get_master_status($master)
|
|
or die "The server specified as a master is not a master";
|
|
my $slave_status = $self->get_slave_status($slave)
|
|
or die "The server specified as a slave is not a slave";
|
|
my @connected = $self->get_connected_slaves($master)
|
|
or die "The server specified as a master has no connected slaves";
|
|
my (undef, $port) = $master->selectrow_array('SHOW VARIABLES LIKE "port"');
|
|
|
|
if ( $port != $slave_status->{master_port} ) {
|
|
die "The slave is connected to $slave_status->{master_port} "
|
|
. "but the master's port is $port";
|
|
}
|
|
|
|
if ( !grep { $slave_status->{master_user} eq $_->{user} } @connected ) {
|
|
die "I don't see any slave I/O thread connected with user "
|
|
. $slave_status->{master_user};
|
|
}
|
|
|
|
if ( ($slave_status->{slave_io_state} || '')
|
|
eq 'Waiting for master to send event' )
|
|
{
|
|
my ( $master_log_name, $master_log_num )
|
|
= $master_status->{file} =~ m/^(.*?)\.0*([1-9][0-9]*)$/;
|
|
my ( $slave_log_name, $slave_log_num )
|
|
= $slave_status->{master_log_file} =~ m/^(.*?)\.0*([1-9][0-9]*)$/;
|
|
if ( $master_log_name ne $slave_log_name
|
|
|| abs($master_log_num - $slave_log_num) > 1 )
|
|
{
|
|
die "The slave thinks it is reading from "
|
|
. "$slave_status->{master_log_file}, but the "
|
|
. "master is writing to $master_status->{file}";
|
|
}
|
|
}
|
|
return 1;
|
|
}
|
|
|
|
sub get_master_dsn {
|
|
my ( $self, $dbh, $dsn, $dsn_parser ) = @_;
|
|
my $master = $self->get_slave_status($dbh) or return undef;
|
|
my $spec = "h=$master->{master_host},P=$master->{master_port}";
|
|
return $dsn_parser->parse($spec, $dsn);
|
|
}
|
|
|
|
sub get_slave_status {
|
|
my ( $self, $dbh ) = @_;
|
|
if ( !$self->{not_a_slave}->{$dbh} ) {
|
|
my $sth = $self->{sths}->{$dbh}->{SLAVE_STATUS}
|
|
||= $dbh->prepare('SHOW SLAVE STATUS');
|
|
MKDEBUG && _d($dbh, 'SHOW SLAVE STATUS');
|
|
$sth->execute();
|
|
my ($ss) = @{$sth->fetchall_arrayref({})};
|
|
|
|
if ( $ss && %$ss ) {
|
|
$ss = { map { lc($_) => $ss->{$_} } keys %$ss }; # lowercase the keys
|
|
return $ss;
|
|
}
|
|
|
|
MKDEBUG && _d('This server returns nothing for SHOW SLAVE STATUS');
|
|
$self->{not_a_slave}->{$dbh}++;
|
|
}
|
|
}
|
|
|
|
sub get_master_status {
|
|
my ( $self, $dbh ) = @_;
|
|
|
|
if ( $self->{not_a_master}->{$dbh} ) {
|
|
MKDEBUG && _d('Server on dbh', $dbh, 'is not a master');
|
|
return;
|
|
}
|
|
|
|
my $sth = $self->{sths}->{$dbh}->{MASTER_STATUS}
|
|
||= $dbh->prepare('SHOW MASTER STATUS');
|
|
MKDEBUG && _d($dbh, 'SHOW MASTER STATUS');
|
|
$sth->execute();
|
|
my ($ms) = @{$sth->fetchall_arrayref({})};
|
|
MKDEBUG && _d(
|
|
$ms ? map { "$_=" . (defined $ms->{$_} ? $ms->{$_} : '') } keys %$ms
|
|
: '');
|
|
|
|
if ( !$ms || scalar keys %$ms < 2 ) {
|
|
MKDEBUG && _d('Server on dbh', $dbh, 'does not seem to be a master');
|
|
$self->{not_a_master}->{$dbh}++;
|
|
}
|
|
|
|
return { map { lc($_) => $ms->{$_} } keys %$ms }; # lowercase the keys
|
|
}
|
|
|
|
sub wait_for_master {
|
|
my ( $self, %args ) = @_;
|
|
my @required_args = qw(master_status slave_dbh);
|
|
foreach my $arg ( @required_args ) {
|
|
die "I need a $arg argument" unless $args{$arg};
|
|
}
|
|
my ($master_status, $slave_dbh) = @args{@required_args};
|
|
my $timeout = $args{timeout} || 60;
|
|
|
|
my $result;
|
|
my $waited;
|
|
if ( $master_status ) {
|
|
my $sql = "SELECT MASTER_POS_WAIT('$master_status->{file}', "
|
|
. "$master_status->{position}, $timeout)";
|
|
MKDEBUG && _d($slave_dbh, $sql);
|
|
my $start = time;
|
|
($result) = $slave_dbh->selectrow_array($sql);
|
|
|
|
$waited = time - $start;
|
|
|
|
MKDEBUG && _d('Result of waiting:', $result);
|
|
MKDEBUG && _d("Waited", $waited, "seconds");
|
|
}
|
|
else {
|
|
MKDEBUG && _d('Not waiting: this server is not a master');
|
|
}
|
|
|
|
return {
|
|
result => $result,
|
|
waited => $waited,
|
|
};
|
|
}
|
|
|
|
sub stop_slave {
|
|
my ( $self, $dbh ) = @_;
|
|
my $sth = $self->{sths}->{$dbh}->{STOP_SLAVE}
|
|
||= $dbh->prepare('STOP SLAVE');
|
|
MKDEBUG && _d($dbh, $sth->{Statement});
|
|
$sth->execute();
|
|
}
|
|
|
|
sub start_slave {
|
|
my ( $self, $dbh, $pos ) = @_;
|
|
if ( $pos ) {
|
|
my $sql = "START SLAVE UNTIL MASTER_LOG_FILE='$pos->{file}', "
|
|
. "MASTER_LOG_POS=$pos->{position}";
|
|
MKDEBUG && _d($dbh, $sql);
|
|
$dbh->do($sql);
|
|
}
|
|
else {
|
|
my $sth = $self->{sths}->{$dbh}->{START_SLAVE}
|
|
||= $dbh->prepare('START SLAVE');
|
|
MKDEBUG && _d($dbh, $sth->{Statement});
|
|
$sth->execute();
|
|
}
|
|
}
|
|
|
|
sub catchup_to_master {
|
|
my ( $self, $slave, $master, $timeout ) = @_;
|
|
$self->stop_slave($master);
|
|
$self->stop_slave($slave);
|
|
my $slave_status = $self->get_slave_status($slave);
|
|
my $slave_pos = $self->repl_posn($slave_status);
|
|
my $master_status = $self->get_master_status($master);
|
|
my $master_pos = $self->repl_posn($master_status);
|
|
MKDEBUG && _d('Master position:', $self->pos_to_string($master_pos),
|
|
'Slave position:', $self->pos_to_string($slave_pos));
|
|
|
|
my $result;
|
|
if ( $self->pos_cmp($slave_pos, $master_pos) < 0 ) {
|
|
MKDEBUG && _d('Waiting for slave to catch up to master');
|
|
$self->start_slave($slave, $master_pos);
|
|
|
|
$result = $self->wait_for_master(
|
|
master_status => $master_status,
|
|
slave_dbh => $slave,
|
|
timeout => $timeout,
|
|
master_status => $master_status
|
|
);
|
|
if ( !defined $result->{result} ) {
|
|
$slave_status = $self->get_slave_status($slave);
|
|
if ( !$self->slave_is_running($slave_status) ) {
|
|
MKDEBUG && _d('Master position:',
|
|
$self->pos_to_string($master_pos),
|
|
'Slave position:', $self->pos_to_string($slave_pos));
|
|
$slave_pos = $self->repl_posn($slave_status);
|
|
if ( $self->pos_cmp($slave_pos, $master_pos) != 0 ) {
|
|
die "MASTER_POS_WAIT() returned NULL but slave has not "
|
|
. "caught up to master";
|
|
}
|
|
MKDEBUG && _d('Slave is caught up to master and stopped');
|
|
}
|
|
else {
|
|
die "Slave has not caught up to master and it is still running";
|
|
}
|
|
}
|
|
}
|
|
else {
|
|
MKDEBUG && _d("Slave is already caught up to master");
|
|
}
|
|
|
|
return $result;
|
|
}
|
|
|
|
sub catchup_to_same_pos {
|
|
my ( $self, $s1_dbh, $s2_dbh ) = @_;
|
|
$self->stop_slave($s1_dbh);
|
|
$self->stop_slave($s2_dbh);
|
|
my $s1_status = $self->get_slave_status($s1_dbh);
|
|
my $s2_status = $self->get_slave_status($s2_dbh);
|
|
my $s1_pos = $self->repl_posn($s1_status);
|
|
my $s2_pos = $self->repl_posn($s2_status);
|
|
if ( $self->pos_cmp($s1_pos, $s2_pos) < 0 ) {
|
|
$self->start_slave($s1_dbh, $s2_pos);
|
|
}
|
|
elsif ( $self->pos_cmp($s2_pos, $s1_pos) < 0 ) {
|
|
$self->start_slave($s2_dbh, $s1_pos);
|
|
}
|
|
|
|
$s1_status = $self->get_slave_status($s1_dbh);
|
|
$s2_status = $self->get_slave_status($s2_dbh);
|
|
$s1_pos = $self->repl_posn($s1_status);
|
|
$s2_pos = $self->repl_posn($s2_status);
|
|
|
|
if ( $self->slave_is_running($s1_status)
|
|
|| $self->slave_is_running($s2_status)
|
|
|| $self->pos_cmp($s1_pos, $s2_pos) != 0)
|
|
{
|
|
die "The servers aren't both stopped at the same position";
|
|
}
|
|
|
|
}
|
|
|
|
sub slave_is_running {
|
|
my ( $self, $slave_status ) = @_;
|
|
return ($slave_status->{slave_sql_running} || 'No') eq 'Yes';
|
|
}
|
|
|
|
sub has_slave_updates {
|
|
my ( $self, $dbh ) = @_;
|
|
my $sql = q{SHOW VARIABLES LIKE 'log_slave_updates'};
|
|
MKDEBUG && _d($dbh, $sql);
|
|
my ($name, $value) = $dbh->selectrow_array($sql);
|
|
return $value && $value =~ m/^(1|ON)$/;
|
|
}
|
|
|
|
sub repl_posn {
|
|
my ( $self, $status ) = @_;
|
|
if ( exists $status->{file} && exists $status->{position} ) {
|
|
return {
|
|
file => $status->{file},
|
|
position => $status->{position},
|
|
};
|
|
}
|
|
else {
|
|
return {
|
|
file => $status->{relay_master_log_file},
|
|
position => $status->{exec_master_log_pos},
|
|
};
|
|
}
|
|
}
|
|
|
|
sub get_slave_lag {
|
|
my ( $self, $dbh ) = @_;
|
|
my $stat = $self->get_slave_status($dbh);
|
|
return unless $stat; # server is not a slave
|
|
return $stat->{seconds_behind_master};
|
|
}
|
|
|
|
sub pos_cmp {
|
|
my ( $self, $a, $b ) = @_;
|
|
return $self->pos_to_string($a) cmp $self->pos_to_string($b);
|
|
}
|
|
|
|
sub short_host {
|
|
my ( $self, $dsn ) = @_;
|
|
my ($host, $port);
|
|
if ( $dsn->{master_host} ) {
|
|
$host = $dsn->{master_host};
|
|
$port = $dsn->{master_port};
|
|
}
|
|
else {
|
|
$host = $dsn->{h};
|
|
$port = $dsn->{P};
|
|
}
|
|
return ($host || '[default]') . ( ($port || 3306) == 3306 ? '' : ":$port" );
|
|
}
|
|
|
|
sub is_replication_thread {
|
|
my ( $self, $query, %args ) = @_;
|
|
return unless $query;
|
|
|
|
my $type = lc($args{type} || 'all');
|
|
die "Invalid type: $type"
|
|
unless $type =~ m/^binlog_dump|slave_io|slave_sql|all$/i;
|
|
|
|
my $match = 0;
|
|
if ( $type =~ m/binlog_dump|all/i ) {
|
|
$match = 1
|
|
if ($query->{Command} || $query->{command} || '') eq "Binlog Dump";
|
|
}
|
|
if ( !$match ) {
|
|
if ( ($query->{User} || $query->{user} || '') eq "system user" ) {
|
|
MKDEBUG && _d("Slave replication thread");
|
|
if ( $type ne 'all' ) {
|
|
my $state = $query->{State} || $query->{state} || '';
|
|
|
|
if ( $state =~ m/^init|end$/ ) {
|
|
MKDEBUG && _d("Special state:", $state);
|
|
$match = 1;
|
|
}
|
|
else {
|
|
my ($slave_sql) = $state =~ m/
|
|
^(Waiting\sfor\sthe\snext\sevent
|
|
|Reading\sevent\sfrom\sthe\srelay\slog
|
|
|Has\sread\sall\srelay\slog;\swaiting
|
|
|Making\stemp\sfile
|
|
|Waiting\sfor\sslave\smutex\son\sexit)/xi;
|
|
|
|
$match = $type eq 'slave_sql' && $slave_sql ? 1
|
|
: $type eq 'slave_io' && !$slave_sql ? 1
|
|
: 0;
|
|
}
|
|
}
|
|
else {
|
|
$match = 1;
|
|
}
|
|
}
|
|
else {
|
|
MKDEBUG && _d('Not system user');
|
|
}
|
|
|
|
if ( !defined $args{check_known_ids} || $args{check_known_ids} ) {
|
|
my $id = $query->{Id} || $query->{id};
|
|
if ( $match ) {
|
|
$self->{replication_thread}->{$id} = 1;
|
|
}
|
|
else {
|
|
if ( $self->{replication_thread}->{$id} ) {
|
|
MKDEBUG && _d("Thread ID is a known replication thread ID");
|
|
$match = 1;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
MKDEBUG && _d('Matches', $type, 'replication thread:',
|
|
($match ? 'yes' : 'no'), '; match:', $match);
|
|
|
|
return $match;
|
|
}
|
|
|
|
|
|
sub get_replication_filters {
|
|
my ( $self, %args ) = @_;
|
|
my @required_args = qw(dbh);
|
|
foreach my $arg ( @required_args ) {
|
|
die "I need a $arg argument" unless $args{$arg};
|
|
}
|
|
my ($dbh) = @args{@required_args};
|
|
|
|
my %filters = ();
|
|
|
|
my $status = $self->get_master_status($dbh);
|
|
if ( $status ) {
|
|
map { $filters{$_} = $status->{$_} }
|
|
grep { defined $status->{$_} && $status->{$_} ne '' }
|
|
qw(
|
|
binlog_do_db
|
|
binlog_ignore_db
|
|
);
|
|
}
|
|
|
|
$status = $self->get_slave_status($dbh);
|
|
if ( $status ) {
|
|
map { $filters{$_} = $status->{$_} }
|
|
grep { defined $status->{$_} && $status->{$_} ne '' }
|
|
qw(
|
|
replicate_do_db
|
|
replicate_ignore_db
|
|
replicate_do_table
|
|
replicate_ignore_table
|
|
replicate_wild_do_table
|
|
replicate_wild_ignore_table
|
|
);
|
|
|
|
my $sql = "SHOW VARIABLES LIKE 'slave_skip_errors'";
|
|
MKDEBUG && _d($dbh, $sql);
|
|
my $row = $dbh->selectrow_arrayref($sql);
|
|
$filters{slave_skip_errors} = $row->[1] if $row->[1] && $row->[1] ne 'OFF';
|
|
}
|
|
|
|
return \%filters;
|
|
}
|
|
|
|
|
|
sub pos_to_string {
|
|
my ( $self, $pos ) = @_;
|
|
my $fmt = '%s/%020d';
|
|
return sprintf($fmt, @{$pos}{qw(file position)});
|
|
}
|
|
|
|
sub reset_known_replication_threads {
|
|
my ( $self ) = @_;
|
|
$self->{replication_thread} = {};
|
|
return;
|
|
}
|
|
|
|
sub _d {
|
|
my ($package, undef, $line) = caller 0;
|
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
|
map { defined $_ ? $_ : 'undef' }
|
|
@_;
|
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
|
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End MasterSlave package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# OptionParser package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/OptionParser.pm
|
|
# t/lib/OptionParser.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package OptionParser;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
use List::Util qw(max);
|
|
use Getopt::Long;
|
|
|
|
my $POD_link_re = '[LC]<"?([^">]+)"?>';
|
|
|
|
sub new {
|
|
my ( $class, %args ) = @_;
|
|
my @required_args = qw();
|
|
foreach my $arg ( @required_args ) {
|
|
die "I need a $arg argument" unless $args{$arg};
|
|
}
|
|
|
|
my ($program_name) = $PROGRAM_NAME =~ m/([.A-Za-z-]+)$/;
|
|
$program_name ||= $PROGRAM_NAME;
|
|
my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.';
|
|
|
|
my %attributes = (
|
|
'type' => 1,
|
|
'short form' => 1,
|
|
'group' => 1,
|
|
'default' => 1,
|
|
'cumulative' => 1,
|
|
'negatable' => 1,
|
|
);
|
|
|
|
my $self = {
|
|
head1 => 'OPTIONS', # These args are used internally
|
|
skip_rules => 0, # to instantiate another Option-
|
|
item => '--(.*)', # Parser obj that parses the
|
|
attributes => \%attributes, # DSN OPTIONS section. Tools
|
|
parse_attributes => \&_parse_attribs, # don't tinker with these args.
|
|
|
|
%args,
|
|
|
|
strict => 1, # disabled by a special rule
|
|
program_name => $program_name,
|
|
opts => {},
|
|
got_opts => 0,
|
|
short_opts => {},
|
|
defaults => {},
|
|
groups => {},
|
|
allowed_groups => {},
|
|
errors => [],
|
|
rules => [], # desc of rules for --help
|
|
mutex => [], # rule: opts are mutually exclusive
|
|
atleast1 => [], # rule: at least one opt is required
|
|
disables => {}, # rule: opt disables other opts
|
|
defaults_to => {}, # rule: opt defaults to value of other opt
|
|
DSNParser => undef,
|
|
default_files => [
|
|
"/etc/percona-toolkit/percona-toolkit.conf",
|
|
"/etc/percona-toolkit/$program_name.conf",
|
|
"$home/.percona-toolkit.conf",
|
|
"$home/.$program_name.conf",
|
|
],
|
|
types => {
|
|
string => 's', # standard Getopt type
|
|
int => 'i', # standard Getopt type
|
|
float => 'f', # standard Getopt type
|
|
Hash => 'H', # hash, formed from a comma-separated list
|
|
hash => 'h', # hash as above, but only if a value is given
|
|
Array => 'A', # array, similar to Hash
|
|
array => 'a', # array, similar to hash
|
|
DSN => 'd', # DSN
|
|
size => 'z', # size with kMG suffix (powers of 2^10)
|
|
time => 'm', # time, with an optional suffix of s/h/m/d
|
|
},
|
|
};
|
|
|
|
return bless $self, $class;
|
|
}
|
|
|
|
sub get_specs {
|
|
my ( $self, $file ) = @_;
|
|
$file ||= $self->{file} || __FILE__;
|
|
my @specs = $self->_pod_to_specs($file);
|
|
$self->_parse_specs(@specs);
|
|
|
|
open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR";
|
|
my $contents = do { local $/ = undef; <$fh> };
|
|
close $fh;
|
|
if ( $contents =~ m/^=head1 DSN OPTIONS/m ) {
|
|
MKDEBUG && _d('Parsing DSN OPTIONS');
|
|
my $dsn_attribs = {
|
|
dsn => 1,
|
|
copy => 1,
|
|
};
|
|
my $parse_dsn_attribs = sub {
|
|
my ( $self, $option, $attribs ) = @_;
|
|
map {
|
|
my $val = $attribs->{$_};
|
|
if ( $val ) {
|
|
$val = $val eq 'yes' ? 1
|
|
: $val eq 'no' ? 0
|
|
: $val;
|
|
$attribs->{$_} = $val;
|
|
}
|
|
} keys %$attribs;
|
|
return {
|
|
key => $option,
|
|
%$attribs,
|
|
};
|
|
};
|
|
my $dsn_o = new OptionParser(
|
|
description => 'DSN OPTIONS',
|
|
head1 => 'DSN OPTIONS',
|
|
dsn => 0, # XXX don't infinitely recurse!
|
|
item => '\* (.)', # key opts are a single character
|
|
skip_rules => 1, # no rules before opts
|
|
attributes => $dsn_attribs,
|
|
parse_attributes => $parse_dsn_attribs,
|
|
);
|
|
my @dsn_opts = map {
|
|
my $opts = {
|
|
key => $_->{spec}->{key},
|
|
dsn => $_->{spec}->{dsn},
|
|
copy => $_->{spec}->{copy},
|
|
desc => $_->{desc},
|
|
};
|
|
$opts;
|
|
} $dsn_o->_pod_to_specs($file);
|
|
$self->{DSNParser} = DSNParser->new(opts => \@dsn_opts);
|
|
}
|
|
|
|
if ( $contents =~ m/^=head1 VERSION\n\n^(.+)$/m ) {
|
|
$self->{version} = $1;
|
|
MKDEBUG && _d($self->{version});
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub DSNParser {
|
|
my ( $self ) = @_;
|
|
return $self->{DSNParser};
|
|
};
|
|
|
|
sub get_defaults_files {
|
|
my ( $self ) = @_;
|
|
return @{$self->{default_files}};
|
|
}
|
|
|
|
sub _pod_to_specs {
|
|
my ( $self, $file ) = @_;
|
|
$file ||= $self->{file} || __FILE__;
|
|
open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR";
|
|
|
|
my @specs = ();
|
|
my @rules = ();
|
|
my $para;
|
|
|
|
local $INPUT_RECORD_SEPARATOR = '';
|
|
while ( $para = <$fh> ) {
|
|
next unless $para =~ m/^=head1 $self->{head1}/;
|
|
last;
|
|
}
|
|
|
|
while ( $para = <$fh> ) {
|
|
last if $para =~ m/^=over/;
|
|
next if $self->{skip_rules};
|
|
chomp $para;
|
|
$para =~ s/\s+/ /g;
|
|
$para =~ s/$POD_link_re/$1/go;
|
|
MKDEBUG && _d('Option rule:', $para);
|
|
push @rules, $para;
|
|
}
|
|
|
|
die "POD has no $self->{head1} section" unless $para;
|
|
|
|
do {
|
|
if ( my ($option) = $para =~ m/^=item $self->{item}/ ) {
|
|
chomp $para;
|
|
MKDEBUG && _d($para);
|
|
my %attribs;
|
|
|
|
$para = <$fh>; # read next paragraph, possibly attributes
|
|
|
|
if ( $para =~ m/: / ) { # attributes
|
|
$para =~ s/\s+\Z//g;
|
|
%attribs = map {
|
|
my ( $attrib, $val) = split(/: /, $_);
|
|
die "Unrecognized attribute for --$option: $attrib"
|
|
unless $self->{attributes}->{$attrib};
|
|
($attrib, $val);
|
|
} split(/; /, $para);
|
|
if ( $attribs{'short form'} ) {
|
|
$attribs{'short form'} =~ s/-//;
|
|
}
|
|
$para = <$fh>; # read next paragraph, probably short help desc
|
|
}
|
|
else {
|
|
MKDEBUG && _d('Option has no attributes');
|
|
}
|
|
|
|
$para =~ s/\s+\Z//g;
|
|
$para =~ s/\s+/ /g;
|
|
$para =~ s/$POD_link_re/$1/go;
|
|
|
|
$para =~ s/\.(?:\n.*| [A-Z].*|\Z)//s;
|
|
MKDEBUG && _d('Short help:', $para);
|
|
|
|
die "No description after option spec $option" if $para =~ m/^=item/;
|
|
|
|
if ( my ($base_option) = $option =~ m/^\[no\](.*)/ ) {
|
|
$option = $base_option;
|
|
$attribs{'negatable'} = 1;
|
|
}
|
|
|
|
push @specs, {
|
|
spec => $self->{parse_attributes}->($self, $option, \%attribs),
|
|
desc => $para
|
|
. (defined $attribs{default} ? " (default $attribs{default})" : ''),
|
|
group => ($attribs{'group'} ? $attribs{'group'} : 'default'),
|
|
};
|
|
}
|
|
while ( $para = <$fh> ) {
|
|
last unless $para;
|
|
if ( $para =~ m/^=head1/ ) {
|
|
$para = undef; # Can't 'last' out of a do {} block.
|
|
last;
|
|
}
|
|
last if $para =~ m/^=item /;
|
|
}
|
|
} while ( $para );
|
|
|
|
die "No valid specs in $self->{head1}" unless @specs;
|
|
|
|
close $fh;
|
|
return @specs, @rules;
|
|
}
|
|
|
|
sub _parse_specs {
|
|
my ( $self, @specs ) = @_;
|
|
my %disables; # special rule that requires deferred checking
|
|
|
|
foreach my $opt ( @specs ) {
|
|
if ( ref $opt ) { # It's an option spec, not a rule.
|
|
MKDEBUG && _d('Parsing opt spec:',
|
|
map { ($_, '=>', $opt->{$_}) } keys %$opt);
|
|
|
|
my ( $long, $short ) = $opt->{spec} =~ m/^([\w-]+)(?:\|([^!+=]*))?/;
|
|
if ( !$long ) {
|
|
die "Cannot parse long option from spec $opt->{spec}";
|
|
}
|
|
$opt->{long} = $long;
|
|
|
|
die "Duplicate long option --$long" if exists $self->{opts}->{$long};
|
|
$self->{opts}->{$long} = $opt;
|
|
|
|
if ( length $long == 1 ) {
|
|
MKDEBUG && _d('Long opt', $long, 'looks like short opt');
|
|
$self->{short_opts}->{$long} = $long;
|
|
}
|
|
|
|
if ( $short ) {
|
|
die "Duplicate short option -$short"
|
|
if exists $self->{short_opts}->{$short};
|
|
$self->{short_opts}->{$short} = $long;
|
|
$opt->{short} = $short;
|
|
}
|
|
else {
|
|
$opt->{short} = undef;
|
|
}
|
|
|
|
$opt->{is_negatable} = $opt->{spec} =~ m/!/ ? 1 : 0;
|
|
$opt->{is_cumulative} = $opt->{spec} =~ m/\+/ ? 1 : 0;
|
|
$opt->{is_required} = $opt->{desc} =~ m/required/ ? 1 : 0;
|
|
|
|
$opt->{group} ||= 'default';
|
|
$self->{groups}->{ $opt->{group} }->{$long} = 1;
|
|
|
|
$opt->{value} = undef;
|
|
$opt->{got} = 0;
|
|
|
|
my ( $type ) = $opt->{spec} =~ m/=(.)/;
|
|
$opt->{type} = $type;
|
|
MKDEBUG && _d($long, 'type:', $type);
|
|
|
|
|
|
$opt->{spec} =~ s/=./=s/ if ( $type && $type =~ m/[HhAadzm]/ );
|
|
|
|
if ( (my ($def) = $opt->{desc} =~ m/default\b(?: ([^)]+))?/) ) {
|
|
$self->{defaults}->{$long} = defined $def ? $def : 1;
|
|
MKDEBUG && _d($long, 'default:', $def);
|
|
}
|
|
|
|
if ( $long eq 'config' ) {
|
|
$self->{defaults}->{$long} = join(',', $self->get_defaults_files());
|
|
}
|
|
|
|
if ( (my ($dis) = $opt->{desc} =~ m/(disables .*)/) ) {
|
|
$disables{$long} = $dis;
|
|
MKDEBUG && _d('Deferring check of disables rule for', $opt, $dis);
|
|
}
|
|
|
|
$self->{opts}->{$long} = $opt;
|
|
}
|
|
else { # It's an option rule, not a spec.
|
|
MKDEBUG && _d('Parsing rule:', $opt);
|
|
push @{$self->{rules}}, $opt;
|
|
my @participants = $self->_get_participants($opt);
|
|
my $rule_ok = 0;
|
|
|
|
if ( $opt =~ m/mutually exclusive|one and only one/ ) {
|
|
$rule_ok = 1;
|
|
push @{$self->{mutex}}, \@participants;
|
|
MKDEBUG && _d(@participants, 'are mutually exclusive');
|
|
}
|
|
if ( $opt =~ m/at least one|one and only one/ ) {
|
|
$rule_ok = 1;
|
|
push @{$self->{atleast1}}, \@participants;
|
|
MKDEBUG && _d(@participants, 'require at least one');
|
|
}
|
|
if ( $opt =~ m/default to/ ) {
|
|
$rule_ok = 1;
|
|
$self->{defaults_to}->{$participants[0]} = $participants[1];
|
|
MKDEBUG && _d($participants[0], 'defaults to', $participants[1]);
|
|
}
|
|
if ( $opt =~ m/restricted to option groups/ ) {
|
|
$rule_ok = 1;
|
|
my ($groups) = $opt =~ m/groups ([\w\s\,]+)/;
|
|
my @groups = split(',', $groups);
|
|
%{$self->{allowed_groups}->{$participants[0]}} = map {
|
|
s/\s+//;
|
|
$_ => 1;
|
|
} @groups;
|
|
}
|
|
if( $opt =~ m/accepts additional command-line arguments/ ) {
|
|
$rule_ok = 1;
|
|
$self->{strict} = 0;
|
|
MKDEBUG && _d("Strict mode disabled by rule");
|
|
}
|
|
|
|
die "Unrecognized option rule: $opt" unless $rule_ok;
|
|
}
|
|
}
|
|
|
|
foreach my $long ( keys %disables ) {
|
|
my @participants = $self->_get_participants($disables{$long});
|
|
$self->{disables}->{$long} = \@participants;
|
|
MKDEBUG && _d('Option', $long, 'disables', @participants);
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub _get_participants {
|
|
my ( $self, $str ) = @_;
|
|
my @participants;
|
|
foreach my $long ( $str =~ m/--(?:\[no\])?([\w-]+)/g ) {
|
|
die "Option --$long does not exist while processing rule $str"
|
|
unless exists $self->{opts}->{$long};
|
|
push @participants, $long;
|
|
}
|
|
MKDEBUG && _d('Participants for', $str, ':', @participants);
|
|
return @participants;
|
|
}
|
|
|
|
sub opts {
|
|
my ( $self ) = @_;
|
|
my %opts = %{$self->{opts}};
|
|
return %opts;
|
|
}
|
|
|
|
sub short_opts {
|
|
my ( $self ) = @_;
|
|
my %short_opts = %{$self->{short_opts}};
|
|
return %short_opts;
|
|
}
|
|
|
|
sub set_defaults {
|
|
my ( $self, %defaults ) = @_;
|
|
$self->{defaults} = {};
|
|
foreach my $long ( keys %defaults ) {
|
|
die "Cannot set default for nonexistent option $long"
|
|
unless exists $self->{opts}->{$long};
|
|
$self->{defaults}->{$long} = $defaults{$long};
|
|
MKDEBUG && _d('Default val for', $long, ':', $defaults{$long});
|
|
}
|
|
return;
|
|
}
|
|
|
|
sub get_defaults {
|
|
my ( $self ) = @_;
|
|
return $self->{defaults};
|
|
}
|
|
|
|
sub get_groups {
|
|
my ( $self ) = @_;
|
|
return $self->{groups};
|
|
}
|
|
|
|
sub _set_option {
|
|
my ( $self, $opt, $val ) = @_;
|
|
my $long = exists $self->{opts}->{$opt} ? $opt
|
|
: exists $self->{short_opts}->{$opt} ? $self->{short_opts}->{$opt}
|
|
: die "Getopt::Long gave a nonexistent option: $opt";
|
|
|
|
$opt = $self->{opts}->{$long};
|
|
if ( $opt->{is_cumulative} ) {
|
|
$opt->{value}++;
|
|
}
|
|
else {
|
|
$opt->{value} = $val;
|
|
}
|
|
$opt->{got} = 1;
|
|
MKDEBUG && _d('Got option', $long, '=', $val);
|
|
}
|
|
|
|
sub get_opts {
|
|
my ( $self ) = @_;
|
|
|
|
foreach my $long ( keys %{$self->{opts}} ) {
|
|
$self->{opts}->{$long}->{got} = 0;
|
|
$self->{opts}->{$long}->{value}
|
|
= exists $self->{defaults}->{$long} ? $self->{defaults}->{$long}
|
|
: $self->{opts}->{$long}->{is_cumulative} ? 0
|
|
: undef;
|
|
}
|
|
$self->{got_opts} = 0;
|
|
|
|
$self->{errors} = [];
|
|
|
|
if ( @ARGV && $ARGV[0] eq "--config" ) {
|
|
shift @ARGV;
|
|
$self->_set_option('config', shift @ARGV);
|
|
}
|
|
if ( $self->has('config') ) {
|
|
my @extra_args;
|
|
foreach my $filename ( split(',', $self->get('config')) ) {
|
|
eval {
|
|
push @extra_args, $self->_read_config_file($filename);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
if ( $self->got('config') ) {
|
|
die $EVAL_ERROR;
|
|
}
|
|
elsif ( MKDEBUG ) {
|
|
_d($EVAL_ERROR);
|
|
}
|
|
}
|
|
}
|
|
unshift @ARGV, @extra_args;
|
|
}
|
|
|
|
Getopt::Long::Configure('no_ignore_case', 'bundling');
|
|
GetOptions(
|
|
map { $_->{spec} => sub { $self->_set_option(@_); } }
|
|
grep { $_->{long} ne 'config' } # --config is handled specially above.
|
|
values %{$self->{opts}}
|
|
) or $self->save_error('Error parsing options');
|
|
|
|
if ( exists $self->{opts}->{version} && $self->{opts}->{version}->{got} ) {
|
|
if ( $self->{version} ) {
|
|
print $self->{version}, "\n";
|
|
}
|
|
else {
|
|
print "Error parsing version. See the VERSION section of the tool's documentation.\n";
|
|
}
|
|
exit 0;
|
|
}
|
|
|
|
if ( @ARGV && $self->{strict} ) {
|
|
$self->save_error("Unrecognized command-line options @ARGV");
|
|
}
|
|
|
|
foreach my $mutex ( @{$self->{mutex}} ) {
|
|
my @set = grep { $self->{opts}->{$_}->{got} } @$mutex;
|
|
if ( @set > 1 ) {
|
|
my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" }
|
|
@{$mutex}[ 0 .. scalar(@$mutex) - 2] )
|
|
. ' and --'.$self->{opts}->{$mutex->[-1]}->{long}
|
|
. ' are mutually exclusive.';
|
|
$self->save_error($err);
|
|
}
|
|
}
|
|
|
|
foreach my $required ( @{$self->{atleast1}} ) {
|
|
my @set = grep { $self->{opts}->{$_}->{got} } @$required;
|
|
if ( @set == 0 ) {
|
|
my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" }
|
|
@{$required}[ 0 .. scalar(@$required) - 2] )
|
|
.' or --'.$self->{opts}->{$required->[-1]}->{long};
|
|
$self->save_error("Specify at least one of $err");
|
|
}
|
|
}
|
|
|
|
$self->_check_opts( keys %{$self->{opts}} );
|
|
$self->{got_opts} = 1;
|
|
return;
|
|
}
|
|
|
|
sub _check_opts {
|
|
my ( $self, @long ) = @_;
|
|
my $long_last = scalar @long;
|
|
while ( @long ) {
|
|
foreach my $i ( 0..$#long ) {
|
|
my $long = $long[$i];
|
|
next unless $long;
|
|
my $opt = $self->{opts}->{$long};
|
|
if ( $opt->{got} ) {
|
|
if ( exists $self->{disables}->{$long} ) {
|
|
my @disable_opts = @{$self->{disables}->{$long}};
|
|
map { $self->{opts}->{$_}->{value} = undef; } @disable_opts;
|
|
MKDEBUG && _d('Unset options', @disable_opts,
|
|
'because', $long,'disables them');
|
|
}
|
|
|
|
if ( exists $self->{allowed_groups}->{$long} ) {
|
|
|
|
my @restricted_groups = grep {
|
|
!exists $self->{allowed_groups}->{$long}->{$_}
|
|
} keys %{$self->{groups}};
|
|
|
|
my @restricted_opts;
|
|
foreach my $restricted_group ( @restricted_groups ) {
|
|
RESTRICTED_OPT:
|
|
foreach my $restricted_opt (
|
|
keys %{$self->{groups}->{$restricted_group}} )
|
|
{
|
|
next RESTRICTED_OPT if $restricted_opt eq $long;
|
|
push @restricted_opts, $restricted_opt
|
|
if $self->{opts}->{$restricted_opt}->{got};
|
|
}
|
|
}
|
|
|
|
if ( @restricted_opts ) {
|
|
my $err;
|
|
if ( @restricted_opts == 1 ) {
|
|
$err = "--$restricted_opts[0]";
|
|
}
|
|
else {
|
|
$err = join(', ',
|
|
map { "--$self->{opts}->{$_}->{long}" }
|
|
grep { $_ }
|
|
@restricted_opts[0..scalar(@restricted_opts) - 2]
|
|
)
|
|
. ' or --'.$self->{opts}->{$restricted_opts[-1]}->{long};
|
|
}
|
|
$self->save_error("--$long is not allowed with $err");
|
|
}
|
|
}
|
|
|
|
}
|
|
elsif ( $opt->{is_required} ) {
|
|
$self->save_error("Required option --$long must be specified");
|
|
}
|
|
|
|
$self->_validate_type($opt);
|
|
if ( $opt->{parsed} ) {
|
|
delete $long[$i];
|
|
}
|
|
else {
|
|
MKDEBUG && _d('Temporarily failed to parse', $long);
|
|
}
|
|
}
|
|
|
|
die "Failed to parse options, possibly due to circular dependencies"
|
|
if @long == $long_last;
|
|
$long_last = @long;
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub _validate_type {
|
|
my ( $self, $opt ) = @_;
|
|
return unless $opt;
|
|
|
|
if ( !$opt->{type} ) {
|
|
$opt->{parsed} = 1;
|
|
return;
|
|
}
|
|
|
|
my $val = $opt->{value};
|
|
|
|
if ( $val && $opt->{type} eq 'm' ) { # type time
|
|
MKDEBUG && _d('Parsing option', $opt->{long}, 'as a time value');
|
|
my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/;
|
|
if ( !$suffix ) {
|
|
my ( $s ) = $opt->{desc} =~ m/\(suffix (.)\)/;
|
|
$suffix = $s || 's';
|
|
MKDEBUG && _d('No suffix given; using', $suffix, 'for',
|
|
$opt->{long}, '(value:', $val, ')');
|
|
}
|
|
if ( $suffix =~ m/[smhd]/ ) {
|
|
$val = $suffix eq 's' ? $num # Seconds
|
|
: $suffix eq 'm' ? $num * 60 # Minutes
|
|
: $suffix eq 'h' ? $num * 3600 # Hours
|
|
: $num * 86400; # Days
|
|
$opt->{value} = ($prefix || '') . $val;
|
|
MKDEBUG && _d('Setting option', $opt->{long}, 'to', $val);
|
|
}
|
|
else {
|
|
$self->save_error("Invalid time suffix for --$opt->{long}");
|
|
}
|
|
}
|
|
elsif ( $val && $opt->{type} eq 'd' ) { # type DSN
|
|
MKDEBUG && _d('Parsing option', $opt->{long}, 'as a DSN');
|
|
my $prev = {};
|
|
my $from_key = $self->{defaults_to}->{ $opt->{long} };
|
|
if ( $from_key ) {
|
|
MKDEBUG && _d($opt->{long}, 'DSN copies from', $from_key, 'DSN');
|
|
if ( $self->{opts}->{$from_key}->{parsed} ) {
|
|
$prev = $self->{opts}->{$from_key}->{value};
|
|
}
|
|
else {
|
|
MKDEBUG && _d('Cannot parse', $opt->{long}, 'until',
|
|
$from_key, 'parsed');
|
|
return;
|
|
}
|
|
}
|
|
my $defaults = $self->{DSNParser}->parse_options($self);
|
|
$opt->{value} = $self->{DSNParser}->parse($val, $prev, $defaults);
|
|
}
|
|
elsif ( $val && $opt->{type} eq 'z' ) { # type size
|
|
MKDEBUG && _d('Parsing option', $opt->{long}, 'as a size value');
|
|
$self->_parse_size($opt, $val);
|
|
}
|
|
elsif ( $opt->{type} eq 'H' || (defined $val && $opt->{type} eq 'h') ) {
|
|
$opt->{value} = { map { $_ => 1 } split(/(?<!\\),\s*/, ($val || '')) };
|
|
}
|
|
elsif ( $opt->{type} eq 'A' || (defined $val && $opt->{type} eq 'a') ) {
|
|
$opt->{value} = [ split(/(?<!\\),\s*/, ($val || '')) ];
|
|
}
|
|
else {
|
|
MKDEBUG && _d('Nothing to validate for option',
|
|
$opt->{long}, 'type', $opt->{type}, 'value', $val);
|
|
}
|
|
|
|
$opt->{parsed} = 1;
|
|
return;
|
|
}
|
|
|
|
sub get {
|
|
my ( $self, $opt ) = @_;
|
|
my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
|
|
die "Option $opt does not exist"
|
|
unless $long && exists $self->{opts}->{$long};
|
|
return $self->{opts}->{$long}->{value};
|
|
}
|
|
|
|
sub got {
|
|
my ( $self, $opt ) = @_;
|
|
my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
|
|
die "Option $opt does not exist"
|
|
unless $long && exists $self->{opts}->{$long};
|
|
return $self->{opts}->{$long}->{got};
|
|
}
|
|
|
|
sub has {
|
|
my ( $self, $opt ) = @_;
|
|
my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
|
|
return defined $long ? exists $self->{opts}->{$long} : 0;
|
|
}
|
|
|
|
sub set {
|
|
my ( $self, $opt, $val ) = @_;
|
|
my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
|
|
die "Option $opt does not exist"
|
|
unless $long && exists $self->{opts}->{$long};
|
|
$self->{opts}->{$long}->{value} = $val;
|
|
return;
|
|
}
|
|
|
|
sub save_error {
|
|
my ( $self, $error ) = @_;
|
|
push @{$self->{errors}}, $error;
|
|
return;
|
|
}
|
|
|
|
sub errors {
|
|
my ( $self ) = @_;
|
|
return $self->{errors};
|
|
}
|
|
|
|
sub usage {
|
|
my ( $self ) = @_;
|
|
warn "No usage string is set" unless $self->{usage}; # XXX
|
|
return "Usage: " . ($self->{usage} || '') . "\n";
|
|
}
|
|
|
|
sub descr {
|
|
my ( $self ) = @_;
|
|
warn "No description string is set" unless $self->{description}; # XXX
|
|
my $descr = ($self->{description} || $self->{program_name} || '')
|
|
. " For more details, please use the --help option, "
|
|
. "or try 'perldoc $PROGRAM_NAME' "
|
|
. "for complete documentation.";
|
|
$descr = join("\n", $descr =~ m/(.{0,80})(?:\s+|$)/g)
|
|
unless $ENV{DONT_BREAK_LINES};
|
|
$descr =~ s/ +$//mg;
|
|
return $descr;
|
|
}
|
|
|
|
sub usage_or_errors {
|
|
my ( $self, $file, $return ) = @_;
|
|
$file ||= $self->{file} || __FILE__;
|
|
|
|
if ( !$self->{description} || !$self->{usage} ) {
|
|
MKDEBUG && _d("Getting description and usage from SYNOPSIS in", $file);
|
|
my %synop = $self->_parse_synopsis($file);
|
|
$self->{description} ||= $synop{description};
|
|
$self->{usage} ||= $synop{usage};
|
|
MKDEBUG && _d("Description:", $self->{description},
|
|
"\nUsage:", $self->{usage});
|
|
}
|
|
|
|
if ( $self->{opts}->{help}->{got} ) {
|
|
print $self->print_usage() or die "Cannot print usage: $OS_ERROR";
|
|
exit 0 unless $return;
|
|
}
|
|
elsif ( scalar @{$self->{errors}} ) {
|
|
print $self->print_errors() or die "Cannot print errors: $OS_ERROR";
|
|
exit 0 unless $return;
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub print_errors {
|
|
my ( $self ) = @_;
|
|
my $usage = $self->usage() . "\n";
|
|
if ( (my @errors = @{$self->{errors}}) ) {
|
|
$usage .= join("\n * ", 'Errors in command-line arguments:', @errors)
|
|
. "\n";
|
|
}
|
|
return $usage . "\n" . $self->descr();
|
|
}
|
|
|
|
sub print_usage {
|
|
my ( $self ) = @_;
|
|
die "Run get_opts() before print_usage()" unless $self->{got_opts};
|
|
my @opts = values %{$self->{opts}};
|
|
|
|
my $maxl = max(
|
|
map {
|
|
length($_->{long}) # option long name
|
|
+ ($_->{is_negatable} ? 4 : 0) # "[no]" if opt is negatable
|
|
+ ($_->{type} ? 2 : 0) # "=x" where x is the opt type
|
|
}
|
|
@opts);
|
|
|
|
my $maxs = max(0,
|
|
map {
|
|
length($_)
|
|
+ ($self->{opts}->{$_}->{is_negatable} ? 4 : 0)
|
|
+ ($self->{opts}->{$_}->{type} ? 2 : 0)
|
|
}
|
|
values %{$self->{short_opts}});
|
|
|
|
my $lcol = max($maxl, ($maxs + 3));
|
|
my $rcol = 80 - $lcol - 6;
|
|
my $rpad = ' ' x ( 80 - $rcol );
|
|
|
|
$maxs = max($lcol - 3, $maxs);
|
|
|
|
my $usage = $self->descr() . "\n" . $self->usage();
|
|
|
|
my @groups = reverse sort grep { $_ ne 'default'; } keys %{$self->{groups}};
|
|
push @groups, 'default';
|
|
|
|
foreach my $group ( reverse @groups ) {
|
|
$usage .= "\n".($group eq 'default' ? 'Options' : $group).":\n\n";
|
|
foreach my $opt (
|
|
sort { $a->{long} cmp $b->{long} }
|
|
grep { $_->{group} eq $group }
|
|
@opts )
|
|
{
|
|
my $long = $opt->{is_negatable} ? "[no]$opt->{long}" : $opt->{long};
|
|
my $short = $opt->{short};
|
|
my $desc = $opt->{desc};
|
|
|
|
$long .= $opt->{type} ? "=$opt->{type}" : "";
|
|
|
|
if ( $opt->{type} && $opt->{type} eq 'm' ) {
|
|
my ($s) = $desc =~ m/\(suffix (.)\)/;
|
|
$s ||= 's';
|
|
$desc =~ s/\s+\(suffix .\)//;
|
|
$desc .= ". Optional suffix s=seconds, m=minutes, h=hours, "
|
|
. "d=days; if no suffix, $s is used.";
|
|
}
|
|
$desc = join("\n$rpad", grep { $_ } $desc =~ m/(.{0,$rcol})(?:\s+|$)/g);
|
|
$desc =~ s/ +$//mg;
|
|
if ( $short ) {
|
|
$usage .= sprintf(" --%-${maxs}s -%s %s\n", $long, $short, $desc);
|
|
}
|
|
else {
|
|
$usage .= sprintf(" --%-${lcol}s %s\n", $long, $desc);
|
|
}
|
|
}
|
|
}
|
|
|
|
$usage .= "\nOption types: s=string, i=integer, f=float, h/H/a/A=comma-separated list, d=DSN, z=size, m=time\n";
|
|
|
|
if ( (my @rules = @{$self->{rules}}) ) {
|
|
$usage .= "\nRules:\n\n";
|
|
$usage .= join("\n", map { " $_" } @rules) . "\n";
|
|
}
|
|
if ( $self->{DSNParser} ) {
|
|
$usage .= "\n" . $self->{DSNParser}->usage();
|
|
}
|
|
$usage .= "\nOptions and values after processing arguments:\n\n";
|
|
foreach my $opt ( sort { $a->{long} cmp $b->{long} } @opts ) {
|
|
my $val = $opt->{value};
|
|
my $type = $opt->{type} || '';
|
|
my $bool = $opt->{spec} =~ m/^[\w-]+(?:\|[\w-])?!?$/;
|
|
$val = $bool ? ( $val ? 'TRUE' : 'FALSE' )
|
|
: !defined $val ? '(No value)'
|
|
: $type eq 'd' ? $self->{DSNParser}->as_string($val)
|
|
: $type =~ m/H|h/ ? join(',', sort keys %$val)
|
|
: $type =~ m/A|a/ ? join(',', @$val)
|
|
: $val;
|
|
$usage .= sprintf(" --%-${lcol}s %s\n", $opt->{long}, $val);
|
|
}
|
|
return $usage;
|
|
}
|
|
|
|
sub prompt_noecho {
|
|
shift @_ if ref $_[0] eq __PACKAGE__;
|
|
my ( $prompt ) = @_;
|
|
local $OUTPUT_AUTOFLUSH = 1;
|
|
print $prompt
|
|
or die "Cannot print: $OS_ERROR";
|
|
my $response;
|
|
eval {
|
|
require Term::ReadKey;
|
|
Term::ReadKey::ReadMode('noecho');
|
|
chomp($response = <STDIN>);
|
|
Term::ReadKey::ReadMode('normal');
|
|
print "\n"
|
|
or die "Cannot print: $OS_ERROR";
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
die "Cannot read response; is Term::ReadKey installed? $EVAL_ERROR";
|
|
}
|
|
return $response;
|
|
}
|
|
|
|
sub _read_config_file {
|
|
my ( $self, $filename ) = @_;
|
|
open my $fh, "<", $filename or die "Cannot open $filename: $OS_ERROR\n";
|
|
my @args;
|
|
my $prefix = '--';
|
|
my $parse = 1;
|
|
|
|
LINE:
|
|
while ( my $line = <$fh> ) {
|
|
chomp $line;
|
|
next LINE if $line =~ m/^\s*(?:\#|\;|$)/;
|
|
$line =~ s/\s+#.*$//g;
|
|
$line =~ s/^\s+|\s+$//g;
|
|
if ( $line eq '--' ) {
|
|
$prefix = '';
|
|
$parse = 0;
|
|
next LINE;
|
|
}
|
|
if ( $parse
|
|
&& (my($opt, $arg) = $line =~ m/^\s*([^=\s]+?)(?:\s*=\s*(.*?)\s*)?$/)
|
|
) {
|
|
push @args, grep { defined $_ } ("$prefix$opt", $arg);
|
|
}
|
|
elsif ( $line =~ m/./ ) {
|
|
push @args, $line;
|
|
}
|
|
else {
|
|
die "Syntax error in file $filename at line $INPUT_LINE_NUMBER";
|
|
}
|
|
}
|
|
close $fh;
|
|
return @args;
|
|
}
|
|
|
|
sub read_para_after {
|
|
my ( $self, $file, $regex ) = @_;
|
|
open my $fh, "<", $file or die "Can't open $file: $OS_ERROR";
|
|
local $INPUT_RECORD_SEPARATOR = '';
|
|
my $para;
|
|
while ( $para = <$fh> ) {
|
|
next unless $para =~ m/^=pod$/m;
|
|
last;
|
|
}
|
|
while ( $para = <$fh> ) {
|
|
next unless $para =~ m/$regex/;
|
|
last;
|
|
}
|
|
$para = <$fh>;
|
|
chomp($para);
|
|
close $fh or die "Can't close $file: $OS_ERROR";
|
|
return $para;
|
|
}
|
|
|
|
sub clone {
|
|
my ( $self ) = @_;
|
|
|
|
my %clone = map {
|
|
my $hashref = $self->{$_};
|
|
my $val_copy = {};
|
|
foreach my $key ( keys %$hashref ) {
|
|
my $ref = ref $hashref->{$key};
|
|
$val_copy->{$key} = !$ref ? $hashref->{$key}
|
|
: $ref eq 'HASH' ? { %{$hashref->{$key}} }
|
|
: $ref eq 'ARRAY' ? [ @{$hashref->{$key}} ]
|
|
: $hashref->{$key};
|
|
}
|
|
$_ => $val_copy;
|
|
} qw(opts short_opts defaults);
|
|
|
|
foreach my $scalar ( qw(got_opts) ) {
|
|
$clone{$scalar} = $self->{$scalar};
|
|
}
|
|
|
|
return bless \%clone;
|
|
}
|
|
|
|
sub _parse_size {
|
|
my ( $self, $opt, $val ) = @_;
|
|
|
|
if ( lc($val || '') eq 'null' ) {
|
|
MKDEBUG && _d('NULL size for', $opt->{long});
|
|
$opt->{value} = 'null';
|
|
return;
|
|
}
|
|
|
|
my %factor_for = (k => 1_024, M => 1_048_576, G => 1_073_741_824);
|
|
my ($pre, $num, $factor) = $val =~ m/^([+-])?(\d+)([kMG])?$/;
|
|
if ( defined $num ) {
|
|
if ( $factor ) {
|
|
$num *= $factor_for{$factor};
|
|
MKDEBUG && _d('Setting option', $opt->{y},
|
|
'to num', $num, '* factor', $factor);
|
|
}
|
|
$opt->{value} = ($pre || '') . $num;
|
|
}
|
|
else {
|
|
$self->save_error("Invalid size for --$opt->{long}");
|
|
}
|
|
return;
|
|
}
|
|
|
|
sub _parse_attribs {
|
|
my ( $self, $option, $attribs ) = @_;
|
|
my $types = $self->{types};
|
|
return $option
|
|
. ($attribs->{'short form'} ? '|' . $attribs->{'short form'} : '' )
|
|
. ($attribs->{'negatable'} ? '!' : '' )
|
|
. ($attribs->{'cumulative'} ? '+' : '' )
|
|
. ($attribs->{'type'} ? '=' . $types->{$attribs->{type}} : '' );
|
|
}
|
|
|
|
sub _parse_synopsis {
|
|
my ( $self, $file ) = @_;
|
|
$file ||= $self->{file} || __FILE__;
|
|
MKDEBUG && _d("Parsing SYNOPSIS in", $file);
|
|
|
|
local $INPUT_RECORD_SEPARATOR = ''; # read paragraphs
|
|
open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR";
|
|
my $para;
|
|
1 while defined($para = <$fh>) && $para !~ m/^=head1 SYNOPSIS/;
|
|
die "$file does not contain a SYNOPSIS section" unless $para;
|
|
my @synop;
|
|
for ( 1..2 ) { # 1 for the usage, 2 for the description
|
|
my $para = <$fh>;
|
|
push @synop, $para;
|
|
}
|
|
close $fh;
|
|
MKDEBUG && _d("Raw SYNOPSIS text:", @synop);
|
|
my ($usage, $desc) = @synop;
|
|
die "The SYNOPSIS section in $file is not formatted properly"
|
|
unless $usage && $desc;
|
|
|
|
$usage =~ s/^\s*Usage:\s+(.+)/$1/;
|
|
chomp $usage;
|
|
|
|
$desc =~ s/\n/ /g;
|
|
$desc =~ s/\s{2,}/ /g;
|
|
$desc =~ s/\. ([A-Z][a-z])/. $1/g;
|
|
$desc =~ s/\s+$//;
|
|
|
|
return (
|
|
description => $desc,
|
|
usage => $usage,
|
|
);
|
|
};
|
|
|
|
sub _d {
|
|
my ($package, undef, $line) = caller 0;
|
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
|
map { defined $_ ? $_ : 'undef' }
|
|
@_;
|
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
|
}
|
|
|
|
if ( MKDEBUG ) {
|
|
print '# ', $^X, ' ', $], "\n";
|
|
if ( my $uname = `uname -a` ) {
|
|
$uname =~ s/\s+/ /g;
|
|
print "# $uname\n";
|
|
}
|
|
print '# Arguments: ',
|
|
join(' ', map { my $a = "_[$_]_"; $a =~ s/\n/\n# /g; $a; } @ARGV), "\n";
|
|
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End OptionParser package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# DSNParser package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/DSNParser.pm
|
|
# t/lib/DSNParser.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package DSNParser;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
use Data::Dumper;
|
|
$Data::Dumper::Indent = 0;
|
|
$Data::Dumper::Quotekeys = 0;
|
|
|
|
eval {
|
|
require DBI;
|
|
};
|
|
my $have_dbi = $EVAL_ERROR ? 0 : 1;
|
|
|
|
sub new {
|
|
my ( $class, %args ) = @_;
|
|
foreach my $arg ( qw(opts) ) {
|
|
die "I need a $arg argument" unless $args{$arg};
|
|
}
|
|
my $self = {
|
|
opts => {} # h, P, u, etc. Should come from DSN OPTIONS section in POD.
|
|
};
|
|
foreach my $opt ( @{$args{opts}} ) {
|
|
if ( !$opt->{key} || !$opt->{desc} ) {
|
|
die "Invalid DSN option: ", Dumper($opt);
|
|
}
|
|
MKDEBUG && _d('DSN option:',
|
|
join(', ',
|
|
map { "$_=" . (defined $opt->{$_} ? ($opt->{$_} || '') : 'undef') }
|
|
keys %$opt
|
|
)
|
|
);
|
|
$self->{opts}->{$opt->{key}} = {
|
|
dsn => $opt->{dsn},
|
|
desc => $opt->{desc},
|
|
copy => $opt->{copy} || 0,
|
|
};
|
|
}
|
|
return bless $self, $class;
|
|
}
|
|
|
|
sub prop {
|
|
my ( $self, $prop, $value ) = @_;
|
|
if ( @_ > 2 ) {
|
|
MKDEBUG && _d('Setting', $prop, 'property');
|
|
$self->{$prop} = $value;
|
|
}
|
|
return $self->{$prop};
|
|
}
|
|
|
|
sub parse {
|
|
my ( $self, $dsn, $prev, $defaults ) = @_;
|
|
if ( !$dsn ) {
|
|
MKDEBUG && _d('No DSN to parse');
|
|
return;
|
|
}
|
|
MKDEBUG && _d('Parsing', $dsn);
|
|
$prev ||= {};
|
|
$defaults ||= {};
|
|
my %given_props;
|
|
my %final_props;
|
|
my $opts = $self->{opts};
|
|
|
|
foreach my $dsn_part ( split(/,/, $dsn) ) {
|
|
if ( my ($prop_key, $prop_val) = $dsn_part =~ m/^(.)=(.*)$/ ) {
|
|
$given_props{$prop_key} = $prop_val;
|
|
}
|
|
else {
|
|
MKDEBUG && _d('Interpreting', $dsn_part, 'as h=', $dsn_part);
|
|
$given_props{h} = $dsn_part;
|
|
}
|
|
}
|
|
|
|
foreach my $key ( keys %$opts ) {
|
|
MKDEBUG && _d('Finding value for', $key);
|
|
$final_props{$key} = $given_props{$key};
|
|
if ( !defined $final_props{$key}
|
|
&& defined $prev->{$key} && $opts->{$key}->{copy} )
|
|
{
|
|
$final_props{$key} = $prev->{$key};
|
|
MKDEBUG && _d('Copying value for', $key, 'from previous DSN');
|
|
}
|
|
if ( !defined $final_props{$key} ) {
|
|
$final_props{$key} = $defaults->{$key};
|
|
MKDEBUG && _d('Copying value for', $key, 'from defaults');
|
|
}
|
|
}
|
|
|
|
foreach my $key ( keys %given_props ) {
|
|
die "Unknown DSN option '$key' in '$dsn'. For more details, "
|
|
. "please use the --help option, or try 'perldoc $PROGRAM_NAME' "
|
|
. "for complete documentation."
|
|
unless exists $opts->{$key};
|
|
}
|
|
if ( (my $required = $self->prop('required')) ) {
|
|
foreach my $key ( keys %$required ) {
|
|
die "Missing required DSN option '$key' in '$dsn'. For more details, "
|
|
. "please use the --help option, or try 'perldoc $PROGRAM_NAME' "
|
|
. "for complete documentation."
|
|
unless $final_props{$key};
|
|
}
|
|
}
|
|
|
|
return \%final_props;
|
|
}
|
|
|
|
sub parse_options {
|
|
my ( $self, $o ) = @_;
|
|
die 'I need an OptionParser object' unless ref $o eq 'OptionParser';
|
|
my $dsn_string
|
|
= join(',',
|
|
map { "$_=".$o->get($_); }
|
|
grep { $o->has($_) && $o->get($_) }
|
|
keys %{$self->{opts}}
|
|
);
|
|
MKDEBUG && _d('DSN string made from options:', $dsn_string);
|
|
return $self->parse($dsn_string);
|
|
}
|
|
|
|
sub as_string {
|
|
my ( $self, $dsn, $props ) = @_;
|
|
return $dsn unless ref $dsn;
|
|
my %allowed = $props ? map { $_=>1 } @$props : ();
|
|
return join(',',
|
|
map { "$_=" . ($_ eq 'p' ? '...' : $dsn->{$_}) }
|
|
grep { defined $dsn->{$_} && $self->{opts}->{$_} }
|
|
grep { !$props || $allowed{$_} }
|
|
sort keys %$dsn );
|
|
}
|
|
|
|
sub usage {
|
|
my ( $self ) = @_;
|
|
my $usage
|
|
= "DSN syntax is key=value[,key=value...] Allowable DSN keys:\n\n"
|
|
. " KEY COPY MEANING\n"
|
|
. " === ==== =============================================\n";
|
|
my %opts = %{$self->{opts}};
|
|
foreach my $key ( sort keys %opts ) {
|
|
$usage .= " $key "
|
|
. ($opts{$key}->{copy} ? 'yes ' : 'no ')
|
|
. ($opts{$key}->{desc} || '[No description]')
|
|
. "\n";
|
|
}
|
|
$usage .= "\n If the DSN is a bareword, the word is treated as the 'h' key.\n";
|
|
return $usage;
|
|
}
|
|
|
|
sub get_cxn_params {
|
|
my ( $self, $info ) = @_;
|
|
my $dsn;
|
|
my %opts = %{$self->{opts}};
|
|
my $driver = $self->prop('dbidriver') || '';
|
|
if ( $driver eq 'Pg' ) {
|
|
$dsn = 'DBI:Pg:dbname=' . ( $info->{D} || '' ) . ';'
|
|
. join(';', map { "$opts{$_}->{dsn}=$info->{$_}" }
|
|
grep { defined $info->{$_} }
|
|
qw(h P));
|
|
}
|
|
else {
|
|
$dsn = 'DBI:mysql:' . ( $info->{D} || '' ) . ';'
|
|
. join(';', map { "$opts{$_}->{dsn}=$info->{$_}" }
|
|
grep { defined $info->{$_} }
|
|
qw(F h P S A))
|
|
. ';mysql_read_default_group=client';
|
|
}
|
|
MKDEBUG && _d($dsn);
|
|
return ($dsn, $info->{u}, $info->{p});
|
|
}
|
|
|
|
sub fill_in_dsn {
|
|
my ( $self, $dbh, $dsn ) = @_;
|
|
my $vars = $dbh->selectall_hashref('SHOW VARIABLES', 'Variable_name');
|
|
my ($user, $db) = $dbh->selectrow_array('SELECT USER(), DATABASE()');
|
|
$user =~ s/@.*//;
|
|
$dsn->{h} ||= $vars->{hostname}->{Value};
|
|
$dsn->{S} ||= $vars->{'socket'}->{Value};
|
|
$dsn->{P} ||= $vars->{port}->{Value};
|
|
$dsn->{u} ||= $user;
|
|
$dsn->{D} ||= $db;
|
|
}
|
|
|
|
sub get_dbh {
|
|
my ( $self, $cxn_string, $user, $pass, $opts ) = @_;
|
|
$opts ||= {};
|
|
my $defaults = {
|
|
AutoCommit => 0,
|
|
RaiseError => 1,
|
|
PrintError => 0,
|
|
ShowErrorStatement => 1,
|
|
mysql_enable_utf8 => ($cxn_string =~ m/charset=utf8/i ? 1 : 0),
|
|
};
|
|
@{$defaults}{ keys %$opts } = values %$opts;
|
|
|
|
if ( $opts->{mysql_use_result} ) {
|
|
$defaults->{mysql_use_result} = 1;
|
|
}
|
|
|
|
if ( !$have_dbi ) {
|
|
die "Cannot connect to MySQL because the Perl DBI module is not "
|
|
. "installed or not found. Run 'perl -MDBI' to see the directories "
|
|
. "that Perl searches for DBI. If DBI is not installed, try:\n"
|
|
. " Debian/Ubuntu apt-get install libdbi-perl\n"
|
|
. " RHEL/CentOS yum install perl-DBI\n"
|
|
. " OpenSolaris pgk install pkg:/SUNWpmdbi\n";
|
|
|
|
}
|
|
|
|
my $dbh;
|
|
my $tries = 2;
|
|
while ( !$dbh && $tries-- ) {
|
|
MKDEBUG && _d($cxn_string, ' ', $user, ' ', $pass,
|
|
join(', ', map { "$_=>$defaults->{$_}" } keys %$defaults ));
|
|
|
|
eval {
|
|
$dbh = DBI->connect($cxn_string, $user, $pass, $defaults);
|
|
|
|
if ( $cxn_string =~ m/mysql/i ) {
|
|
my $sql;
|
|
|
|
$sql = 'SELECT @@SQL_MODE';
|
|
MKDEBUG && _d($dbh, $sql);
|
|
my ($sql_mode) = $dbh->selectrow_array($sql);
|
|
|
|
$sql = 'SET @@SQL_QUOTE_SHOW_CREATE = 1'
|
|
. '/*!40101, @@SQL_MODE=\'NO_AUTO_VALUE_ON_ZERO'
|
|
. ($sql_mode ? ",$sql_mode" : '')
|
|
. '\'*/';
|
|
MKDEBUG && _d($dbh, $sql);
|
|
$dbh->do($sql);
|
|
|
|
if ( my ($charset) = $cxn_string =~ m/charset=(\w+)/ ) {
|
|
$sql = "/*!40101 SET NAMES $charset*/";
|
|
MKDEBUG && _d($dbh, ':', $sql);
|
|
$dbh->do($sql);
|
|
MKDEBUG && _d('Enabling charset for STDOUT');
|
|
if ( $charset eq 'utf8' ) {
|
|
binmode(STDOUT, ':utf8')
|
|
or die "Can't binmode(STDOUT, ':utf8'): $OS_ERROR";
|
|
}
|
|
else {
|
|
binmode(STDOUT) or die "Can't binmode(STDOUT): $OS_ERROR";
|
|
}
|
|
}
|
|
|
|
if ( $self->prop('set-vars') ) {
|
|
$sql = "SET " . $self->prop('set-vars');
|
|
MKDEBUG && _d($dbh, ':', $sql);
|
|
$dbh->do($sql);
|
|
}
|
|
}
|
|
};
|
|
if ( !$dbh && $EVAL_ERROR ) {
|
|
MKDEBUG && _d($EVAL_ERROR);
|
|
if ( $EVAL_ERROR =~ m/not a compiled character set|character set utf8/ ) {
|
|
MKDEBUG && _d('Going to try again without utf8 support');
|
|
delete $defaults->{mysql_enable_utf8};
|
|
}
|
|
elsif ( $EVAL_ERROR =~ m/locate DBD\/mysql/i ) {
|
|
die "Cannot connect to MySQL because the Perl DBD::mysql module is "
|
|
. "not installed or not found. Run 'perl -MDBD::mysql' to see "
|
|
. "the directories that Perl searches for DBD::mysql. If "
|
|
. "DBD::mysql is not installed, try:\n"
|
|
. " Debian/Ubuntu apt-get install libdbd-mysql-perl\n"
|
|
. " RHEL/CentOS yum install perl-DBD-MySQL\n"
|
|
. " OpenSolaris pgk install pkg:/SUNWapu13dbd-mysql\n";
|
|
}
|
|
if ( !$tries ) {
|
|
die $EVAL_ERROR;
|
|
}
|
|
}
|
|
}
|
|
|
|
MKDEBUG && _d('DBH info: ',
|
|
$dbh,
|
|
Dumper($dbh->selectrow_hashref(
|
|
'SELECT DATABASE(), CONNECTION_ID(), VERSION()/*!50038 , @@hostname*/')),
|
|
'Connection info:', $dbh->{mysql_hostinfo},
|
|
'Character set info:', Dumper($dbh->selectall_arrayref(
|
|
'SHOW VARIABLES LIKE "character_set%"', { Slice => {}})),
|
|
'$DBD::mysql::VERSION:', $DBD::mysql::VERSION,
|
|
'$DBI::VERSION:', $DBI::VERSION,
|
|
);
|
|
|
|
return $dbh;
|
|
}
|
|
|
|
sub get_hostname {
|
|
my ( $self, $dbh ) = @_;
|
|
if ( my ($host) = ($dbh->{mysql_hostinfo} || '') =~ m/^(\w+) via/ ) {
|
|
return $host;
|
|
}
|
|
my ( $hostname, $one ) = $dbh->selectrow_array(
|
|
'SELECT /*!50038 @@hostname, */ 1');
|
|
return $hostname;
|
|
}
|
|
|
|
sub disconnect {
|
|
my ( $self, $dbh ) = @_;
|
|
MKDEBUG && $self->print_active_handles($dbh);
|
|
$dbh->disconnect;
|
|
}
|
|
|
|
sub print_active_handles {
|
|
my ( $self, $thing, $level ) = @_;
|
|
$level ||= 0;
|
|
printf("# Active %sh: %s %s %s\n", ($thing->{Type} || 'undef'), "\t" x $level,
|
|
$thing, (($thing->{Type} || '') eq 'st' ? $thing->{Statement} || '' : ''))
|
|
or die "Cannot print: $OS_ERROR";
|
|
foreach my $handle ( grep {defined} @{ $thing->{ChildHandles} } ) {
|
|
$self->print_active_handles( $handle, $level + 1 );
|
|
}
|
|
}
|
|
|
|
sub copy {
|
|
my ( $self, $dsn_1, $dsn_2, %args ) = @_;
|
|
die 'I need a dsn_1 argument' unless $dsn_1;
|
|
die 'I need a dsn_2 argument' unless $dsn_2;
|
|
my %new_dsn = map {
|
|
my $key = $_;
|
|
my $val;
|
|
if ( $args{overwrite} ) {
|
|
$val = defined $dsn_1->{$key} ? $dsn_1->{$key} : $dsn_2->{$key};
|
|
}
|
|
else {
|
|
$val = defined $dsn_2->{$key} ? $dsn_2->{$key} : $dsn_1->{$key};
|
|
}
|
|
$key => $val;
|
|
} keys %{$self->{opts}};
|
|
return \%new_dsn;
|
|
}
|
|
|
|
sub _d {
|
|
my ($package, undef, $line) = caller 0;
|
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
|
map { defined $_ ? $_ : 'undef' }
|
|
@_;
|
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
|
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End DSNParser package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Daemon package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Daemon.pm
|
|
# t/lib/Daemon.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package Daemon;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
use POSIX qw(setsid);
|
|
|
|
sub new {
|
|
my ( $class, %args ) = @_;
|
|
foreach my $arg ( qw(o) ) {
|
|
die "I need a $arg argument" unless $args{$arg};
|
|
}
|
|
my $o = $args{o};
|
|
my $self = {
|
|
o => $o,
|
|
log_file => $o->has('log') ? $o->get('log') : undef,
|
|
PID_file => $o->has('pid') ? $o->get('pid') : undef,
|
|
};
|
|
|
|
check_PID_file(undef, $self->{PID_file});
|
|
|
|
MKDEBUG && _d('Daemonized child will log to', $self->{log_file});
|
|
return bless $self, $class;
|
|
}
|
|
|
|
sub daemonize {
|
|
my ( $self ) = @_;
|
|
|
|
MKDEBUG && _d('About to fork and daemonize');
|
|
defined (my $pid = fork()) or die "Cannot fork: $OS_ERROR";
|
|
if ( $pid ) {
|
|
MKDEBUG && _d('I am the parent and now I die');
|
|
exit;
|
|
}
|
|
|
|
$self->{PID_owner} = $PID;
|
|
$self->{child} = 1;
|
|
|
|
POSIX::setsid() or die "Cannot start a new session: $OS_ERROR";
|
|
chdir '/' or die "Cannot chdir to /: $OS_ERROR";
|
|
|
|
$self->_make_PID_file();
|
|
|
|
$OUTPUT_AUTOFLUSH = 1;
|
|
|
|
if ( -t STDIN ) {
|
|
close STDIN;
|
|
open STDIN, '/dev/null'
|
|
or die "Cannot reopen STDIN to /dev/null: $OS_ERROR";
|
|
}
|
|
|
|
if ( $self->{log_file} ) {
|
|
close STDOUT;
|
|
open STDOUT, '>>', $self->{log_file}
|
|
or die "Cannot open log file $self->{log_file}: $OS_ERROR";
|
|
|
|
close STDERR;
|
|
open STDERR, ">&STDOUT"
|
|
or die "Cannot dupe STDERR to STDOUT: $OS_ERROR";
|
|
}
|
|
else {
|
|
if ( -t STDOUT ) {
|
|
close STDOUT;
|
|
open STDOUT, '>', '/dev/null'
|
|
or die "Cannot reopen STDOUT to /dev/null: $OS_ERROR";
|
|
}
|
|
if ( -t STDERR ) {
|
|
close STDERR;
|
|
open STDERR, '>', '/dev/null'
|
|
or die "Cannot reopen STDERR to /dev/null: $OS_ERROR";
|
|
}
|
|
}
|
|
|
|
MKDEBUG && _d('I am the child and now I live daemonized');
|
|
return;
|
|
}
|
|
|
|
sub check_PID_file {
|
|
my ( $self, $file ) = @_;
|
|
my $PID_file = $self ? $self->{PID_file} : $file;
|
|
MKDEBUG && _d('Checking PID file', $PID_file);
|
|
if ( $PID_file && -f $PID_file ) {
|
|
my $pid;
|
|
eval { chomp($pid = `cat $PID_file`); };
|
|
die "Cannot cat $PID_file: $OS_ERROR" if $EVAL_ERROR;
|
|
MKDEBUG && _d('PID file exists; it contains PID', $pid);
|
|
if ( $pid ) {
|
|
my $pid_is_alive = kill 0, $pid;
|
|
if ( $pid_is_alive ) {
|
|
die "The PID file $PID_file already exists "
|
|
. " and the PID that it contains, $pid, is running";
|
|
}
|
|
else {
|
|
warn "Overwriting PID file $PID_file because the PID that it "
|
|
. "contains, $pid, is not running";
|
|
}
|
|
}
|
|
else {
|
|
die "The PID file $PID_file already exists but it does not "
|
|
. "contain a PID";
|
|
}
|
|
}
|
|
else {
|
|
MKDEBUG && _d('No PID file');
|
|
}
|
|
return;
|
|
}
|
|
|
|
sub make_PID_file {
|
|
my ( $self ) = @_;
|
|
if ( exists $self->{child} ) {
|
|
die "Do not call Daemon::make_PID_file() for daemonized scripts";
|
|
}
|
|
$self->_make_PID_file();
|
|
$self->{PID_owner} = $PID;
|
|
return;
|
|
}
|
|
|
|
sub _make_PID_file {
|
|
my ( $self ) = @_;
|
|
|
|
my $PID_file = $self->{PID_file};
|
|
if ( !$PID_file ) {
|
|
MKDEBUG && _d('No PID file to create');
|
|
return;
|
|
}
|
|
|
|
$self->check_PID_file();
|
|
|
|
open my $PID_FH, '>', $PID_file
|
|
or die "Cannot open PID file $PID_file: $OS_ERROR";
|
|
print $PID_FH $PID
|
|
or die "Cannot print to PID file $PID_file: $OS_ERROR";
|
|
close $PID_FH
|
|
or die "Cannot close PID file $PID_file: $OS_ERROR";
|
|
|
|
MKDEBUG && _d('Created PID file:', $self->{PID_file});
|
|
return;
|
|
}
|
|
|
|
sub _remove_PID_file {
|
|
my ( $self ) = @_;
|
|
if ( $self->{PID_file} && -f $self->{PID_file} ) {
|
|
unlink $self->{PID_file}
|
|
or warn "Cannot remove PID file $self->{PID_file}: $OS_ERROR";
|
|
MKDEBUG && _d('Removed PID file');
|
|
}
|
|
else {
|
|
MKDEBUG && _d('No PID to remove');
|
|
}
|
|
return;
|
|
}
|
|
|
|
sub DESTROY {
|
|
my ( $self ) = @_;
|
|
|
|
$self->_remove_PID_file() if ($self->{PID_owner} || 0) == $PID;
|
|
|
|
return;
|
|
}
|
|
|
|
sub _d {
|
|
my ($package, undef, $line) = caller 0;
|
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
|
map { defined $_ ? $_ : 'undef' }
|
|
@_;
|
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
|
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End Daemon package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Quoter package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Quoter.pm
|
|
# t/lib/Quoter.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package Quoter;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
sub new {
|
|
my ( $class, %args ) = @_;
|
|
return bless {}, $class;
|
|
}
|
|
|
|
sub quote {
|
|
my ( $self, @vals ) = @_;
|
|
foreach my $val ( @vals ) {
|
|
$val =~ s/`/``/g;
|
|
}
|
|
return join('.', map { '`' . $_ . '`' } @vals);
|
|
}
|
|
|
|
sub quote_val {
|
|
my ( $self, $val ) = @_;
|
|
|
|
return 'NULL' unless defined $val; # undef = NULL
|
|
return "''" if $val eq ''; # blank string = ''
|
|
return $val if $val =~ m/^0x[0-9a-fA-F]+$/; # hex data
|
|
|
|
$val =~ s/(['\\])/\\$1/g;
|
|
return "'$val'";
|
|
}
|
|
|
|
sub split_unquote {
|
|
my ( $self, $db_tbl, $default_db ) = @_;
|
|
$db_tbl =~ s/`//g;
|
|
my ( $db, $tbl ) = split(/[.]/, $db_tbl);
|
|
if ( !$tbl ) {
|
|
$tbl = $db;
|
|
$db = $default_db;
|
|
}
|
|
return ($db, $tbl);
|
|
}
|
|
|
|
sub literal_like {
|
|
my ( $self, $like ) = @_;
|
|
return unless $like;
|
|
$like =~ s/([%_])/\\$1/g;
|
|
return "'$like'";
|
|
}
|
|
|
|
sub join_quote {
|
|
my ( $self, $default_db, $db_tbl ) = @_;
|
|
return unless $db_tbl;
|
|
my ($db, $tbl) = split(/[.]/, $db_tbl);
|
|
if ( !$tbl ) {
|
|
$tbl = $db;
|
|
$db = $default_db;
|
|
}
|
|
$db = "`$db`" if $db && $db !~ m/^`/;
|
|
$tbl = "`$tbl`" if $tbl && $tbl !~ m/^`/;
|
|
return $db ? "$db.$tbl" : $tbl;
|
|
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End Quoter package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# VersionParser package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/VersionParser.pm
|
|
# t/lib/VersionParser.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package VersionParser;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
sub new {
|
|
my ( $class ) = @_;
|
|
bless {}, $class;
|
|
}
|
|
|
|
sub parse {
|
|
my ( $self, $str ) = @_;
|
|
my $result = sprintf('%03d%03d%03d', $str =~ m/(\d+)/g);
|
|
MKDEBUG && _d($str, 'parses to', $result);
|
|
return $result;
|
|
}
|
|
|
|
sub version_ge {
|
|
my ( $self, $dbh, $target ) = @_;
|
|
if ( !$self->{$dbh} ) {
|
|
$self->{$dbh} = $self->parse(
|
|
$dbh->selectrow_array('SELECT VERSION()'));
|
|
}
|
|
my $result = $self->{$dbh} ge $self->parse($target) ? 1 : 0;
|
|
MKDEBUG && _d($self->{$dbh}, 'ge', $target, ':', $result);
|
|
return $result;
|
|
}
|
|
|
|
sub innodb_version {
|
|
my ( $self, $dbh ) = @_;
|
|
return unless $dbh;
|
|
my $innodb_version = "NO";
|
|
|
|
my ($innodb) =
|
|
grep { $_->{engine} =~ m/InnoDB/i }
|
|
map {
|
|
my %hash;
|
|
@hash{ map { lc $_ } keys %$_ } = values %$_;
|
|
\%hash;
|
|
}
|
|
@{ $dbh->selectall_arrayref("SHOW ENGINES", {Slice=>{}}) };
|
|
if ( $innodb ) {
|
|
MKDEBUG && _d("InnoDB support:", $innodb->{support});
|
|
if ( $innodb->{support} =~ m/YES|DEFAULT/i ) {
|
|
my $vars = $dbh->selectrow_hashref(
|
|
"SHOW VARIABLES LIKE 'innodb_version'");
|
|
$innodb_version = !$vars ? "BUILTIN"
|
|
: ($vars->{Value} || $vars->{value});
|
|
}
|
|
else {
|
|
$innodb_version = $innodb->{support}; # probably DISABLED or NO
|
|
}
|
|
}
|
|
|
|
MKDEBUG && _d("InnoDB version:", $innodb_version);
|
|
return $innodb_version;
|
|
}
|
|
|
|
sub _d {
|
|
my ($package, undef, $line) = caller 0;
|
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
|
map { defined $_ ? $_ : 'undef' }
|
|
@_;
|
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
|
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End VersionParser package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# TableParser package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/TableParser.pm
|
|
# t/lib/TableParser.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package TableParser;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
use Data::Dumper;
|
|
$Data::Dumper::Indent = 1;
|
|
$Data::Dumper::Sortkeys = 1;
|
|
$Data::Dumper::Quotekeys = 0;
|
|
|
|
sub new {
|
|
my ( $class, %args ) = @_;
|
|
my @required_args = qw(Quoter);
|
|
foreach my $arg ( @required_args ) {
|
|
die "I need a $arg argument" unless $args{$arg};
|
|
}
|
|
my $self = { %args };
|
|
return bless $self, $class;
|
|
}
|
|
|
|
sub parse {
|
|
my ( $self, $ddl, $opts ) = @_;
|
|
return unless $ddl;
|
|
if ( ref $ddl eq 'ARRAY' ) {
|
|
if ( lc $ddl->[0] eq 'table' ) {
|
|
$ddl = $ddl->[1];
|
|
}
|
|
else {
|
|
return {
|
|
engine => 'VIEW',
|
|
};
|
|
}
|
|
}
|
|
|
|
if ( $ddl !~ m/CREATE (?:TEMPORARY )?TABLE `/ ) {
|
|
die "Cannot parse table definition; is ANSI quoting "
|
|
. "enabled or SQL_QUOTE_SHOW_CREATE disabled?";
|
|
}
|
|
|
|
my ($name) = $ddl =~ m/CREATE (?:TEMPORARY )?TABLE\s+(`.+?`)/;
|
|
(undef, $name) = $self->{Quoter}->split_unquote($name) if $name;
|
|
|
|
$ddl =~ s/(`[^`]+`)/\L$1/g;
|
|
|
|
my $engine = $self->get_engine($ddl);
|
|
|
|
my @defs = $ddl =~ m/^(\s+`.*?),?$/gm;
|
|
my @cols = map { $_ =~ m/`([^`]+)`/ } @defs;
|
|
MKDEBUG && _d('Table cols:', join(', ', map { "`$_`" } @cols));
|
|
|
|
my %def_for;
|
|
@def_for{@cols} = @defs;
|
|
|
|
my (@nums, @null);
|
|
my (%type_for, %is_nullable, %is_numeric, %is_autoinc);
|
|
foreach my $col ( @cols ) {
|
|
my $def = $def_for{$col};
|
|
my ( $type ) = $def =~ m/`[^`]+`\s([a-z]+)/;
|
|
die "Can't determine column type for $def" unless $type;
|
|
$type_for{$col} = $type;
|
|
if ( $type =~ m/(?:(?:tiny|big|medium|small)?int|float|double|decimal|year)/ ) {
|
|
push @nums, $col;
|
|
$is_numeric{$col} = 1;
|
|
}
|
|
if ( $def !~ m/NOT NULL/ ) {
|
|
push @null, $col;
|
|
$is_nullable{$col} = 1;
|
|
}
|
|
$is_autoinc{$col} = $def =~ m/AUTO_INCREMENT/i ? 1 : 0;
|
|
}
|
|
|
|
my ($keys, $clustered_key) = $self->get_keys($ddl, $opts, \%is_nullable);
|
|
|
|
my ($charset) = $ddl =~ m/DEFAULT CHARSET=(\w+)/;
|
|
|
|
return {
|
|
name => $name,
|
|
cols => \@cols,
|
|
col_posn => { map { $cols[$_] => $_ } 0..$#cols },
|
|
is_col => { map { $_ => 1 } @cols },
|
|
null_cols => \@null,
|
|
is_nullable => \%is_nullable,
|
|
is_autoinc => \%is_autoinc,
|
|
clustered_key => $clustered_key,
|
|
keys => $keys,
|
|
defs => \%def_for,
|
|
numeric_cols => \@nums,
|
|
is_numeric => \%is_numeric,
|
|
engine => $engine,
|
|
type_for => \%type_for,
|
|
charset => $charset,
|
|
};
|
|
}
|
|
|
|
sub sort_indexes {
|
|
my ( $self, $tbl ) = @_;
|
|
|
|
my @indexes
|
|
= sort {
|
|
(($a ne 'PRIMARY') <=> ($b ne 'PRIMARY'))
|
|
|| ( !$tbl->{keys}->{$a}->{is_unique} <=> !$tbl->{keys}->{$b}->{is_unique} )
|
|
|| ( $tbl->{keys}->{$a}->{is_nullable} <=> $tbl->{keys}->{$b}->{is_nullable} )
|
|
|| ( scalar(@{$tbl->{keys}->{$a}->{cols}}) <=> scalar(@{$tbl->{keys}->{$b}->{cols}}) )
|
|
}
|
|
grep {
|
|
$tbl->{keys}->{$_}->{type} eq 'BTREE'
|
|
}
|
|
sort keys %{$tbl->{keys}};
|
|
|
|
MKDEBUG && _d('Indexes sorted best-first:', join(', ', @indexes));
|
|
return @indexes;
|
|
}
|
|
|
|
sub find_best_index {
|
|
my ( $self, $tbl, $index ) = @_;
|
|
my $best;
|
|
if ( $index ) {
|
|
($best) = grep { uc $_ eq uc $index } keys %{$tbl->{keys}};
|
|
}
|
|
if ( !$best ) {
|
|
if ( $index ) {
|
|
die "Index '$index' does not exist in table";
|
|
}
|
|
else {
|
|
($best) = $self->sort_indexes($tbl);
|
|
}
|
|
}
|
|
MKDEBUG && _d('Best index found is', $best);
|
|
return $best;
|
|
}
|
|
|
|
sub find_possible_keys {
|
|
my ( $self, $dbh, $database, $table, $quoter, $where ) = @_;
|
|
return () unless $where;
|
|
my $sql = 'EXPLAIN SELECT * FROM ' . $quoter->quote($database, $table)
|
|
. ' WHERE ' . $where;
|
|
MKDEBUG && _d($sql);
|
|
my $expl = $dbh->selectrow_hashref($sql);
|
|
$expl = { map { lc($_) => $expl->{$_} } keys %$expl };
|
|
if ( $expl->{possible_keys} ) {
|
|
MKDEBUG && _d('possible_keys =', $expl->{possible_keys});
|
|
my @candidates = split(',', $expl->{possible_keys});
|
|
my %possible = map { $_ => 1 } @candidates;
|
|
if ( $expl->{key} ) {
|
|
MKDEBUG && _d('MySQL chose', $expl->{key});
|
|
unshift @candidates, grep { $possible{$_} } split(',', $expl->{key});
|
|
MKDEBUG && _d('Before deduping:', join(', ', @candidates));
|
|
my %seen;
|
|
@candidates = grep { !$seen{$_}++ } @candidates;
|
|
}
|
|
MKDEBUG && _d('Final list:', join(', ', @candidates));
|
|
return @candidates;
|
|
}
|
|
else {
|
|
MKDEBUG && _d('No keys in possible_keys');
|
|
return ();
|
|
}
|
|
}
|
|
|
|
sub check_table {
|
|
my ( $self, %args ) = @_;
|
|
my @required_args = qw(dbh db tbl);
|
|
foreach my $arg ( @required_args ) {
|
|
die "I need a $arg argument" unless $args{$arg};
|
|
}
|
|
my ($dbh, $db, $tbl) = @args{@required_args};
|
|
my $q = $self->{Quoter};
|
|
my $db_tbl = $q->quote($db, $tbl);
|
|
MKDEBUG && _d('Checking', $db_tbl);
|
|
|
|
my $sql = "SHOW TABLES FROM " . $q->quote($db)
|
|
. ' LIKE ' . $q->literal_like($tbl);
|
|
MKDEBUG && _d($sql);
|
|
my $row;
|
|
eval {
|
|
$row = $dbh->selectrow_arrayref($sql);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
MKDEBUG && _d($EVAL_ERROR);
|
|
return 0;
|
|
}
|
|
if ( !$row->[0] || $row->[0] ne $tbl ) {
|
|
MKDEBUG && _d('Table does not exist');
|
|
return 0;
|
|
}
|
|
|
|
MKDEBUG && _d('Table exists; no privs to check');
|
|
return 1 unless $args{all_privs};
|
|
|
|
$sql = "SHOW FULL COLUMNS FROM $db_tbl";
|
|
MKDEBUG && _d($sql);
|
|
eval {
|
|
$row = $dbh->selectrow_hashref($sql);
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
MKDEBUG && _d($EVAL_ERROR);
|
|
return 0;
|
|
}
|
|
if ( !scalar keys %$row ) {
|
|
MKDEBUG && _d('Table has no columns:', Dumper($row));
|
|
return 0;
|
|
}
|
|
my $privs = $row->{privileges} || $row->{Privileges};
|
|
|
|
$sql = "DELETE FROM $db_tbl LIMIT 0";
|
|
MKDEBUG && _d($sql);
|
|
eval {
|
|
$dbh->do($sql);
|
|
};
|
|
my $can_delete = $EVAL_ERROR ? 0 : 1;
|
|
|
|
MKDEBUG && _d('User privs on', $db_tbl, ':', $privs,
|
|
($can_delete ? 'delete' : ''));
|
|
|
|
if ( !($privs =~ m/select/ && $privs =~ m/insert/ && $privs =~ m/update/
|
|
&& $can_delete) ) {
|
|
MKDEBUG && _d('User does not have all privs');
|
|
return 0;
|
|
}
|
|
|
|
MKDEBUG && _d('User has all privs');
|
|
return 1;
|
|
}
|
|
|
|
sub get_engine {
|
|
my ( $self, $ddl, $opts ) = @_;
|
|
my ( $engine ) = $ddl =~ m/\).*?(?:ENGINE|TYPE)=(\w+)/;
|
|
MKDEBUG && _d('Storage engine:', $engine);
|
|
return $engine || undef;
|
|
}
|
|
|
|
sub get_keys {
|
|
my ( $self, $ddl, $opts, $is_nullable ) = @_;
|
|
my $engine = $self->get_engine($ddl);
|
|
my $keys = {};
|
|
my $clustered_key = undef;
|
|
|
|
KEY:
|
|
foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY .*)$/gm ) {
|
|
|
|
next KEY if $key =~ m/FOREIGN/;
|
|
|
|
my $key_ddl = $key;
|
|
MKDEBUG && _d('Parsed key:', $key_ddl);
|
|
|
|
if ( $engine !~ m/MEMORY|HEAP/ ) {
|
|
$key =~ s/USING HASH/USING BTREE/;
|
|
}
|
|
|
|
my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \((.+)\)/;
|
|
my ( $special ) = $key =~ m/(FULLTEXT|SPATIAL)/;
|
|
$type = $type || $special || 'BTREE';
|
|
if ( $opts->{mysql_version} && $opts->{mysql_version} lt '004001000'
|
|
&& $engine =~ m/HEAP|MEMORY/i )
|
|
{
|
|
$type = 'HASH'; # MySQL pre-4.1 supports only HASH indexes on HEAP
|
|
}
|
|
|
|
my ($name) = $key =~ m/(PRIMARY|`[^`]*`)/;
|
|
my $unique = $key =~ m/PRIMARY|UNIQUE/ ? 1 : 0;
|
|
my @cols;
|
|
my @col_prefixes;
|
|
foreach my $col_def ( $cols =~ m/`[^`]+`(?:\(\d+\))?/g ) {
|
|
my ($name, $prefix) = $col_def =~ m/`([^`]+)`(?:\((\d+)\))?/;
|
|
push @cols, $name;
|
|
push @col_prefixes, $prefix;
|
|
}
|
|
$name =~ s/`//g;
|
|
|
|
MKDEBUG && _d( $name, 'key cols:', join(', ', map { "`$_`" } @cols));
|
|
|
|
$keys->{$name} = {
|
|
name => $name,
|
|
type => $type,
|
|
colnames => $cols,
|
|
cols => \@cols,
|
|
col_prefixes => \@col_prefixes,
|
|
is_unique => $unique,
|
|
is_nullable => scalar(grep { $is_nullable->{$_} } @cols),
|
|
is_col => { map { $_ => 1 } @cols },
|
|
ddl => $key_ddl,
|
|
};
|
|
|
|
if ( $engine =~ m/InnoDB/i && !$clustered_key ) {
|
|
my $this_key = $keys->{$name};
|
|
if ( $this_key->{name} eq 'PRIMARY' ) {
|
|
$clustered_key = 'PRIMARY';
|
|
}
|
|
elsif ( $this_key->{is_unique} && !$this_key->{is_nullable} ) {
|
|
$clustered_key = $this_key->{name};
|
|
}
|
|
MKDEBUG && $clustered_key && _d('This key is the clustered key');
|
|
}
|
|
}
|
|
|
|
return $keys, $clustered_key;
|
|
}
|
|
|
|
sub get_fks {
|
|
my ( $self, $ddl, $opts ) = @_;
|
|
my $q = $self->{Quoter};
|
|
my $fks = {};
|
|
|
|
foreach my $fk (
|
|
$ddl =~ m/CONSTRAINT .* FOREIGN KEY .* REFERENCES [^\)]*\)/mg )
|
|
{
|
|
my ( $name ) = $fk =~ m/CONSTRAINT `(.*?)`/;
|
|
my ( $cols ) = $fk =~ m/FOREIGN KEY \(([^\)]+)\)/;
|
|
my ( $parent, $parent_cols ) = $fk =~ m/REFERENCES (\S+) \(([^\)]+)\)/;
|
|
|
|
my ($db, $tbl) = $q->split_unquote($parent, $opts->{database});
|
|
my %parent_tbl = (tbl => $tbl);
|
|
$parent_tbl{db} = $db if $db;
|
|
|
|
if ( $parent !~ m/\./ && $opts->{database} ) {
|
|
$parent = $q->quote($opts->{database}) . ".$parent";
|
|
}
|
|
|
|
$fks->{$name} = {
|
|
name => $name,
|
|
colnames => $cols,
|
|
cols => [ map { s/[ `]+//g; $_; } split(',', $cols) ],
|
|
parent_tbl => \%parent_tbl,
|
|
parent_tblname => $parent,
|
|
parent_cols => [ map { s/[ `]+//g; $_; } split(',', $parent_cols) ],
|
|
parent_colnames=> $parent_cols,
|
|
ddl => $fk,
|
|
};
|
|
}
|
|
|
|
return $fks;
|
|
}
|
|
|
|
sub remove_auto_increment {
|
|
my ( $self, $ddl ) = @_;
|
|
$ddl =~ s/(^\).*?) AUTO_INCREMENT=\d+\b/$1/m;
|
|
return $ddl;
|
|
}
|
|
|
|
sub remove_secondary_indexes {
|
|
my ( $self, $ddl ) = @_;
|
|
my $sec_indexes_ddl;
|
|
my $tbl_struct = $self->parse($ddl);
|
|
|
|
if ( ($tbl_struct->{engine} || '') =~ m/InnoDB/i ) {
|
|
my $clustered_key = $tbl_struct->{clustered_key};
|
|
$clustered_key ||= '';
|
|
|
|
my @sec_indexes = map {
|
|
my $key_def = $_->{ddl};
|
|
$key_def =~ s/([\(\)])/\\$1/g;
|
|
$ddl =~ s/\s+$key_def//i;
|
|
|
|
my $key_ddl = "ADD $_->{ddl}";
|
|
$key_ddl .= ',' unless $key_ddl =~ m/,$/;
|
|
$key_ddl;
|
|
}
|
|
grep { $_->{name} ne $clustered_key }
|
|
values %{$tbl_struct->{keys}};
|
|
MKDEBUG && _d('Secondary indexes:', Dumper(\@sec_indexes));
|
|
|
|
if ( @sec_indexes ) {
|
|
$sec_indexes_ddl = join(' ', @sec_indexes);
|
|
$sec_indexes_ddl =~ s/,$//;
|
|
}
|
|
|
|
$ddl =~ s/,(\n\) )/$1/s;
|
|
}
|
|
else {
|
|
MKDEBUG && _d('Not removing secondary indexes from',
|
|
$tbl_struct->{engine}, 'table');
|
|
}
|
|
|
|
return $ddl, $sec_indexes_ddl, $tbl_struct;
|
|
}
|
|
|
|
sub _d {
|
|
my ($package, undef, $line) = caller 0;
|
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
|
map { defined $_ ? $_ : 'undef' }
|
|
@_;
|
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
|
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End TableParser package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# Transformers package
|
|
# This package is a copy without comments from the original. The original
|
|
# with comments and its test file can be found in the Bazaar repository at,
|
|
# lib/Transformers.pm
|
|
# t/lib/Transformers.t
|
|
# See https://launchpad.net/percona-toolkit for more information.
|
|
# ###########################################################################
|
|
{
|
|
package Transformers;
|
|
|
|
use strict;
|
|
use warnings FATAL => 'all';
|
|
use English qw(-no_match_vars);
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
use Time::Local qw(timegm timelocal);
|
|
use Digest::MD5 qw(md5_hex);
|
|
|
|
require Exporter;
|
|
our @ISA = qw(Exporter);
|
|
our %EXPORT_TAGS = ();
|
|
our @EXPORT = ();
|
|
our @EXPORT_OK = qw(
|
|
micro_t
|
|
percentage_of
|
|
secs_to_time
|
|
time_to_secs
|
|
shorten
|
|
ts
|
|
parse_timestamp
|
|
unix_timestamp
|
|
any_unix_timestamp
|
|
make_checksum
|
|
crc32
|
|
);
|
|
|
|
our $mysql_ts = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/;
|
|
our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/;
|
|
our $n_ts = qr/(\d{1,5})([shmd]?)/; # Limit \d{1,5} because \d{6} looks like a MySQL timestamp (YYMMDD)
|
|
|
|
sub micro_t {
|
|
my ( $t, %args ) = @_;
|
|
my $p_ms = defined $args{p_ms} ? $args{p_ms} : 0; # precision for ms vals
|
|
my $p_s = defined $args{p_s} ? $args{p_s} : 0; # precision for s vals
|
|
my $f;
|
|
|
|
$t = 0 if $t < 0;
|
|
|
|
$t = sprintf('%.17f', $t) if $t =~ /e/;
|
|
|
|
$t =~ s/\.(\d{1,6})\d*/\.$1/;
|
|
|
|
if ($t > 0 && $t <= 0.000999) {
|
|
$f = ($t * 1000000) . 'us';
|
|
}
|
|
elsif ($t >= 0.001000 && $t <= 0.999999) {
|
|
$f = sprintf("%.${p_ms}f", $t * 1000);
|
|
$f = ($f * 1) . 'ms'; # * 1 to remove insignificant zeros
|
|
}
|
|
elsif ($t >= 1) {
|
|
$f = sprintf("%.${p_s}f", $t);
|
|
$f = ($f * 1) . 's'; # * 1 to remove insignificant zeros
|
|
}
|
|
else {
|
|
$f = 0; # $t should = 0 at this point
|
|
}
|
|
|
|
return $f;
|
|
}
|
|
|
|
sub percentage_of {
|
|
my ( $is, $of, %args ) = @_;
|
|
my $p = $args{p} || 0; # float precision
|
|
my $fmt = $p ? "%.${p}f" : "%d";
|
|
return sprintf $fmt, ($is * 100) / ($of ||= 1);
|
|
}
|
|
|
|
sub secs_to_time {
|
|
my ( $secs, $fmt ) = @_;
|
|
$secs ||= 0;
|
|
return '00:00' unless $secs;
|
|
|
|
$fmt ||= $secs >= 86_400 ? 'd'
|
|
: $secs >= 3_600 ? 'h'
|
|
: 'm';
|
|
|
|
return
|
|
$fmt eq 'd' ? sprintf(
|
|
"%d+%02d:%02d:%02d",
|
|
int($secs / 86_400),
|
|
int(($secs % 86_400) / 3_600),
|
|
int(($secs % 3_600) / 60),
|
|
$secs % 60)
|
|
: $fmt eq 'h' ? sprintf(
|
|
"%02d:%02d:%02d",
|
|
int(($secs % 86_400) / 3_600),
|
|
int(($secs % 3_600) / 60),
|
|
$secs % 60)
|
|
: sprintf(
|
|
"%02d:%02d",
|
|
int(($secs % 3_600) / 60),
|
|
$secs % 60);
|
|
}
|
|
|
|
sub time_to_secs {
|
|
my ( $val, $default_suffix ) = @_;
|
|
die "I need a val argument" unless defined $val;
|
|
my $t = 0;
|
|
my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/;
|
|
$suffix = $suffix || $default_suffix || 's';
|
|
if ( $suffix =~ m/[smhd]/ ) {
|
|
$t = $suffix eq 's' ? $num * 1 # Seconds
|
|
: $suffix eq 'm' ? $num * 60 # Minutes
|
|
: $suffix eq 'h' ? $num * 3600 # Hours
|
|
: $num * 86400; # Days
|
|
|
|
$t *= -1 if $prefix && $prefix eq '-';
|
|
}
|
|
else {
|
|
die "Invalid suffix for $val: $suffix";
|
|
}
|
|
return $t;
|
|
}
|
|
|
|
sub shorten {
|
|
my ( $num, %args ) = @_;
|
|
my $p = defined $args{p} ? $args{p} : 2; # float precision
|
|
my $d = defined $args{d} ? $args{d} : 1_024; # divisor
|
|
my $n = 0;
|
|
my @units = ('', qw(k M G T P E Z Y));
|
|
while ( $num >= $d && $n < @units - 1 ) {
|
|
$num /= $d;
|
|
++$n;
|
|
}
|
|
return sprintf(
|
|
$num =~ m/\./ || $n
|
|
? "%.${p}f%s"
|
|
: '%d',
|
|
$num, $units[$n]);
|
|
}
|
|
|
|
sub ts {
|
|
my ( $time, $gmt ) = @_;
|
|
my ( $sec, $min, $hour, $mday, $mon, $year )
|
|
= $gmt ? gmtime($time) : localtime($time);
|
|
$mon += 1;
|
|
$year += 1900;
|
|
my $val = sprintf("%d-%02d-%02dT%02d:%02d:%02d",
|
|
$year, $mon, $mday, $hour, $min, $sec);
|
|
if ( my ($us) = $time =~ m/(\.\d+)$/ ) {
|
|
$us = sprintf("%.6f", $us);
|
|
$us =~ s/^0\././;
|
|
$val .= $us;
|
|
}
|
|
return $val;
|
|
}
|
|
|
|
sub parse_timestamp {
|
|
my ( $val ) = @_;
|
|
if ( my($y, $m, $d, $h, $i, $s, $f)
|
|
= $val =~ m/^$mysql_ts$/ )
|
|
{
|
|
return sprintf "%d-%02d-%02d %02d:%02d:"
|
|
. (defined $f ? '%09.6f' : '%02d'),
|
|
$y + 2000, $m, $d, $h, $i, (defined $f ? $s + $f : $s);
|
|
}
|
|
return $val;
|
|
}
|
|
|
|
sub unix_timestamp {
|
|
my ( $val, $gmt ) = @_;
|
|
if ( my($y, $m, $d, $h, $i, $s, $us) = $val =~ m/^$proper_ts$/ ) {
|
|
$val = $gmt
|
|
? timegm($s, $i, $h, $d, $m - 1, $y)
|
|
: timelocal($s, $i, $h, $d, $m - 1, $y);
|
|
if ( defined $us ) {
|
|
$us = sprintf('%.6f', $us);
|
|
$us =~ s/^0\././;
|
|
$val .= $us;
|
|
}
|
|
}
|
|
return $val;
|
|
}
|
|
|
|
sub any_unix_timestamp {
|
|
my ( $val, $callback ) = @_;
|
|
|
|
if ( my ($n, $suffix) = $val =~ m/^$n_ts$/ ) {
|
|
$n = $suffix eq 's' ? $n # Seconds
|
|
: $suffix eq 'm' ? $n * 60 # Minutes
|
|
: $suffix eq 'h' ? $n * 3600 # Hours
|
|
: $suffix eq 'd' ? $n * 86400 # Days
|
|
: $n; # default: Seconds
|
|
MKDEBUG && _d('ts is now - N[shmd]:', $n);
|
|
return time - $n;
|
|
}
|
|
elsif ( $val =~ m/^\d{9,}/ ) {
|
|
MKDEBUG && _d('ts is already a unix timestamp');
|
|
return $val;
|
|
}
|
|
elsif ( my ($ymd, $hms) = $val =~ m/^(\d{6})(?:\s+(\d+:\d+:\d+))?/ ) {
|
|
MKDEBUG && _d('ts is MySQL slow log timestamp');
|
|
$val .= ' 00:00:00' unless $hms;
|
|
return unix_timestamp(parse_timestamp($val));
|
|
}
|
|
elsif ( ($ymd, $hms) = $val =~ m/^(\d{4}-\d\d-\d\d)(?:[T ](\d+:\d+:\d+))?/) {
|
|
MKDEBUG && _d('ts is properly formatted timestamp');
|
|
$val .= ' 00:00:00' unless $hms;
|
|
return unix_timestamp($val);
|
|
}
|
|
else {
|
|
MKDEBUG && _d('ts is MySQL expression');
|
|
return $callback->($val) if $callback && ref $callback eq 'CODE';
|
|
}
|
|
|
|
MKDEBUG && _d('Unknown ts type:', $val);
|
|
return;
|
|
}
|
|
|
|
sub make_checksum {
|
|
my ( $val ) = @_;
|
|
my $checksum = uc substr(md5_hex($val), -16);
|
|
MKDEBUG && _d($checksum, 'checksum for', $val);
|
|
return $checksum;
|
|
}
|
|
|
|
sub crc32 {
|
|
my ( $string ) = @_;
|
|
return unless $string;
|
|
my $poly = 0xEDB88320;
|
|
my $crc = 0xFFFFFFFF;
|
|
foreach my $char ( split(//, $string) ) {
|
|
my $comp = ($crc ^ ord($char)) & 0xFF;
|
|
for ( 1 .. 8 ) {
|
|
$comp = $comp & 1 ? $poly ^ ($comp >> 1) : $comp >> 1;
|
|
}
|
|
$crc = (($crc >> 8) & 0x00FFFFFF) ^ $comp;
|
|
}
|
|
return $crc ^ 0xFFFFFFFF;
|
|
}
|
|
|
|
sub _d {
|
|
my ($package, undef, $line) = caller 0;
|
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
|
map { defined $_ ? $_ : 'undef' }
|
|
@_;
|
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
|
}
|
|
|
|
1;
|
|
}
|
|
# ###########################################################################
|
|
# End Transformers package
|
|
# ###########################################################################
|
|
|
|
# ###########################################################################
|
|
# This is a combination of modules and programs in one -- a runnable module.
|
|
# http://www.perl.com/pub/a/2006/07/13/lightning-articles.html?page=last
|
|
# Or, look it up in the Camel book on pages 642 and 643 in the 3rd edition.
|
|
#
|
|
# Check at the end of this package for the call to main() which actually runs
|
|
# the program.
|
|
# ###########################################################################
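# As a rough sketch (the names below are only illustrative, not part of this
# tool), the runnable-module idiom used by this file boils down to:
#
#   package my_tool;
#   sub main { ...; return 0 }             # all work happens in main()
#   if ( !caller ) { exit main(@ARGV); }   # run only when executed directly
#   1;                                     # true value when require'd by tests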
|
|
package pt_heartbeat;
|
|
|
|
use English qw(-no_match_vars);
|
|
use List::Util qw(min max sum);
|
|
use Time::HiRes qw(gettimeofday time sleep usleep);
|
|
use IO::File;
|
|
|
|
use constant MKDEBUG => $ENV{MKDEBUG} || 0;
|
|
|
|
Transformers->import(qw(ts unix_timestamp));
|
|
|
|
my @dbhs; # Holds slave DBHs if --recurse
|
|
my @sths; # Holds [$host, $sth] if --recurse
|
|
|
|
sub main {
|
|
@ARGV = @_; # set global ARGV for this package
|
|
|
|
# Reset all global vars between test runs else weird things happen.
|
|
@dbhs = ();
|
|
@sths = ();
|
|
|
|
# ########################################################################
|
|
# Get configuration information.
|
|
# ########################################################################
|
|
my $o = OptionParser->new();
|
|
$o->get_specs();
|
|
$o->get_opts();
|
|
|
|
my $dp = $o->DSNParser;
|
|
$dp->prop('dbidriver', $o->get('dbi-driver'));
|
|
$dp->prop('set-vars', $o->get('set-vars'));
|
|
|
|
if ( !$o->get('help') ) {
|
|
my @frames = $o->get('frames') =~ m/(\d+[smhd])/g;
|
|
if ( @frames ) {
|
|
my @times;
|
|
foreach my $frame ( @frames ) {
|
|
my ($num, $suf) = $frame =~ m/(\d+)([smhd])$/;
|
|
if ( !$num ) {
|
|
$o->save_error("Invalid --frames argument");
|
|
}
|
|
else {
|
|
push @times,
|
|
$suf eq 's' ? $num # Seconds
|
|
: $suf eq 'm' ? $num * 60 # Minutes
|
|
: $suf eq 'h' ? $num * 3600 # Hours
|
|
: $num * 86400; # Days
|
|
}
|
|
}
|
|
$o->set('frames', \@times);
|
|
}
|
|
else {
|
|
$o->save_error("Invalid --frames argument");
|
|
}
|
|
|
|
if ( $o->get('create-table')
|
|
&& !($o->get('database') && $o->get('table'))) {
|
|
$o->save_error('--create-table requires both --database and --table');
|
|
}
|
|
|
|
if ( $o->get('interval') < 0.01 ) {
|
|
$o->save_error("--interval must be >= 0.01");
|
|
}
|
|
|
|
if ( !$o->get('stop') && !$o->get('database') ) {
|
|
$o->save_error('--database must be specified');
|
|
}
|
|
}
|
|
|
|
$o->usage_or_errors();
|
|
|
|
# ########################################################################
|
|
# Make common modules and var for frequently used options.
|
|
# ########################################################################
|
|
my $q = Quoter->new();
|
|
my $tp = TableParser->new(Quoter => $q);
|
|
|
|
my $interval = $o->get('interval');
|
|
my $skew = $o->get('update') ? 0 : $o->get('skew');
|
|
my $sentinel = $o->get('sentinel');
|
|
my $frames = $o->get('frames');
|
|
my $db = $o->get('database');
|
|
my $tbl = $o->get('table');
|
|
|
|
# ########################################################################
|
|
# Create --sentinel file if --stop was given, and possibly exit.
|
|
# ########################################################################
|
|
if ( $o->get('stop') ) {
|
|
MKDEBUG && _d('Creating sentinel file', $sentinel);
|
|
my $file = IO::File->new($sentinel, ">>")
|
|
or die "Cannot open $sentinel: $OS_ERROR\n";
|
|
print $file "Remove this file to permit pt-heartbeat to run\n"
|
|
or die "Cannot write to $sentinel: $OS_ERROR\n";
|
|
close $file
|
|
or die "Cannot close $sentinel: $OS_ERROR\n";
|
|
print STDOUT "Successfully created file $sentinel\n";
|
|
# Exit only if no other action (update, monitor, check) is given.
|
|
if ( !$o->get('update') && !$o->get('check') && !$o->get('monitor') ) {
|
|
MKDEBUG && _d("Nothing more to do, quitting");
|
|
return 0;
|
|
}
|
|
else {
|
|
# Wait for all other running instances to quit, assuming they have the
|
|
# same --interval as this invocation. Then remove the file and
|
|
# continue.
|
|
MKDEBUG && _d("Waiting for other instances to quit");
|
|
sleep $interval ;
|
|
MKDEBUG && _d("Unlinking", $sentinel);
|
|
unlink $sentinel
|
|
or die "Cannot unlink $sentinel: $OS_ERROR";
|
|
}
|
|
}
|
|
|
|
# ########################################################################
|
|
# Connect to MySQL.
|
|
# ########################################################################
|
|
if ( $o->get('ask-pass') ) {
|
|
$o->set('password', OptionParser::prompt_noecho("Enter password: "));
|
|
}
|
|
my $dsn_defaults = $dp->parse_options($o);
|
|
my $dsn = @ARGV ? $dp->parse(shift @ARGV, $dsn_defaults)
|
|
: $dsn_defaults;
|
|
my $dbh = $dp->get_dbh($dp->get_cxn_params($dsn), {AutoCommit=>1});
|
|
|
|
$dbh->{InactiveDestroy} = 1; # Don't disconnect on fork
|
|
$dbh->{FetchHashKeyName} = 'NAME_lc';
|
|
$dbh->do("USE `$db`");
|
|
|
|
# ########################################################################
|
|
# Create the heartbeat table if --create-table was given.
|
|
# ########################################################################
|
|
my $db_tbl = $q->quote($db, $tbl);
|
|
my $server_id = $dbh->selectrow_array('SELECT @@server_id');
|
|
if ( $o->get('create-table') ) {
|
|
my $sql = $o->read_para_after(__FILE__, qr/MAGIC_create_heartbeat/);
|
|
$sql =~ s/heartbeat/IF NOT EXISTS $db_tbl/;
|
|
MKDEBUG && _d($sql);
|
|
$dbh->do($sql);
|
|
|
|
$sql = "INSERT INTO $db_tbl (ts, server_id) VALUES (NOW(), $server_id)";
|
|
MKDEBUG && _d($sql);
|
|
# This may fail if the table already existed and already had this row.
|
|
# We eval to ignore this possibility.
|
|
eval { $dbh->do($sql); };
|
|
}
|
|
|
|
# ########################################################################
|
|
# Get and check heartbeat table structure.
|
|
# ########################################################################
|
|
my $tbl_def = $dbh->selectrow_arrayref("SHOW CREATE TABLE $db_tbl");
|
|
my $tbl_struct = $tp->parse($tbl_def->[1]);
|
|
|
|
die "Heartbeat table $db_tbl does not have a ts column"
|
|
unless $tbl_struct->{is_col}->{ts};
|
|
|
|
my $hires_ts = $tbl_struct->{type_for}->{ts} =~ m/char/i ? 1 : 0;
|
|
MKDEBUG && _d("Hi-res ts:", ($hires_ts ? 'yes' : 'no'));
|
|
|
|
my $id = $tbl_struct->{is_col}->{id}; # legacy table struct
|
|
die "Heartbeat table $db_tbl does not have a server_id or id column"
|
|
unless $tbl_struct->{is_col}->{server_id} || $id;
|
|
|
|
# If there's an id column, then we're running in legacy mode. If there's
|
|
# a server_id column, then we're running in the new mode which supports
|
|
# multiple --update instances.
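   # For example: a legacy table is updated and monitored WHERE id=1,
   # whereas a server_id table is updated WHERE server_id=@@server_id and
   # monitored WHERE server_id=<the master's server id>.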
|
|
if ( $tbl_struct->{is_col}->{id} && $tbl_struct->{is_col}->{server_id} ) {
|
|
die "Heartbeat table $db_tbl cannot have both an id column and "
|
|
. "a server_id column";
|
|
}
|
|
|
|
# pk_col and pk_val are used to identify the heartbeat row to update or
|
|
   # monitor.
|
|
my ($pk_col, $pk_val);
|
|
if ( $id ) {
|
|
# Legacy mode: update heartbeat row WHERE id=1 and monitor heartbeat
|
|
# row WHERE id=1.
|
|
$pk_col = 'id';
|
|
$pk_val = '1';
|
|
}
|
|
elsif ( $tbl_struct->{is_col}->{server_id} ) {
|
|
# Multi-update mode: update heartbeat row WHERE server_id=@@server_id
|
|
# and monitor heartbeat row WHERE server_id=master_server_id.
|
|
if ( $o->get('update') ) {
|
|
$pk_col = 'server_id';
|
|
$pk_val = $server_id;
|
|
}
|
|
else { # monitor or check
|
|
my $master_server_id = $o->get('master-server-id');
|
|
if ( !$master_server_id ) {
|
|
eval {
|
|
my $vp = VersionParser->new();
|
|
my $ms = MasterSlave->new(VersionParser => $vp);
|
|
my $master_dsn = $ms->get_master_dsn($dbh, $dsn, $dp)
|
|
or die "This server is not a slave";
|
|
my $master_dbh = $dp->get_dbh($dp->get_cxn_params($master_dsn),
|
|
{ AutoCommit => 1 });
|
|
($master_server_id)
|
|
= $master_dbh->selectrow_array('SELECT @@server_id');
|
|
$master_dbh->disconnect;
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
MKDEBUG && _d("Error determining master id:", $EVAL_ERROR);
|
|
}
|
|
}
|
|
if ( !$master_server_id ) {
|
|
die "The --master-server-id option must be specified because "
|
|
. "the heartbeat table $db_tbl uses the server_id column "
|
|
. "for --update or --check but the server's master could "
|
|
. "not be automatically determined.\n"
|
|
. "Please read the DESCRIPTION section of the pt-heartbeat POD.\n";
|
|
}
|
|
$pk_col = 'server_id';
|
|
$pk_val = $master_server_id;
|
|
}
|
|
}
|
|
else {
|
|
die "Heartbeat table $db_tbl does not have a server_id or id column";
|
|
}
|
|
MKDEBUG && _d('Heartbeat row primary key:', $pk_col, '=', $pk_val);
|
|
|
|
# Check that heartbeat table has at least 1 row unless --replace because
|
|
# --replace will create the row if it doesn't exist.
|
|
if ( !$o->get('replace') ) {
|
|
my $sql = "SELECT 1 FROM $db_tbl WHERE $pk_col='$pk_val' LIMIT 1";
|
|
MKDEBUG && _d($sql);
|
|
my $row = $dbh->selectall_arrayref($sql);
|
|
if ( scalar @$row == 0 ) {
|
|
MKDEBUG && _d('No heartbeat row in table');
|
|
if ( $o->get('insert-heartbeat-row') ) {
|
|
my $sql = "INSERT INTO $db_tbl ($pk_col, ts) "
|
|
. "VALUES ('$pk_val', NOW())";
|
|
MKDEBUG && _d($sql);
|
|
$dbh->do($sql);
|
|
}
|
|
else {
|
|
if ( $id ) {
|
|
die "The heartbeat table is empty.\n"
|
|
. "At least one row must be inserted into the heartbeat "
|
|
. "table.\nPlease read the DESCRIPTION section of the "
|
|
. "pt-heartbeat POD.\n";
|
|
}
|
|
else {
|
|
die "No row found in heartbeat table for server_id $pk_val.\n"
|
|
. "At least one row must be inserted into the heartbeat "
|
|
. "table for server_id $pk_val.\nPlease read the "
|
|
. "DESCRIPTION section of the pt-heartbeat POD.\n";
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
# ########################################################################
|
|
# Make sth for updating or checking the heartbeat table.
|
|
# ########################################################################
|
|
my ($heartbeat_sql, $heartbeat_sth);
|
|
my ($get_delay, $update_heartbeat);
|
|
|
|
if ( $o->get('update') ) {
|
|
my @master_status_cols = grep { $tbl_struct->{is_col}->{$_} }
|
|
qw(file position);
|
|
MKDEBUG && _d("Master status columns:", join(', ', @master_status_cols));
|
|
|
|
my @slave_status_cols = grep { $tbl_struct->{is_col}->{$_} }
|
|
qw(relay_master_log_file exec_master_log_pos);
|
|
MKDEBUG && _d("Slave status columns:", join(', ', @slave_status_cols));
|
|
|
|
# Just a shortcut so I don't have to check both arrays when creating
|
|
      # the SQL statement below.
|
|
my @extra_cols = (@master_status_cols, @slave_status_cols);
|
|
|
|
if ( $o->get('replace') ) {
|
|
$heartbeat_sql
|
|
= "REPLACE INTO $db_tbl (ts, $pk_col"
|
|
. (@extra_cols ? ", " . join(', ', @extra_cols) : '')
|
|
. ") VALUES (?, '$pk_val'"
|
|
. (@extra_cols ? ", " . join(', ', map { '?' } @extra_cols) : '')
|
|
. ")";
|
|
}
|
|
else {
|
|
$heartbeat_sql
|
|
= "UPDATE $db_tbl SET ts=?"
|
|
. (@extra_cols ? ", " . join(', ', map { "$_=?" } @extra_cols) : "")
|
|
. " WHERE $pk_col='$pk_val'";
|
|
}
|
|
MKDEBUG && _d("UPDATE SQL:", $heartbeat_sql);
|
|
|
|
$heartbeat_sth = $dbh->prepare($heartbeat_sql);
|
|
|
|
$update_heartbeat = sub {
|
|
my ($sth) = @_;
|
|
my @vals;
|
|
|
|
my $sql;
|
|
if ( @master_status_cols ) {
|
|
$sql = "SHOW MASTER STATUS";
|
|
MKDEBUG && _d($dbh, $sql);
|
|
my $row = $dbh->selectrow_hashref($sql);
|
|
if ( !$row ) {
|
|
MKDEBUG && _d("No row from", $sql);
|
|
push @vals, map { undef } @master_status_cols;
|
|
}
|
|
else {
|
|
push @vals, map { $row->{$_} } @master_status_cols;
|
|
}
|
|
}
|
|
|
|
if ( @slave_status_cols ) {
|
|
$sql = "SHOW SLAVE STATUS";
|
|
MKDEBUG && _d($dbh, $sql);
|
|
my $row = $dbh->selectrow_hashref($sql);
|
|
if ( !$row ) {
|
|
MKDEBUG && _d("No row from", $sql);
|
|
push @vals, map { undef } @slave_status_cols;
|
|
}
|
|
else {
|
|
push @vals, map { $row->{$_} } @slave_status_cols;
|
|
}
|
|
}
|
|
|
|
$sth->execute(ts(time), @vals);
|
|
MKDEBUG && _d($sth->{Statement});
|
|
$sth->finish();
|
|
|
|
return;
|
|
};
|
|
}
|
|
else { # --monitor or --check
|
|
my $dbi_driver = lc $o->get('dbi-driver');
|
|
|
|
$heartbeat_sql
|
|
= "SELECT ts"
|
|
. ($dbi_driver eq 'mysql' ? '/*!50038, @@hostname AS host*/' : '')
|
|
. ($id ? "" : ", server_id")
|
|
. " FROM $db_tbl "
|
|
. "WHERE $pk_col='$pk_val' "
|
|
. "LIMIT 1";
|
|
MKDEBUG && _d("SELECT SQL:", $heartbeat_sql);
|
|
|
|
$heartbeat_sth = $dbh->prepare($heartbeat_sql);
|
|
|
|
$get_delay = sub {
|
|
my ($sth) = @_;
|
|
$sth->execute();
|
|
MKDEBUG && _d($sth->{Statement});
|
|
my ($ts, $hostname, $server_id) = $sth->fetchrow_array();
|
|
my $now = time;
|
|
MKDEBUG && _d("Heartbeat from server", $server_id, "\n",
|
|
" now:", ts($now), "\n",
|
|
" ts:", $ts, "\n",
|
|
"skew:", $skew);
|
|
my $delay = $now - unix_timestamp($ts) - $skew;
|
|
MKDEBUG && _d('Delay', sprintf('%.6f', $delay), 'on', $hostname);
|
|
|
|
# Because we adjust for skew, if the ts are less than skew seconds
|
|
# apart (i.e. replication is very fast) then delay will be negative.
|
|
# So it's effectively 0 seconds of lag.
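         # For example (numbers are illustrative): with --skew=0.5, a
         # heartbeat written at 12:00:01.00 and read back at 12:00:01.48
         # gives 0.48 - 0.5 = -0.02, which is clamped to 0.00 below.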
|
|
$delay = 0.00 if $delay < 0;
|
|
|
|
$sth->finish();
|
|
return ($delay, $hostname, $pk_val);
|
|
};
|
|
}
|
|
|
|
# Do a little check just to make sure the table is there, so there's one last
|
|
# chance to catch errors before daemonizing.
|
|
if ( $o->get('update') ) {
|
|
$update_heartbeat->($heartbeat_sth);
|
|
}
|
|
else {
|
|
$get_delay->($heartbeat_sth);
|
|
}
|
|
$heartbeat_sth->finish();
|
|
|
|
# ########################################################################
|
|
# Daemonize only after (potentially) asking for passwords for --ask-pass.
|
|
# ########################################################################
|
|
my $daemon;
|
|
if ( $o->get('daemonize') ) {
|
|
$daemon = Daemon->new(o=>$o);
|
|
$daemon->daemonize();
|
|
MKDEBUG && _d('I am a daemon now');
|
|
}
|
|
elsif ( $o->get('pid') ) {
|
|
      # We're not daemonizing; this just handles PID stuff.
|
|
$daemon = Daemon->new(o=>$o);
|
|
$daemon->make_PID_file();
|
|
}
|
|
|
|
# ########################################################################
|
|
# --check and exit if --check was given.
|
|
# ########################################################################
|
|
if ( $o->get('check') ) {
|
|
MKDEBUG && _d('--check and exit');
|
|
check_delay(
|
|
dsn => $dsn,
|
|
dbh => $dbh,
|
|
sth => $heartbeat_sth,
|
|
sql => $heartbeat_sql,
|
|
get_delay => $get_delay,
|
|
interval => $interval,
|
|
skew => $skew,
|
|
hires_ts => $hires_ts,
|
|
OptionParser => $o,
|
|
DSNParser => $dp,
|
|
);
|
|
disconnect($dbh, $heartbeat_sth);
|
|
return 0;
|
|
}
|
|
|
|
# ########################################################################
|
|
# Setup moving averages for --frames.
|
|
# ########################################################################
|
|
my @samples;
|
|
my $limit = max(@$frames);
|
|
|
|
# 2.00s [ 0.05s, 0.01s, 0.00s ]
|
|
my $format = ($hires_ts ? '%.2f' : '%4d') . "s "
|
|
. "[ " . join(", ", map { "%5.2fs" } @$frames) . " ]"
|
|
. ($o->get('print-master-server-id') ? " %d" : '')
|
|
. "\n";
|
|
|
|
# ########################################################################
|
|
# Monitor or update the heartbeat table.
|
|
# ########################################################################
|
|
my $end = $o->get('run-time') ? int(time + $o->get('run-time')) : 0;
|
|
MKDEBUG && _d($end ? ('Will exit at', ts($end)) : 'Running forever');
|
|
|
|
my $get_next_interval = make_interval_iter($interval, $skew);
|
|
|
|
while ( # Stop if...
|
|
(!$end || int(time) < $end) # runtime exceeded, or
|
|
&& !-f $sentinel # sentinel file created
|
|
) {
|
|
eval {
|
|
my $next_interval = $get_next_interval->();
|
|
if ( time >= $next_interval ) {
|
|
do { $next_interval = $get_next_interval->() }
|
|
until $next_interval > time;
|
|
MKDEBUG && _d("Missed last interval; next interval:",
|
|
ts($next_interval));
|
|
}
|
|
sleep $next_interval - time;
|
|
MKDEBUG && _d('Woke up at', ts(time));
|
|
|
|
# Connect or reconnect if necessary.
|
|
if ( !$dbh->ping() ) {
|
|
$dbh = $dp->get_dbh($dp->get_cxn_params($dsn), { AutoCommit => 1 });
|
|
$dbh->{InactiveDestroy} = 1; # Don't disconnect on fork
|
|
$dbh->{FetchHashKeyName} = 'NAME_lc';
|
|
$dbh->do("USE `$db`");
|
|
|
|
$heartbeat_sth = undef;
|
|
}
|
|
|
|
if ( $o->get('monitor') ) {
|
|
$heartbeat_sth ||= $dbh->prepare($heartbeat_sql);
|
|
my ($delay) = $get_delay->($heartbeat_sth);
|
|
|
|
unshift @samples, $delay;
|
|
pop @samples if @samples > $limit;
|
|
|
|
# Calculate and print results
|
|
my @vals = map {
|
|
my $bound = min($_, scalar(@samples));
|
|
sum(@samples[0 .. $bound-1]) / $_;
|
|
} @$frames;
|
|
|
|
my $output = sprintf $format, $delay, @vals, $pk_val;
|
|
if ( my $file = $o->get('file') ) {
|
|
            open my $fh, '>', $file
|
|
or die "Can't open $file: $OS_ERROR";
|
|
            print $fh $output
|
|
or die "Can't print to $file: $OS_ERROR";
|
|
            close $fh
|
|
or die "Can't close $file: $OS_ERROR";
|
|
}
|
|
else {
|
|
print $output;
|
|
}
|
|
}
|
|
else { # --update mode
|
|
$heartbeat_sth ||= $dbh->prepare($heartbeat_sql);
|
|
$update_heartbeat->($heartbeat_sth);
|
|
}
|
|
};
|
|
if ( $EVAL_ERROR ) {
|
|
my ( $err ) = $EVAL_ERROR =~ m/^(?:DBI|DBD).*failed: (.*?)\s*at \S+ line .*/;
|
|
if ( $err ) {
|
|
warn "$err\n";
|
|
}
|
|
else {
|
|
die $EVAL_ERROR;
|
|
}
|
|
}
|
|
}
|
|
|
|
disconnect($dbh, $heartbeat_sth);
|
|
return 0;
|
|
}
|
|
|
|
# ############################################################################
|
|
# Subroutines.
|
|
# ############################################################################
|
|
|
|
# Check the delay on a single server. Optionally recurse to all its slaves.
|
|
sub check_delay {
|
|
my ( %args ) = @_;
|
|
my @required_args = qw(dsn dbh sth sql get_delay interval skew OptionParser DSNParser);
|
|
foreach my $arg ( @required_args ) {
|
|
die "I need a $arg argument" unless $args{$arg};
|
|
}
|
|
my ($dsn, $dbh, $sth, $sql, $get_delay, $interval, $skew, $o, $dp)
|
|
= @args{@required_args};
|
|
MKDEBUG && _d('Checking slave delay');
|
|
|
|
# Collect a list of connections to the slaves.
|
|
if ( $o->get('recurse') ) {
|
|
MKDEBUG && _d('Recursing to slaves');
|
|
my $vp = VersionParser->new();
|
|
my $ms = MasterSlave->new(VersionParser => $vp);
|
|
$ms->recurse_to_slaves(
|
|
{ dbh => $dbh,
|
|
dsn => $dsn,
|
|
dsn_parser => $dp,
|
|
recurse => $o->get('recurse'),
|
|
callback => sub {
|
|
my ( $dsn, $dbh, $level ) = @_;
|
|
push @dbhs, $dbh;
|
|
MKDEBUG && _d("Found slave", $dp->as_string($dsn));
|
|
push @sths, [ $dsn, $dbh->prepare($sql) ];
|
|
},
|
|
method => $o->get('recursion-method'),
|
|
},
|
|
);
|
|
}
|
|
else {
|
|
push @sths, [ $dsn, $sth ];
|
|
}
|
|
|
|
my $format_delay = ($args{hires_ts} ? '%.2f' : '%d')
|
|
. ($o->get('print-master-server-id') ? " %d" : "")
|
|
. "\n";
|
|
my $format_host = "%-20s $format_delay";
|
|
|
|
# Before hi-res ts, we could check all slaves at one interval, assuming
|
|
# the checks were fast, i.e. able to be done within one interval. But
|
|
   # now we have intervals as short as 0.01s, and that's too short to check all
|
|
# slaves. So for each slave we sleep until the next interval.
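   # For example (figures are illustrative): with --interval 0.01 and five
   # slaves, even a 2ms round trip per check would overrun a single 10ms
   # interval, so each slave gets its own interval slot instead.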
|
|
my $get_next_interval = make_interval_iter($interval, $skew);
|
|
|
|
SLAVE:
|
|
foreach my $thing ( @sths ) {
|
|
my ( $dsn, $sth ) = @$thing;
|
|
MKDEBUG && _d('Checking slave', $dp->as_string($dsn));
|
|
|
|
my $next_interval = $get_next_interval->();
|
|
if ( time >= $next_interval ) {
|
|
do { $next_interval = $get_next_interval->() }
|
|
until $next_interval > time;
|
|
MKDEBUG && _d("Missed last interval; next interval:",
|
|
ts($next_interval));
|
|
}
|
|
sleep $next_interval - time;
|
|
MKDEBUG && _d('Woke up at', ts(time));
|
|
my ($delay, $hostname, $master_server_id) = $get_delay->($sth);
|
|
|
|
if ( $o->get('recurse') ) {
|
|
# Must print not only the delay, but the server's hostname if
|
|
# available. Prefer the hostname from the DSN, then the hostname
|
|
# from @@hostname, then fall back to Socket or default File.
|
|
my $host = $dsn->{h} || $hostname || $dsn->{S} || $dsn->{F} || '';
|
|
if ( $dsn->{P} && $dsn->{P} ne '3306' ) {
|
|
$host .= ":$dsn->{P}";
|
|
}
|
|
printf($format_host, $host, $delay, $master_server_id);
|
|
}
|
|
else {
|
|
# Just print the delay.
|
|
printf($format_delay, $delay, $master_server_id);
|
|
}
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
# The interval iterator works by first returning the next whole second.
|
|
# So if the current time (since epoch) is 5.123, then the next whole second
|
|
# is 6.0, plus an optional skew. The next interval is 6.0 + the interval.
|
|
# If the interval is 0.5s, then the next interval is 6.5, plus an optional
|
|
# skew. Therefore, we always start on a whole second and return when the
|
|
# next interval is (or should be). The caller can then sleep(next_interval - time)
|
|
# to wake up at that interval. If the caller misses the next interval,
|
|
# they just call the iterator until the next interval is later than the
|
|
# current time.
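# A minimal usage sketch (values are illustrative; the real callers pass the
# user's --interval and --skew):
#
#   my $get_next_interval = make_interval_iter(0.5, 0);
#   # If gettimeofday() returned 5.123 when the iterator was created,
#   # successive calls return 6.0, 6.5, 7.0, ... and the caller does
#   #   sleep $next_interval - time;
#   # to wake up on each interval boundary.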
|
|
sub make_interval_iter {
|
|
my ( $interval, $skew ) = @_;
|
|
die "I need an interval argument" unless defined $interval;
|
|
my ($s) = gettimeofday();
|
|
my $start_s = $s + 1;
|
|
my $i = 0;
|
|
my $get_next_interval = sub {
|
|
return $start_s + ($interval * $i++) + $skew;
|
|
};
|
|
return $get_next_interval;
|
|
}
|
|
|
|
sub disconnect {
|
|
my ( $dbh, $sth ) = @_;
|
|
MKDEBUG && _d('Disconnecting');
|
|
$sth->finish() if $sth;
|
|
foreach my $handle ( @sths ) {
|
|
my $sth = $handle->[1];
|
|
$sth->finish() if $sth;
|
|
}
|
|
foreach my $handle ( $dbh, @dbhs ) {
|
|
$handle->disconnect() if $handle;
|
|
}
|
|
return;
|
|
}
|
|
|
|
sub _d {
|
|
my ($package, undef, $line) = caller 0;
|
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
|
map { defined $_ ? $_ : 'undef' }
|
|
@_;
|
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
|
}
|
|
|
|
# ############################################################################
|
|
# Run the program.
|
|
# ############################################################################
|
|
if ( !caller ) { exit main(@ARGV); }
|
|
|
|
1; # Because this is a module as well as a script.
|
|
|
|
# ############################################################################
|
|
# Documentation.
|
|
# ############################################################################
|
|
|
|
=pod
|
|
|
|
=head1 NAME
|
|
|
|
pt-heartbeat - Monitor MySQL replication delay.
|
|
|
|
=head1 SYNOPSIS
|
|
|
|
Usage: pt-heartbeat [OPTION...] [DSN] --update|--monitor|--check|--stop
|
|
|
|
pt-heartbeat measures replication lag on a MySQL or PostgreSQL server. You can
|
|
use it to update a master or monitor a replica. If possible, MySQL connection
|
|
options are read from your .my.cnf file.
|
|
|
|
Start daemonized process to update test.heartbeat table on master:
|
|
|
|
pt-heartbeat -D test --update -h master-server --daemonize
|
|
|
|
Monitor replication lag on slave:
|
|
|
|
pt-heartbeat -D test --monitor -h slave-server
|
|
|
|
pt-heartbeat -D test --monitor -h slave-server --dbi-driver Pg
|
|
|
|
Check slave lag once and exit (using optional DSN to specify slave host):
|
|
|
|
pt-heartbeat -D test --check h=slave-server
|
|
|
|
=head1 RISKS
|
|
|
|
The following section is included to inform users about the potential risks,
|
|
whether known or unknown, of using this tool. The two main categories of risks
|
|
are those created by the nature of the tool (e.g. read-only tools vs. read-write
|
|
tools) and those created by bugs.
|
|
|
|
pt-heartbeat merely reads and writes a single record in a table. It should be
|
|
very low-risk.
|
|
|
|
At the time of this release, we know of no bugs that could cause serious harm to
|
|
users.
|
|
|
|
The authoritative source for updated information is always the online issue
|
|
tracking system. Issues that affect this tool will be marked as such. You can
|
|
see a list of such issues at the following URL:
|
|
L<http://www.percona.com/bugs/pt-heartbeat>.
|
|
|
|
See also L<"BUGS"> for more information on filing bugs and getting help.
|
|
|
|
=head1 DESCRIPTION
|
|
|
|
pt-heartbeat is a two-part MySQL and PostgreSQL replication delay monitoring
|
|
system that measures delay by looking at actual replicated data. This
|
|
avoids reliance on the replication mechanism itself, which is unreliable (for
|
|
example, C<SHOW SLAVE STATUS> on MySQL).
|
|
|
|
The first part is an L<"--update"> instance of pt-heartbeat that connects to
|
|
a master and updates a timestamp ("heartbeat record") every L<"--interval">
|
|
seconds. Since the heartbeat table may contain records from multiple
|
|
masters (see L<"MULTI-SLAVE HIERARCHY">), the server's ID (@@server_id) is
|
|
used to identify records.
|
|
|
|
The second part is a L<"--monitor"> or L<"--check"> instance of pt-heartbeat
|
|
that connects to a slave, examines the replicated heartbeat record from its
|
|
immediate master or the specified L<"--master-server-id">, and computes the
|
|
difference from the current system time. If replication between the slave and
|
|
the master is delayed or broken, the computed difference will be greater than
|
|
zero and potentially increase if L<"--monitor"> is specified.
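For example (the times below are purely illustrative), if the slave's copy
of the heartbeat record and the clock on the monitoring host read

  ts  = 2011-01-01T12:00:01.00   (written on the master, then replicated)
  now = 2011-01-01T12:00:03.50   (current time where the check runs)

then the raw difference is 3.50 seconds, and the reported delay is that
difference minus the configured skew (see L<"--skew">).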
|
|
|
|
You must either manually create the heartbeat table on the master or use
|
|
L<"--create-table">. See L<"--create-table"> for the proper heartbeat
|
|
table structure. The C<MEMORY> storage engine is suggested, but not
|
|
required, for MySQL.
|
|
|
|
The heartbeat table must contain a heartbeat row. By default, a heartbeat
|
|
row is inserted if it doesn't exist. This feature can be disabled with the
|
|
L<"--[no]insert-heartbeat-row"> option in case the database user does not
|
|
have INSERT privileges.
|
|
|
|
pt-heartbeat depends only on the heartbeat record being replicated to the slave,
|
|
so it works regardless of the replication mechanism (built-in replication, a
|
|
system such as Continuent Tungsten, etc). It works at any depth in the
|
|
replication hierarchy; for example, it will reliably report how far a slave lags
|
|
its master's master's master. And if replication is stopped, it will continue
|
|
to work and report (accurately!) that the slave is falling further and further
|
|
behind the master.
|
|
|
|
pt-heartbeat has a maximum resolution of 0.01 second. The clocks on the
|
|
master and slave servers must be closely synchronized via NTP. By default,
|
|
L<"--update"> checks happen on the edge of the second (e.g. 00:01) and
|
|
L<"--monitor"> checks happen halfway between seconds (e.g. 00:01.5).
|
|
As long as the servers' clocks are closely synchronized and replication
|
|
events are propagating in less than half a second, pt-heartbeat will report
|
|
zero seconds of delay.
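For example (timings are illustrative): an L<"--update"> instance writes the
heartbeat at 12:00:01.00 and the event reaches the slave 0.20 seconds later;
a L<"--monitor"> instance sampling at 12:00:01.50 sees a raw difference of
0.50 seconds, which the half-second offset cancels out, so the reported
delay is 0.00 seconds.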
|
|
|
|
pt-heartbeat will try to reconnect if the connection has an error, but will
|
|
not retry if it can't get a connection when it first starts.
|
|
|
|
The L<"--dbi-driver"> option lets you use pt-heartbeat to monitor PostgreSQL
|
|
as well. It is reported to work well with Slony-1 replication.
|
|
|
|
=head1 MULTI-SLAVE HIERARCHY
|
|
|
|
If the replication hierarchy has multiple slaves which are masters of
|
|
other slaves, like "master -> slave1 -> slave2", L<"--update"> instances
|
|
can be run on the slaves as well as the master. The default heartbeat
|
|
table (see L<"--create-table">) is keyed on the C<server_id> column, so
|
|
each server will update the row where C<server_id=@@server_id>.
|
|
|
|
For L<"--monitor"> and L<"--check">, if L<"--master-server-id"> is not
|
|
specified, the tool tries to discover and use the slave's immediate master.
|
|
If this fails, or if you want to monitor lag from another master, then you can
|
|
specify the L<"--master-server-id"> to use.
|
|
|
|
For example, if the replication hierarchy is "master -> slave1 -> slave2"
|
|
with corresponding server IDs 1, 2 and 3, you can:
|
|
|
|
pt-heartbeat --daemonize -D test --update -h master
|
|
pt-heartbeat --daemonize -D test --update -h slave1
|
|
|
|
Then check (or monitor) the replication delay from master to slave2:
|
|
|
|
pt-heartbeat -D test --master-server-id 1 --check slave2
|
|
|
|
Or check the replication delay from slave1 to slave2:
|
|
|
|
pt-heartbeat -D test --master-server-id 2 --check slave2
|
|
|
|
Stopping the L<"--update"> instance on slave1 will not affect the instance
|
|
on master.
|
|
|
|
=head1 MASTER AND SLAVE STATUS
|
|
|
|
The default heartbeat table (see L<"--create-table">) has columns for saving
|
|
information from C<SHOW MASTER STATUS> and C<SHOW SLAVE STATUS>. These
|
|
columns are optional. If any are present, their corresponding information
|
|
will be saved.
|
|
|
|
=head1 OPTIONS
|
|
|
|
Specify at least one of L<"--stop">, L<"--update">, L<"--monitor">, or L<"--check">.
|
|
|
|
L<"--update">, L<"--monitor">, and L<"--check"> are mutually exclusive.
|
|
|
|
L<"--daemonize"> and L<"--check"> are mutually exclusive.
|
|
|
|
This tool accepts additional command-line arguments. Refer to the
|
|
L<"SYNOPSIS"> and usage information for details.
|
|
|
|
=over
|
|
|
|
=item --ask-pass
|
|
|
|
Prompt for a password when connecting to MySQL.
|
|
|
|
=item --charset
|
|
|
|
short form: -A; type: string
|
|
|
|
Default character set. If the value is utf8, sets Perl's binmode on STDOUT to
|
|
utf8, passes the mysql_enable_utf8 option to DBD::mysql, and runs SET NAMES UTF8
|
|
after connecting to MySQL. Any other value sets binmode on STDOUT without the
|
|
utf8 layer, and runs SET NAMES after connecting to MySQL.
|
|
|
|
=item --check
|
|
|
|
Check slave delay once and exit. If you also specify L<"--recurse">, the
|
|
tool will try to discover slaves of the given slave and check and print
|
|
their lag, too. The hostname or IP and port for each slave is printed
|
|
before its delay. L<"--recurse"> only works with MySQL.
|
|
|
|
=item --config
|
|
|
|
type: Array
|
|
|
|
Read this comma-separated list of config files; if specified, this must be the
|
|
first option on the command line.
|
|
|
|
=item --create-table
|
|
|
|
Create the heartbeat L<"--table"> if it does not exist.
|
|
|
|
This option causes the table specified by L<"--database"> and L<"--table"> to
|
|
be created with the following MAGIC_create_heartbeat table definition:
|
|
|
|
CREATE TABLE heartbeat (
|
|
ts varchar(26) NOT NULL,
|
|
server_id int unsigned NOT NULL PRIMARY KEY,
|
|
file varchar(255) DEFAULT NULL, -- SHOW MASTER STATUS
|
|
position bigint unsigned DEFAULT NULL, -- SHOW MASTER STATUS
|
|
relay_master_log_file varchar(255) DEFAULT NULL, -- SHOW SLAVE STATUS
|
|
exec_master_log_pos bigint unsigned DEFAULT NULL -- SHOW SLAVE STATUS
|
|
);
|
|
|
|
The heartbeat table requires at least one row. If you manually create the
|
|
heartbeat table, then you must insert a row by doing:
|
|
|
|
INSERT INTO heartbeat (ts, server_id) VALUES (NOW(), N);
|
|
|
|
where C<N> is the server's ID; do not use @@server_id because it will replicate
|
|
and slaves will insert their own server ID instead of the master's server ID.
|
|
|
|
This is done automatically by L<"--create-table">.
|
|
|
|
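For example (the host and database names are placeholders), a first run that
both creates the table and begins updating it might look like:

  pt-heartbeat -D test --update -h master --create-table
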
A legacy version of the heartbeat table is still supported:
|
|
|
|
CREATE TABLE heartbeat (
|
|
id int NOT NULL PRIMARY KEY,
|
|
ts datetime NOT NULL
|
|
);
|
|
|
|
Legacy tables do not support L<"--update"> instances on each slave
|
|
of a multi-slave hierarchy like "master -> slave1 -> slave2".
|
|
To manually insert the one required row into a legacy table:
|
|
|
|
INSERT INTO heartbeat (id, ts) VALUES (1, NOW());
|
|
|
|
The tool automatically detects if the heartbeat table is legacy.
|
|
|
|
See also L<"MULTI-SLAVE HIERARCHY">.
|
|
|
|
=item --daemonize
|
|
|
|
Fork to the background and detach from the shell. POSIX operating systems only.
|
|
|
|
=item --database
|
|
|
|
short form: -D; type: string
|
|
|
|
The database to use for the connection.
|
|
|
|
=item --dbi-driver
|
|
|
|
default: mysql; type: string
|
|
|
|
Specify a driver for the connection; C<mysql> and C<Pg> are supported.
|
|
|
|
=item --defaults-file
|
|
|
|
short form: -F; type: string
|
|
|
|
Only read mysql options from the given file. You must give an absolute
|
|
pathname.
|
|
|
|
=item --file
|
|
|
|
type: string
|
|
|
|
Print latest L<"--monitor"> output to this file.
|
|
|
|
When L<"--monitor"> is given, prints output to the specified file instead of to
|
|
STDOUT. The file is opened, truncated, and closed every interval, so it will
|
|
only contain the most recent statistics. Useful when L<"--daemonize"> is given.
|
|
|
|
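For example, with a hypothetical output path:

  pt-heartbeat -D test --monitor -h slave1 --daemonize \
    --file /var/log/pt-heartbeat.txt
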
=item --frames
|
|
|
|
type: string; default: 1m,5m,15m
|
|
|
|
Timeframes for averages.
|
|
|
|
Specifies the timeframes over which to calculate moving averages when
|
|
L<"--monitor"> is given. Specify as a comma-separated list of numbers with
|
|
suffixes. The suffix can be s for seconds, m for minutes, h for hours, or d for
|
|
days. The size of the largest frame determines the maximum memory usage, as up
|
|
to the specified number of per-second samples are kept in memory to calculate
|
|
the averages. You can specify as many timeframes as you like.
|
|
|
|
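For example, to also track an hour-long moving average (the frames shown are
illustrative):

  pt-heartbeat -D test --monitor -h slave1 --frames 1m,15m,1h
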
=item --help
|
|
|
|
Show help and exit.
|
|
|
|
=item --host
|
|
|
|
short form: -h; type: string
|
|
|
|
Connect to host.
|
|
|
|
=item --[no]insert-heartbeat-row
|
|
|
|
default: yes
|
|
|
|
Insert a heartbeat row in the L<"--table"> if one doesn't exist.
|
|
|
|
The heartbeat L<"--table"> requires a heartbeat row, else there's nothing
|
|
to L<"--update">, L<"--monitor">, or L<"--check">! By default, the tool will
|
|
insert a heartbeat row if one is not already present. You can disable this
|
|
feature by specifying C<--no-insert-heartbeat-row> in case the database user
|
|
does not have INSERT privileges.
|
|
|
|
=item --interval
|
|
|
|
type: float; default: 1.0
|
|
|
|
How often to update or check the heartbeat L<"--table">. Updates and checks
|
|
begin on the first whole second, then repeat every L<"--interval"> seconds
|
|
for L<"--update">; L<"--monitor"> checks repeat at the same interval but are
|
|
offset by L<"--skew"> seconds.
|
|
|
|
For example, if at 00:00.4 an L<"--update"> instance is started at 0.5 second
|
|
intervals, the first update happens at 00:01.0, the next at 00:01.5, etc.
|
|
If at 00:10.7 a L<"--monitor"> instance is started at 0.05 second intervals
|
|
with the default 0.5 second L<"--skew">, then the first check happens at
|
|
00:11.5 (00:11.0 + 0.5) which will be L<"--skew"> seconds after the last update
|
|
which, because the instances are checking at synchronized intervals, happened
|
|
at 00:11.0.
|
|
|
|
The tool waits for and begins on the first whole second just to make the
|
|
interval calculations simpler. Therefore, the tool could wait up to 1 second
|
|
before updating or checking.
|
|
|
|
The minimum (fastest) interval is 0.01, and the maximum precision is two
|
|
decimal places, so 0.015 will be rounded to 0.02.
|
|
|
|
If a legacy heartbeat table (see L<"--create-table">) is used, then the
|
|
maximum precision is 1s because the C<ts> column is type C<datetime>.
|
|
|
|
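For example, a half-second update interval (an illustrative value) could be
requested like this:

  pt-heartbeat -D test --update -h master --interval 0.5
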
=item --log
|
|
|
|
type: string
|
|
|
|
Print all output to this file when daemonized.
|
|
|
|
=item --master-server-id
|
|
|
|
type: string
|
|
|
|
Calculate delay from this master server ID for L<"--monitor"> or L<"--check">.
|
|
If not given, pt-heartbeat attempts to connect to the server's master and
|
|
determine its server id.
|
|
|
|
=item --monitor
|
|
|
|
Monitor slave delay continuously.
|
|
|
|
Specifies that pt-heartbeat should check the slave's delay every second and
|
|
report to STDOUT (or if L<"--file"> is given, to the file instead). The output
|
|
is the current delay followed by moving averages over the timeframes given in
|
|
L<"--frames">. For example,
|
|
|
|
5s [ 0.25s, 0.05s, 0.02s ]
|
|
|
|
=item --password
|
|
|
|
short form: -p; type: string
|
|
|
|
Password to use when connecting.
|
|
|
|
=item --pid
|
|
|
|
type: string
|
|
|
|
Create the given PID file when daemonized. The file contains the process ID of
|
|
the daemonized instance. The PID file is removed when the daemonized instance
|
|
exits. The program checks for the existence of the PID file when starting; if
|
|
it exists and a process with the matching PID is running, the program exits.
|
|
|
|
=item --port
|
|
|
|
short form: -P; type: int
|
|
|
|
Port number to use for connection.
|
|
|
|
=item --print-master-server-id
|
|
|
|
Print the auto-detected or given L<"--master-server-id">.  With L<"--check">
|
|
or L<"--monitor">, this option prints the auto-detected or given
|
|
L<"--master-server-id"> at the end of each line.
|
|
|
|
=item --recurse
|
|
|
|
type: int
|
|
|
|
Check slaves recursively to this depth in L<"--check"> mode.
|
|
|
|
Try to discover slave servers recursively, to the specified depth. After
|
|
discovering servers, run the check on each one of them and print the hostname
|
|
(if possible), followed by the slave delay.
|
|
|
|
This currently works only with MySQL. See L<"--recursion-method">.
|
|
|
|
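For example, to check the given slave and any slaves discovered up to two
levels below it (the depth is illustrative):

  pt-heartbeat -D test --check -h slave1 --recurse 2
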
=item --recursion-method
|
|
|
|
type: string
|
|
|
|
Preferred recursion method used to find slaves.
|
|
|
|
Possible methods are:
|
|
|
|
METHOD USES
|
|
=========== ================
|
|
processlist SHOW PROCESSLIST
|
|
hosts SHOW SLAVE HOSTS
|
|
|
|
The processlist method is preferred because SHOW SLAVE HOSTS is not reliable.
|
|
However, the hosts method is required if the server uses a non-standard
|
|
port (not 3306). Usually pt-heartbeat does the right thing and finds
|
|
the slaves, but you may give a preferred method and it will be used first.
|
|
If it doesn't find any slaves, the other methods will be tried.
|
|
|
|
=item --replace
|
|
|
|
Use C<REPLACE> instead of C<UPDATE> for --update.
|
|
|
|
When running in L<"--update"> mode, use C<REPLACE> instead of C<UPDATE> to set
|
|
the heartbeat table's timestamp. The C<REPLACE> statement is a MySQL extension
|
|
to SQL. This option is useful when you don't know whether the table contains
|
|
any rows or not. It must be used in conjunction with --update.
|
|
|
|
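For example:

  pt-heartbeat -D test --update -h master --replace
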
=item --run-time
|
|
|
|
type: time
|
|
|
|
Time to run before exiting.
|
|
|
|
=item --sentinel
|
|
|
|
type: string; default: /tmp/pt-heartbeat-sentinel
|
|
|
|
Exit if this file exists.
|
|
|
|
=item --set-vars
|
|
|
|
type: string; default: wait_timeout=10000
|
|
|
|
Set these MySQL variables. Immediately after connecting to MySQL, this string
|
|
will be appended to SET and executed.
|
|
|
|
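For example, to raise the timeout beyond the default (the value is
illustrative):

  pt-heartbeat -D test --update -h master --set-vars wait_timeout=20000
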
=item --skew
|
|
|
|
type: float; default: 0.5
|
|
|
|
How long to delay checks.
|
|
|
|
The default is to delay checks one half second. Since the update happens as
|
|
soon as possible after the beginning of the second on the master, this allows
|
|
one half second of replication delay before reporting that the slave lags the
|
|
master by one second. If your clocks are not completely accurate or there is
|
|
some other reason you'd like to delay the checks more or less, you can tweak this
|
|
value. Try setting the C<MKDEBUG> environment variable to see the effect this
|
|
has.
|
|
|
|
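For example, to allow a little more propagation time before each check (the
value is illustrative):

  pt-heartbeat -D test --monitor -h slave1 --skew 0.8
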
=item --socket
|
|
|
|
short form: -S; type: string
|
|
|
|
Socket file to use for connection.
|
|
|
|
=item --stop
|
|
|
|
Stop running instances by creating the sentinel file.
|
|
|
|
This should have the effect of stopping all running
|
|
instances which are watching the same sentinel file. If none of
|
|
L<"--update">, L<"--monitor"> or L<"--check"> is specified, C<pt-heartbeat>
|
|
will exit after creating the file. If one of these is specified,
|
|
C<pt-heartbeat> will wait the interval given by L<"--interval">, then remove
|
|
the file and continue working.
|
|
|
|
You might find this handy to stop cron jobs gracefully if necessary, or to
|
|
replace one running instance with another. For example, if you want to stop
|
|
and restart C<pt-heartbeat> every hour (just to make sure that it is restarted
|
|
every hour, in case of a server crash or some other problem), you could use a
|
|
C<crontab> line like this:
|
|
|
|
0 * * * * pt-heartbeat --update -D test --stop \
|
|
--sentinel /tmp/pt-heartbeat-hourly
|
|
|
|
The non-default L<"--sentinel"> will make sure the hourly C<cron> job stops
|
|
only instances previously started with the same options (that is, from the
|
|
same C<cron> job).
|
|
|
|
See also L<"--sentinel">.
|
|
|
|
=item --table
|
|
|
|
type: string; default: heartbeat
|
|
|
|
The table to use for the heartbeat.
|
|
|
|
Don't specify database.table; use L<"--database"> to specify the database.
|
|
|
|
See L<"--create-table">.
|
|
|
|
=item --update
|
|
|
|
Update a master's heartbeat.
|
|
|
|
=item --user
|
|
|
|
short form: -u; type: string
|
|
|
|
User for login if not current user.
|
|
|
|
=item --version
|
|
|
|
Show version and exit.
|
|
|
|
=back
|
|
|
|
=head1 DSN OPTIONS
|
|
|
|
These DSN options are used to create a DSN. Each option is given like
|
|
C<option=value>. The options are case-sensitive, so P and p are not the
|
|
same option. There cannot be whitespace before or after the C<=> and
|
|
if the value contains whitespace it must be quoted. DSN options are
|
|
comma-separated. See the L<percona-toolkit> manpage for full details.
|
|
|
|
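For example, a DSN built from the options below might look like this (the
host, port, and user shown are placeholders):

  h=slave1,P=3307,u=monitor,D=test
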
=over
|
|
|
|
=item * A
|
|
|
|
dsn: charset; copy: yes
|
|
|
|
Default character set.
|
|
|
|
=item * D
|
|
|
|
dsn: database; copy: yes
|
|
|
|
Default database.
|
|
|
|
=item * F
|
|
|
|
dsn: mysql_read_default_file; copy: yes
|
|
|
|
Only read default options from the given file.
|
|
|
|
=item * h
|
|
|
|
dsn: host; copy: yes
|
|
|
|
Connect to host.
|
|
|
|
=item * p
|
|
|
|
dsn: password; copy: yes
|
|
|
|
Password to use when connecting.
|
|
|
|
=item * P
|
|
|
|
dsn: port; copy: yes
|
|
|
|
Port number to use for connection.
|
|
|
|
=item * S
|
|
|
|
dsn: mysql_socket; copy: yes
|
|
|
|
Socket file to use for connection.
|
|
|
|
=item * u
|
|
|
|
dsn: user; copy: yes
|
|
|
|
User for login if not current user.
|
|
|
|
=back
|
|
|
|
=head1 ENVIRONMENT
|
|
|
|
The environment variable C<PTDEBUG> enables verbose debugging output to STDERR.
|
|
To enable debugging and capture all output to a file, run the tool like:
|
|
|
|
PTDEBUG=1 pt-heartbeat ... > FILE 2>&1
|
|
|
|
Be careful: debugging output is voluminous and can generate several megabytes
|
|
of output.
|
|
|
|
=head1 SYSTEM REQUIREMENTS
|
|
|
|
You need Perl, DBI, DBD::mysql, and some core packages that ought to be
|
|
installed in any reasonably new version of Perl.
|
|
|
|
=head1 BUGS
|
|
|
|
For a list of known bugs, see L<http://www.percona.com/bugs/pt-heartbeat>.
|
|
|
|
Please report bugs at L<https://bugs.launchpad.net/percona-toolkit>.
|
|
Include the following information in your bug report:
|
|
|
|
=over
|
|
|
|
=item * Complete command-line used to run the tool
|
|
|
|
=item * Tool L<"--version">
|
|
|
|
=item * MySQL version of all servers involved
|
|
|
|
=item * Output from the tool including STDERR
|
|
|
|
=item * Input files (log/dump/config files, etc.)
|
|
|
|
=back
|
|
|
|
If possible, include debugging output by running the tool with C<PTDEBUG>;
|
|
see L<"ENVIRONMENT">.
|
|
|
|
=head1 DOWNLOADING
|
|
|
|
Visit L<http://www.percona.com/software/percona-toolkit/> to download the
|
|
latest release of Percona Toolkit. Or, get the latest release from the
|
|
command line:
|
|
|
|
wget percona.com/get/percona-toolkit.tar.gz
|
|
|
|
wget percona.com/get/percona-toolkit.rpm
|
|
|
|
wget percona.com/get/percona-toolkit.deb
|
|
|
|
You can also get individual tools from the latest release:
|
|
|
|
wget percona.com/get/TOOL
|
|
|
|
Replace C<TOOL> with the name of any tool.
|
|
|
|
=head1 AUTHORS
|
|
|
|
Proven Scaling LLC, SixApart Ltd, Baron Schwartz, and Daniel Nichter
|
|
|
|
=head1 ABOUT PERCONA TOOLKIT
|
|
|
|
This tool is part of Percona Toolkit, a collection of advanced command-line
|
|
tools developed by Percona for MySQL support and consulting. Percona Toolkit
|
|
was forked from two projects in June, 2011: Maatkit and Aspersa. Those
|
|
projects were created by Baron Schwartz and developed primarily by him and
|
|
Daniel Nichter, both of whom are employed by Percona. Visit
|
|
L<http://www.percona.com/software/> for more software developed by Percona.
|
|
|
|
=head1 COPYRIGHT, LICENSE, AND WARRANTY
|
|
|
|
This program is copyright 2006 Proven Scaling LLC and Six Apart Ltd,
|
|
2007-2011 Percona Inc.
|
|
Feedback and improvements are welcome.
|
|
|
|
THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
|
|
WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
|
|
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
|
|
|
|
This program is free software; you can redistribute it and/or modify it under
|
|
the terms of the GNU General Public License as published by the Free Software
|
|
Foundation, version 2; OR the Perl Artistic License. On UNIX and similar
|
|
systems, you can issue `man perlgpl' or `man perlartistic' to read these
|
|
licenses.
|
|
|
|
You should have received a copy of the GNU General Public License along with
|
|
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
|
|
Place, Suite 330, Boston, MA 02111-1307 USA.
|
|
|
|
=head1 VERSION
|
|
|
|
pt-heartbeat 1.0.1
|
|
|
|
=cut
|