Mirror of https://github.com/percona/percona-toolkit.git (synced 2025-09-09 18:30:16 +00:00)
Merge pt-osc-pxc-tests.
@@ -37,6 +37,7 @@ BEGIN {
|
|||||||
VersionCheck
|
VersionCheck
|
||||||
HTTPMicro
|
HTTPMicro
|
||||||
Pingback
|
Pingback
|
||||||
|
Percona::XtraDB::Cluster
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -5021,6 +5022,7 @@ sub new {
|
|||||||
: join(', ', map { $q->quote($_) } @cols))
|
: join(', ', map { $q->quote($_) } @cols))
|
||||||
. " FROM $tbl->{name}"
|
. " FROM $tbl->{name}"
|
||||||
. ($where ? " WHERE $where" : '')
|
. ($where ? " WHERE $where" : '')
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*$comments{bite}*/";
|
. " /*$comments{bite}*/";
|
||||||
PTDEBUG && _d('One nibble statement:', $nibble_sql);
|
PTDEBUG && _d('One nibble statement:', $nibble_sql);
|
||||||
|
|
||||||
@@ -5030,6 +5032,7 @@ sub new {
|
|||||||
: join(', ', map { $q->quote($_) } @cols))
|
: join(', ', map { $q->quote($_) } @cols))
|
||||||
. " FROM $tbl->{name}"
|
. " FROM $tbl->{name}"
|
||||||
. ($where ? " WHERE $where" : '')
|
. ($where ? " WHERE $where" : '')
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*explain $comments{bite}*/";
|
. " /*explain $comments{bite}*/";
|
||||||
PTDEBUG && _d('Explain one nibble statement:', $explain_nibble_sql);
|
PTDEBUG && _d('Explain one nibble statement:', $explain_nibble_sql);
|
||||||
|
|
||||||
@@ -5113,6 +5116,7 @@ sub new {
|
|||||||
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
||||||
. ($where ? " AND ($where)" : '')
|
. ($where ? " AND ($where)" : '')
|
||||||
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*$comments{nibble}*/";
|
. " /*$comments{nibble}*/";
|
||||||
PTDEBUG && _d('Nibble statement:', $nibble_sql);
|
PTDEBUG && _d('Nibble statement:', $nibble_sql);
|
||||||
|
|
||||||
@@ -5125,6 +5129,7 @@ sub new {
|
|||||||
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
||||||
. ($where ? " AND ($where)" : '')
|
. ($where ? " AND ($where)" : '')
|
||||||
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*explain $comments{nibble}*/";
|
. " /*explain $comments{nibble}*/";
|
||||||
PTDEBUG && _d('Explain nibble statement:', $explain_nibble_sql);
|
PTDEBUG && _d('Explain nibble statement:', $explain_nibble_sql);
|
||||||
|
|
||||||
@@ -7529,6 +7534,83 @@ sub _d {
|
|||||||
# End Pingback package
|
# End Pingback package
|
||||||
# ###########################################################################
|
# ###########################################################################
|
||||||
|
|
||||||
|
# ###########################################################################
|
||||||
|
# Percona::XtraDB::Cluster package
|
||||||
|
# This package is a copy without comments from the original. The original
|
||||||
|
# with comments and its test file can be found in the Bazaar repository at,
|
||||||
|
# lib/Percona/XtraDB/Cluster.pm
|
||||||
|
# t/lib/Percona/XtraDB/Cluster.t
|
||||||
|
# See https://launchpad.net/percona-toolkit for more information.
|
||||||
|
# ###########################################################################
|
||||||
|
{
|
||||||
|
package Percona::XtraDB::Cluster;
|
||||||
|
|
||||||
|
use strict;
|
||||||
|
use warnings FATAL => 'all';
|
||||||
|
use English qw(-no_match_vars);
|
||||||
|
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
||||||
|
|
||||||
|
use Mo;
|
||||||
|
use Data::Dumper;
|
||||||
|
|
||||||
|
sub get_cluster_name {
|
||||||
|
my ($self, $cxn) = @_;
|
||||||
|
my $sql = "SHOW VARIABLES LIKE 'wsrep\_cluster\_name'";
|
||||||
|
PTDEBUG && _d($cxn->name, $sql);
|
||||||
|
my (undef, $cluster_name) = $cxn->dbh->selectrow_array($sql);
|
||||||
|
return $cluster_name;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub is_cluster_node {
|
||||||
|
my ($self, $cxn) = @_;
|
||||||
|
|
||||||
|
my $sql = "SHOW VARIABLES LIKE 'wsrep\_on'";
|
||||||
|
PTDEBUG && _d($cxn->name, $sql);
|
||||||
|
my $row = $cxn->dbh->selectrow_arrayref($sql);
|
||||||
|
PTDEBUG && _d(Dumper($row));
|
||||||
|
return unless $row && $row->[1] && ($row->[1] eq 'ON' || $row->[1] eq '1');
|
||||||
|
|
||||||
|
my $cluster_name = $self->get_cluster_name($cxn);
|
||||||
|
return $cluster_name;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub same_node {
|
||||||
|
my ($self, $cxn1, $cxn2) = @_;
|
||||||
|
|
||||||
|
my $sql = "SHOW VARIABLES LIKE 'wsrep\_sst\_receive\_address'";
|
||||||
|
PTDEBUG && _d($cxn1->name, $sql);
|
||||||
|
my (undef, $val1) = $cxn1->dbh->selectrow_array($sql);
|
||||||
|
PTDEBUG && _d($cxn2->name, $sql);
|
||||||
|
my (undef, $val2) = $cxn2->dbh->selectrow_array($sql);
|
||||||
|
|
||||||
|
return ($val1 || '') eq ($val2 || '');
|
||||||
|
}
|
||||||
|
|
||||||
|
sub same_cluster {
|
||||||
|
my ($self, $cxn1, $cxn2) = @_;
|
||||||
|
|
||||||
|
return 0 if !$self->is_cluster_node($cxn1) || !$self->is_cluster_node($cxn2);
|
||||||
|
|
||||||
|
my $cluster1 = $self->get_cluster_name($cxn1);
|
||||||
|
my $cluster2 = $self->get_cluster_name($cxn2);
|
||||||
|
|
||||||
|
return ($cluster1 || '') eq ($cluster2 || '');
|
||||||
|
}
|
||||||
|
|
||||||
|
sub _d {
|
||||||
|
my ($package, undef, $line) = caller 0;
|
||||||
|
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
|
||||||
|
map { defined $_ ? $_ : 'undef' }
|
||||||
|
@_;
|
||||||
|
print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
1;
|
||||||
|
}
|
||||||
|
# ###########################################################################
|
||||||
|
# End Percona::XtraDB::Cluster package
|
||||||
|
# ###########################################################################
|
||||||
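For orientation, the package added above is consumed elsewhere in the tool roughly as follows. This is a minimal sketch, not part of the diff; it assumes a Cxn-style connection object that provides ->dbh and ->name, as the methods above already do, and $cxn/$cxn1/$cxn2 are placeholders.

    my $cluster = Percona::XtraDB::Cluster->new;
    if ( $cluster->is_cluster_node($cxn) ) {
        # wsrep_on is ON, so the node reports its wsrep_cluster_name
        my $name = $cluster->get_cluster_name($cxn);
        PTDEBUG && _d($cxn->name, 'is in cluster', $name);
    }
    # true only if both connections are cluster nodes in the same cluster
    my $same = $cluster->same_cluster($cxn1, $cxn2);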
|
|
||||||
# ###########################################################################
|
# ###########################################################################
|
||||||
# This is a combination of modules and programs in one -- a runnable module.
|
# This is a combination of modules and programs in one -- a runnable module.
|
||||||
# http://www.perl.com/pub/a/2006/07/13/lightning-articles.html?page=last
|
# http://www.perl.com/pub/a/2006/07/13/lightning-articles.html?page=last
|
||||||
@@ -7744,6 +7826,35 @@ sub main {
|
|||||||
|
|
||||||
my $cxn = $make_cxn->(dsn => $dsn);
|
my $cxn = $make_cxn->(dsn => $dsn);
|
||||||
|
|
||||||
|
my $cluster = Percona::XtraDB::Cluster->new;
|
||||||
|
if ( $cluster->is_cluster_node($cxn) ) {
|
||||||
|
# Because of https://bugs.launchpad.net/codership-mysql/+bug/1040108
|
||||||
|
# ptc and pt-osc check Threads_running by default for --max-load.
|
||||||
|
# Strictly speaking, they can run on 5.5.27 as long as that bug doesn't
|
||||||
|
# manifest itself. If it does, however, then the tools will wait forever.
|
||||||
|
my $pxc_version = VersionParser->new($cxn->dbh);
|
||||||
|
if ( $pxc_version < '5.5.28' ) {
|
||||||
|
die "Percona XtraDB Cluster 5.5.28 or newer is required to run "
|
||||||
|
. "this tool on a cluster, but node " . $cxn->name
|
||||||
|
. " is running version " . $pxc_version->version
|
||||||
|
. ". Please upgrade the node, or run the tool on a newer node, "
|
||||||
|
. "or contact Percona for support.\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
# If wsrep_OSU_method=RSU the "DDL will be only processed locally at
|
||||||
|
# the node." So _table_new (the altered version of table) will not
|
||||||
|
# replicate to other nodes but our INSERT..SELECT operations on it
|
||||||
|
# will, thereby crashing all other nodes.
|
||||||
|
my (undef, $wsrep_osu_method) = $cxn->dbh->selectrow_array(
|
||||||
|
"SHOW VARIABLES LIKE 'wsrep\_OSU\_method'");
|
||||||
|
if ( lc($wsrep_osu_method || '') ne 'toi' ) {
|
||||||
|
die "wsrep_OSU_method=TOI is required because "
|
||||||
|
. $cxn->name . " is a cluster node. wsrep_OSU_method is "
|
||||||
|
. "currently set to " . ($wsrep_osu_method || '') . ". "
|
||||||
|
. "Set it to TOI, or contact Percona for support.\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
# ########################################################################
|
# ########################################################################
|
||||||
# Check if MySQL is new enough to have the triggers we need.
|
# Check if MySQL is new enough to have the triggers we need.
|
||||||
# Although triggers were introduced in 5.0.2, "Prior to MySQL 5.0.10,
|
# Although triggers were introduced in 5.0.2, "Prior to MySQL 5.0.10,
|
||||||
@@ -7934,7 +8045,7 @@ sub main {
|
|||||||
# ########################################################################
|
# ########################################################################
|
||||||
# Setup and check the original table.
|
# Setup and check the original table.
|
||||||
# ########################################################################
|
# ########################################################################
|
||||||
my $tp = new TableParser(Quoter => $q);
|
my $tp = TableParser->new(Quoter => $q);
|
||||||
|
|
||||||
# Common table data struct (that modules like NibbleIterator expect).
|
# Common table data struct (that modules like NibbleIterator expect).
|
||||||
my $orig_tbl = {
|
my $orig_tbl = {
|
||||||
@@ -8092,6 +8203,35 @@ sub main {
|
|||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
# ########################################################################
|
||||||
|
# Check the --alter statement.
|
||||||
|
# ########################################################################
|
||||||
|
my $renamed_cols = {};
|
||||||
|
if ( my $alter = $o->get('alter') ) {
|
||||||
|
$renamed_cols = find_renamed_cols(
|
||||||
|
alter => $o->get('alter'),
|
||||||
|
TableParser => $tp,
|
||||||
|
);
|
||||||
|
|
||||||
|
if ( $o->get('check-alter') ) {
|
||||||
|
check_alter(
|
||||||
|
tbl => $orig_tbl,
|
||||||
|
alter => $alter,
|
||||||
|
dry_run => $o->get('dry-run'),
|
||||||
|
renamed_cols => $renamed_cols,
|
||||||
|
Cxn => $cxn,
|
||||||
|
TableParser => $tp,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ( %$renamed_cols && !$o->get('dry-run') ) {
|
||||||
|
print "Renaming columns:\n"
|
||||||
|
. join("\n", map { " $_ to $renamed_cols->{$_}" }
|
||||||
|
sort keys %$renamed_cols)
|
||||||
|
. "\n";
|
||||||
|
}
|
||||||
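With a single renamed column, the message printed by the block above comes out like this (column names are illustrative):

    Renaming columns:
      old_name to new_name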
|
|
||||||
# ########################################################################
|
# ########################################################################
|
||||||
# Check and create PID file if user specified --pid.
|
# Check and create PID file if user specified --pid.
|
||||||
# ########################################################################
|
# ########################################################################
|
||||||
@@ -8179,8 +8319,6 @@ sub main {
|
|||||||
# Step 2: Alter the new, empty table. This should be very quick,
|
# Step 2: Alter the new, empty table. This should be very quick,
|
||||||
# or die if the user specified a bad alter statement.
|
# or die if the user specified a bad alter statement.
|
||||||
# #####################################################################
|
# #####################################################################
|
||||||
my %renamed_cols;
|
|
||||||
|
|
||||||
if ( my $alter = $o->get('alter') ) {
|
if ( my $alter = $o->get('alter') ) {
|
||||||
print "Altering new table...\n";
|
print "Altering new table...\n";
|
||||||
my $sql = "ALTER TABLE $new_tbl->{name} $alter";
|
my $sql = "ALTER TABLE $new_tbl->{name} $alter";
|
||||||
@@ -8193,27 +8331,6 @@ sub main {
|
|||||||
die "Error altering new table $new_tbl->{name}: $EVAL_ERROR\n"
|
die "Error altering new table $new_tbl->{name}: $EVAL_ERROR\n"
|
||||||
}
|
}
|
||||||
print "Altered $new_tbl->{name} OK.\n";
|
print "Altered $new_tbl->{name} OK.\n";
|
||||||
|
|
||||||
# Check for renamed columns.
|
|
||||||
# https://bugs.launchpad.net/percona-toolkit/+bug/1068562
|
|
||||||
%renamed_cols = find_renamed_cols($alter, $tp);
|
|
||||||
PTDEBUG && _d("Renamed columns (old => new): ", Dumper(\%renamed_cols));
|
|
||||||
if ( %renamed_cols && $o->get('check-alter') ) {
|
|
||||||
# sort is just for making output consistent for testing
|
|
||||||
my $msg = "--alter appears to rename these columns: "
|
|
||||||
. join(", ", map { "$_ to $renamed_cols{$_}" }
|
|
||||||
sort keys %renamed_cols);
|
|
||||||
if ( $o->get('dry-run') ) {
|
|
||||||
print $msg . "\n"
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
die $msg
|
|
||||||
. ". The tool should handle this correctly, but you should "
|
|
||||||
. "test it first because if it fails the renamed columns' "
|
|
||||||
. "data will be lost! Specify --no-check-alter to disable "
|
|
||||||
. "this check and perform the --alter.\n";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Get the new table struct. This shouldn't die because
|
# Get the new table struct. This shouldn't die because
|
||||||
@@ -8236,9 +8353,9 @@ sub main {
|
|||||||
my $col_posn = $orig_tbl->{tbl_struct}->{col_posn};
|
my $col_posn = $orig_tbl->{tbl_struct}->{col_posn};
|
||||||
my $orig_cols = $orig_tbl->{tbl_struct}->{is_col};
|
my $orig_cols = $orig_tbl->{tbl_struct}->{is_col};
|
||||||
my $new_cols = $new_tbl->{tbl_struct}->{is_col};
|
my $new_cols = $new_tbl->{tbl_struct}->{is_col};
|
||||||
my @common_cols = map { +{ old => $_, new => $renamed_cols{$_} || $_ } }
|
my @common_cols = map { +{ old => $_, new => $renamed_cols->{$_} || $_ } }
|
||||||
sort { $col_posn->{$a} <=> $col_posn->{$b} }
|
sort { $col_posn->{$a} <=> $col_posn->{$b} }
|
||||||
grep { $new_cols->{$_} || $renamed_cols{$_} }
|
grep { $new_cols->{$_} || $renamed_cols->{$_} }
|
||||||
keys %$orig_cols;
|
keys %$orig_cols;
|
||||||
PTDEBUG && _d('Common columns', Dumper(\@common_cols));
|
PTDEBUG && _d('Common columns', Dumper(\@common_cols));
|
||||||
|
|
||||||
@@ -8581,6 +8698,7 @@ sub main {
|
|||||||
dml => $dml,
|
dml => $dml,
|
||||||
select => $select,
|
select => $select,
|
||||||
callbacks => $callbacks,
|
callbacks => $callbacks,
|
||||||
|
lock_in_share_mode => 1,
|
||||||
OptionParser => $o,
|
OptionParser => $o,
|
||||||
Quoter => $q,
|
Quoter => $q,
|
||||||
TableParser => $tp,
|
TableParser => $tp,
|
||||||
@@ -8794,8 +8912,75 @@ sub main {
|
|||||||
# Subroutines.
|
# Subroutines.
|
||||||
# ############################################################################
|
# ############################################################################
|
||||||
|
|
||||||
|
sub check_alter {
|
||||||
|
my (%args) = @_;
|
||||||
|
my @required_args = qw(alter tbl dry_run Cxn TableParser);
|
||||||
|
foreach my $arg ( @required_args ) {
|
||||||
|
die "I need a $arg argument" unless exists $args{$arg};
|
||||||
|
}
|
||||||
|
my ($alter, $tbl, $dry_run, $cxn, $tp) = @args{@required_args};
|
||||||
|
|
||||||
|
my $ok = 1;
|
||||||
|
|
||||||
|
# ########################################################################
|
||||||
|
# Check for renamed columns.
|
||||||
|
# https://bugs.launchpad.net/percona-toolkit/+bug/1068562
|
||||||
|
# ########################################################################
|
||||||
|
my $renamed_cols = $args{renamed_cols};
|
||||||
|
if ( %$renamed_cols ) {
|
||||||
|
# sort is just for making output consistent for testing
|
||||||
|
my $msg = "--alter appears to rename these columns:\n"
|
||||||
|
. join("\n", map { " $_ to $renamed_cols->{$_}" }
|
||||||
|
sort keys %$renamed_cols)
|
||||||
|
. "\n";
|
||||||
|
if ( $dry_run ) {
|
||||||
|
print $msg;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
$ok = 0;
|
||||||
|
warn $msg
|
||||||
|
. "The tool should handle this correctly, but you should "
|
||||||
|
. "test it first because if it fails the renamed columns' "
|
||||||
|
. "data will be lost! Specify --no-check-alter to disable "
|
||||||
|
. "this check and perform the --alter.\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# ########################################################################
|
||||||
|
# If it's a cluster node, check for MyISAM which does not work.
|
||||||
|
# ########################################################################
|
||||||
|
my $cluster = Percona::XtraDB::Cluster->new;
|
||||||
|
if ( $cluster->is_cluster_node($cxn) ) {
|
||||||
|
if ( ($tbl->{tbl_struct}->{engine} || '') =~ m/MyISAM/i ) {
|
||||||
|
$ok = 0;
|
||||||
|
warn $cxn->name . " is a cluster node and the table is MyISAM, "
|
||||||
|
. "but MyISAM tables "
|
||||||
|
. "do not work with clusters and this tool. To alter the "
|
||||||
|
. "table, you must manually convert it to InnoDB first.\n";
|
||||||
|
}
|
||||||
|
elsif ( $alter =~ m/ENGINE=MyISAM/i ) {
|
||||||
|
$ok = 0;
|
||||||
|
warn $cxn->name . " is a cluster node and the table is being "
|
||||||
|
. "converted to MyISAM (ENGINE=MyISAM), but MyISAM tables "
|
||||||
|
. "do not work with clusters and this tool. To alter the "
|
||||||
|
. "table, you must manually convert it to InnoDB first.\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ( !$ok ) {
|
||||||
|
die "--check-alter failed.\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
sub find_renamed_cols {
|
sub find_renamed_cols {
|
||||||
my ($alter, $tp) = @_;
|
my (%args) = @_;
|
||||||
|
my @required_args = qw(alter TableParser);
|
||||||
|
foreach my $arg ( @required_args ) {
|
||||||
|
die "I need a $arg argument" unless $args{$arg};
|
||||||
|
}
|
||||||
|
my ($alter, $tp) = @args{@required_args};
|
||||||
|
|
||||||
my $unquoted_ident = qr/
|
my $unquoted_ident = qr/
|
||||||
(?!\p{Digit}+[.\s]) # Not all digits
|
(?!\p{Digit}+[.\s]) # Not all digits
|
||||||
@@ -8810,12 +8995,12 @@ sub find_renamed_cols {
|
|||||||
# The following alternation is there because something like (?<=.)
|
# The following alternation is there because something like (?<=.)
|
||||||
# would match if this regex was used like /.$re/,
|
# would match if this regex was used like /.$re/,
|
||||||
# or even more tellingly, would match on "``" =~ /`$re`/
|
# or even more tellingly, would match on "``" =~ /`$re`/
|
||||||
$quoted_ident_character+ # One or more characters
|
$quoted_ident_character+ # One or more characters
|
||||||
(?: `` $quoted_ident_character* )* # possibly followed by `` and
|
(?:``$quoted_ident_character*)* # possibly followed by `` and
|
||||||
# more characters, zero or more times
|
# more characters, zero or more times
|
||||||
| $quoted_ident_character* # OR, zero or more characters
|
|$quoted_ident_character* # OR, zero or more characters
|
||||||
(?: `` $quoted_ident_character* )+ # Followed by `` and maybe more
|
(?:``$quoted_ident_character* )+ # Followed by `` and maybe more
|
||||||
# characters, one or more times.
|
# characters, one or more times.
|
||||||
}x
|
}x
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -8838,7 +9023,8 @@ sub find_renamed_cols {
|
|||||||
next if lc($orig_tbl) eq lc($new_tbl);
|
next if lc($orig_tbl) eq lc($new_tbl);
|
||||||
$renames{$orig_tbl} = $new_tbl;
|
$renames{$orig_tbl} = $new_tbl;
|
||||||
}
|
}
|
||||||
return %renames;
|
PTDEBUG && _d("Renamed columns (old => new): ", Dumper(\%renames));
|
||||||
|
return \%renames;
|
||||||
}
|
}
|
||||||
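To make the new calling convention concrete, find_renamed_cols() now takes named arguments and hands back a hashref instead of a list. A small sketch (the ALTER clause and column names are made up for illustration):

    my $renamed_cols = find_renamed_cols(
        alter       => 'CHANGE COLUMN foo bar INT',
        TableParser => $tp,
    );
    # $renamed_cols is expected to be { foo => 'bar' },
    # or an empty hashref when nothing is renamed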
|
|
||||||
sub nibble_is_safe {
|
sub nibble_is_safe {
|
||||||
@@ -9819,8 +10005,21 @@ transactions. See L<"--lock-wait-timeout"> for details.
|
|||||||
The tool refuses to alter the table if foreign key constraints reference it,
|
The tool refuses to alter the table if foreign key constraints reference it,
|
||||||
unless you specify L<"--alter-foreign-keys-method">.
|
unless you specify L<"--alter-foreign-keys-method">.
|
||||||
|
|
||||||
|
=item *
|
||||||
|
|
||||||
|
The tool cannot alter MyISAM tables on L<"Percona XtraDB Cluster"> nodes.
|
||||||
|
|
||||||
=back
|
=back
|
||||||
|
|
||||||
|
=head1 Percona XtraDB Cluster
|
||||||
|
|
||||||
|
pt-online-schema-change works with Percona XtraDB Cluster (PXC) 5.5.28-23.7
|
||||||
|
and newer, but there are two limitations: only InnoDB tables can be altered,
|
||||||
|
and C<wsrep_OSU_method> must be set to C<TOI> (total order isolation).
|
||||||
|
The tool exits with an error if the host is a cluster node and the table
|
||||||
|
is MyISAM or is being converted to MyISAM (C<ENGINE=MyISAM>), or if
|
||||||
|
C<wsrep_OSU_method> is not C<TOI>. There is no way to disable these checks.
|
||||||
|
|
||||||
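A typical invocation against a cluster node looks the same as against a standalone server, for example (host, database, and table names are illustrative; the cluster-specific checks described above run automatically):

  pt-online-schema-change --alter "ENGINE=InnoDB" --execute h=node1,D=sakila,t=film_text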
=head1 OUTPUT
|
=head1 OUTPUT
|
||||||
|
|
||||||
The tool prints information about its activities to STDOUT so that you can see
|
The tool prints information about its activities to STDOUT so that you can see
|
||||||
|
@@ -5611,6 +5611,7 @@ sub new {
|
|||||||
: join(', ', map { $q->quote($_) } @cols))
|
: join(', ', map { $q->quote($_) } @cols))
|
||||||
. " FROM $tbl->{name}"
|
. " FROM $tbl->{name}"
|
||||||
. ($where ? " WHERE $where" : '')
|
. ($where ? " WHERE $where" : '')
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*$comments{bite}*/";
|
. " /*$comments{bite}*/";
|
||||||
PTDEBUG && _d('One nibble statement:', $nibble_sql);
|
PTDEBUG && _d('One nibble statement:', $nibble_sql);
|
||||||
|
|
||||||
@@ -5620,6 +5621,7 @@ sub new {
|
|||||||
: join(', ', map { $q->quote($_) } @cols))
|
: join(', ', map { $q->quote($_) } @cols))
|
||||||
. " FROM $tbl->{name}"
|
. " FROM $tbl->{name}"
|
||||||
. ($where ? " WHERE $where" : '')
|
. ($where ? " WHERE $where" : '')
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*explain $comments{bite}*/";
|
. " /*explain $comments{bite}*/";
|
||||||
PTDEBUG && _d('Explain one nibble statement:', $explain_nibble_sql);
|
PTDEBUG && _d('Explain one nibble statement:', $explain_nibble_sql);
|
||||||
|
|
||||||
@@ -5703,6 +5705,7 @@ sub new {
|
|||||||
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
||||||
. ($where ? " AND ($where)" : '')
|
. ($where ? " AND ($where)" : '')
|
||||||
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*$comments{nibble}*/";
|
. " /*$comments{nibble}*/";
|
||||||
PTDEBUG && _d('Nibble statement:', $nibble_sql);
|
PTDEBUG && _d('Nibble statement:', $nibble_sql);
|
||||||
|
|
||||||
@@ -5715,6 +5718,7 @@ sub new {
|
|||||||
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
||||||
. ($where ? " AND ($where)" : '')
|
. ($where ? " AND ($where)" : '')
|
||||||
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*explain $comments{nibble}*/";
|
. " /*explain $comments{nibble}*/";
|
||||||
PTDEBUG && _d('Explain nibble statement:', $explain_nibble_sql);
|
PTDEBUG && _d('Explain nibble statement:', $explain_nibble_sql);
|
||||||
|
|
||||||
@@ -8644,6 +8648,21 @@ sub main {
|
|||||||
my %cluster_name_for;
|
my %cluster_name_for;
|
||||||
$cluster_name_for{$master_cxn} = $cluster->is_cluster_node($master_cxn);
|
$cluster_name_for{$master_cxn} = $cluster->is_cluster_node($master_cxn);
|
||||||
|
|
||||||
|
if ( $cluster_name_for{$master_cxn} ) {
|
||||||
|
# Because of https://bugs.launchpad.net/codership-mysql/+bug/1040108
|
||||||
|
# ptc and pt-osc check Threads_running by default for --max-load.
|
||||||
|
# Strictly speaking, they can run on 5.5.27 as long as that bug doesn't
|
||||||
|
# manifest itself. If it does, however, then the tools will wait forever.
|
||||||
|
my $pxc_version = VersionParser->new($master_dbh);
|
||||||
|
if ( $pxc_version < '5.5.28' ) {
|
||||||
|
die "Percona XtraDB Cluster 5.5.28 or newer is required to run "
|
||||||
|
. "this tool on a cluster, but node " . $master_cxn->name
|
||||||
|
. " is running version " . $pxc_version->version
|
||||||
|
. ". Please upgrade the node, or run the tool on a newer node, "
|
||||||
|
. "or contact Percona for support.\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
# ########################################################################
|
# ########################################################################
|
||||||
# If this is not a dry run (--explain was not specified), then we're
|
# If this is not a dry run (--explain was not specified), then we're
|
||||||
# going to checksum the tables, so do the necessary preparations and
|
# going to checksum the tables, so do the necessary preparations and
|
||||||
@@ -10775,7 +10794,7 @@ can try something like the following:
|
|||||||
|
|
||||||
=head1 Percona XtraDB Cluster
|
=head1 Percona XtraDB Cluster
|
||||||
|
|
||||||
pt-table-checksum works with Percona XtraDB Cluster (PXC) 5.5.27-23.6 and newer.
|
pt-table-checksum works with Percona XtraDB Cluster (PXC) 5.5.28-23.7 and newer.
|
||||||
The number of possible Percona XtraDB Cluster setups is large given that
|
The number of possible Percona XtraDB Cluster setups is large given that
|
||||||
it can be used with regular replication as well. Therefore, only the setups
|
it can be used with regular replication as well. Therefore, only the setups
|
||||||
listed below are supported and known to work. Other setups, like cluster
|
listed below are supported and known to work. Other setups, like cluster
|
||||||
|
@@ -97,6 +97,7 @@ sub new {
|
|||||||
: join(', ', map { $q->quote($_) } @cols))
|
: join(', ', map { $q->quote($_) } @cols))
|
||||||
. " FROM $tbl->{name}"
|
. " FROM $tbl->{name}"
|
||||||
. ($where ? " WHERE $where" : '')
|
. ($where ? " WHERE $where" : '')
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*$comments{bite}*/";
|
. " /*$comments{bite}*/";
|
||||||
PTDEBUG && _d('One nibble statement:', $nibble_sql);
|
PTDEBUG && _d('One nibble statement:', $nibble_sql);
|
||||||
|
|
||||||
@@ -106,6 +107,7 @@ sub new {
|
|||||||
: join(', ', map { $q->quote($_) } @cols))
|
: join(', ', map { $q->quote($_) } @cols))
|
||||||
. " FROM $tbl->{name}"
|
. " FROM $tbl->{name}"
|
||||||
. ($where ? " WHERE $where" : '')
|
. ($where ? " WHERE $where" : '')
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*explain $comments{bite}*/";
|
. " /*explain $comments{bite}*/";
|
||||||
PTDEBUG && _d('Explain one nibble statement:', $explain_nibble_sql);
|
PTDEBUG && _d('Explain one nibble statement:', $explain_nibble_sql);
|
||||||
|
|
||||||
@@ -210,6 +212,7 @@ sub new {
|
|||||||
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
||||||
. ($where ? " AND ($where)" : '')
|
. ($where ? " AND ($where)" : '')
|
||||||
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*$comments{nibble}*/";
|
. " /*$comments{nibble}*/";
|
||||||
PTDEBUG && _d('Nibble statement:', $nibble_sql);
|
PTDEBUG && _d('Nibble statement:', $nibble_sql);
|
||||||
|
|
||||||
@@ -222,6 +225,7 @@ sub new {
|
|||||||
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
. " AND " . $asc->{boundaries}->{'<='} # upper boundary
|
||||||
. ($where ? " AND ($where)" : '')
|
. ($where ? " AND ($where)" : '')
|
||||||
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
. ($args{order_by} ? " ORDER BY $order_by" : "")
|
||||||
|
. ($args{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "")
|
||||||
. " /*explain $comments{nibble}*/";
|
. " /*explain $comments{nibble}*/";
|
||||||
PTDEBUG && _d('Explain nibble statement:', $explain_nibble_sql);
|
PTDEBUG && _d('Explain nibble statement:', $explain_nibble_sql);
|
||||||
|
|
||||||
|
@@ -135,11 +135,12 @@ sub get_dbh_for {
|
|||||||
my $dbh;
|
my $dbh;
|
||||||
# This is primarily for the benefit of CompareResults, but it's
|
# This is primarily for the benefit of CompareResults, but it's
|
||||||
# also quite convenient when using an affected OS
|
# also quite convenient when using an affected OS
|
||||||
|
# TODO: this fails if the server isn't started yet.
|
||||||
$cxn_ops->{L} = 1 if !exists $cxn_ops->{L}
|
$cxn_ops->{L} = 1 if !exists $cxn_ops->{L}
|
||||||
&& !$self->can_load_data('master');
|
&& !$self->can_load_data('master');
|
||||||
eval { $dbh = $dp->get_dbh($dp->get_cxn_params($dsn), $cxn_ops) };
|
eval { $dbh = $dp->get_dbh($dp->get_cxn_params($dsn), $cxn_ops) };
|
||||||
if ( $EVAL_ERROR ) {
|
if ( $EVAL_ERROR ) {
|
||||||
die 'Failed to get dbh for' . $server . ': ' . $EVAL_ERROR;
|
die 'Failed to get dbh for ' . $server . ': ' . $EVAL_ERROR;
|
||||||
}
|
}
|
||||||
$dbh->{InactiveDestroy} = 1; # Prevent destroying on fork.
|
$dbh->{InactiveDestroy} = 1; # Prevent destroying on fork.
|
||||||
$dbh->{FetchHashKeyName} = 'NAME_lc' unless $cxn_ops && $cxn_ops->{no_lc};
|
$dbh->{FetchHashKeyName} = 'NAME_lc' unless $cxn_ops && $cxn_ops->{no_lc};
|
||||||
|
@@ -1,55 +1,36 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
err() {
|
die() {
|
||||||
echo
|
echo
|
||||||
for msg; do
|
for msg; do
|
||||||
echo $msg
|
echo $msg
|
||||||
done
|
done
|
||||||
exit_status=1
|
exit 1
|
||||||
}
|
}
|
||||||
|
|
||||||
# ###########################################################################
|
# ###########################################################################
|
||||||
# Sanity check the cmd line options.
|
# Sanity check the cmd line options.
|
||||||
# ###########################################################################
|
# ###########################################################################
|
||||||
if [ $# -lt 1 ]; then
|
if [ $# -lt 1 ]; then
|
||||||
err "Usage: load-sakila-db PORT"
|
die "Usage: load-sakila-db PORT"
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
PORT=$1
|
PORT=$1
|
||||||
|
|
||||||
if [ ! -d "/tmp/$PORT" ]; then
|
if [ ! -d "/tmp/$PORT" ]; then
|
||||||
err "MySQL test server does not exist: /tmp/$PORT"
|
die "MySQL test server does not exist: /tmp/$PORT"
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ###########################################################################
|
# ###########################################################################
|
||||||
# Sanity check the environment.
|
# Sanity check the environment.
|
||||||
# ###########################################################################
|
# ###########################################################################
|
||||||
if [ -z "$PERCONA_TOOLKIT_BRANCH" ]; then
|
if [ -z "$PERCONA_TOOLKIT_BRANCH" ]; then
|
||||||
err "PERCONA_TOOLKIT_BRANCH environment variable is not set."
|
die "PERCONA_TOOLKIT_BRANCH environment variable is not set."
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -d "$PERCONA_TOOLKIT_BRANCH" ]; then
|
if [ ! -d "$PERCONA_TOOLKIT_BRANCH" ]; then
|
||||||
err "Invalid PERCONA_TOOLKIT_BRANCH directory: $PERCONA_TOOLKIT_BRANCH"
|
die "Invalid PERCONA_TOOLKIT_BRANCH directory: $PERCONA_TOOLKIT_BRANCH"
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cd $PERCONA_TOOLKIT_BRANCH/sandbox
|
/tmp/$PORT/use < $PERCONA_TOOLKIT_BRANCH/sandbox/sakila.sql
|
||||||
|
exit $?
|
||||||
exit_status=0
|
|
||||||
|
|
||||||
/tmp/$PORT/use < sakila-db/sakila-schema.sql
|
|
||||||
exit_status=$((exit_status | $?))
|
|
||||||
|
|
||||||
# We can remove this once PXC's triggers can handle myisam tables
|
|
||||||
if [ "${2:-""}" = "cluster" ]; then
|
|
||||||
/tmp/$PORT/use -e "ALTER TABLE sakila.film_text DROP KEY idx_title_description"
|
|
||||||
/tmp/$PORT/use -e "ALTER TABLE sakila.film_text ENGINE = 'InnoDB'"
|
|
||||||
fi
|
|
||||||
|
|
||||||
/tmp/$PORT/use < sakila-db/sakila-data.sql
|
|
||||||
exit_status=$((exit_status | $?))
|
|
||||||
|
|
||||||
exit $exit_status
|
|
||||||
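The rewritten loader now just sources the single pre-built dump added below, so the typical call from the test suite stays a one-liner (the port is the sandbox port used elsewhere in this change):

    sandbox/load-sakila-db 12345

Presumably the combined sandbox/sakila.sql already bakes in the cluster-friendly adjustments, which is why the old schema-plus-data sequence and the film_text InnoDB workaround could be dropped.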
818  sandbox/sakila.sql  (new file; diff suppressed because one or more lines are too long)
@@ -395,39 +395,38 @@ $ni = make_nibble_iter(
|
|||||||
my $row = $ni->next();
|
my $row = $ni->next();
|
||||||
is_deeply(
|
is_deeply(
|
||||||
$row,
|
$row,
|
||||||
[25, 'da79784d'],
|
[25, 'd9c52498'],
|
||||||
"SELECT chunk checksum 1 FROM sakila.country"
|
"SELECT chunk checksum 1 FROM sakila.country"
|
||||||
) or diag(Dumper($row));
|
) or diag(Dumper($row));
|
||||||
|
|
||||||
$row = $ni->next();
|
$row = $ni->next();
|
||||||
is_deeply(
|
is_deeply(
|
||||||
$row,
|
$row,
|
||||||
[25, 'e860c4f9'],
|
[25, 'ebdc982c'],
|
||||||
"SELECT chunk checksum 2 FROM sakila.country"
|
"SELECT chunk checksum 2 FROM sakila.country"
|
||||||
) or diag(Dumper($row));
|
) or diag(Dumper($row));
|
||||||
|
|
||||||
$row = $ni->next();
|
$row = $ni->next();
|
||||||
is_deeply(
|
is_deeply(
|
||||||
$row,
|
$row,
|
||||||
[25, 'eb651f58'],
|
[25, 'e8d9438d'],
|
||||||
"SELECT chunk checksum 3 FROM sakila.country"
|
"SELECT chunk checksum 3 FROM sakila.country"
|
||||||
) or diag(Dumper($row));
|
) or diag(Dumper($row));
|
||||||
|
|
||||||
$row = $ni->next();
|
$row = $ni->next();
|
||||||
is_deeply(
|
is_deeply(
|
||||||
$row,
|
$row,
|
||||||
[25, '2d87d588'],
|
[25, '2e3b895d'],
|
||||||
"SELECT chunk checksum 4 FROM sakila.country"
|
"SELECT chunk checksum 4 FROM sakila.country"
|
||||||
) or diag(Dumper($row));
|
) or diag(Dumper($row));
|
||||||
|
|
||||||
$row = $ni->next();
|
$row = $ni->next();
|
||||||
is_deeply(
|
is_deeply(
|
||||||
$row,
|
$row,
|
||||||
[9, 'beb4a180'],
|
[9, 'bd08fd55'],
|
||||||
"SELECT chunk checksum 5 FROM sakila.country"
|
"SELECT chunk checksum 5 FROM sakila.country"
|
||||||
) or diag(Dumper($row));
|
) or diag(Dumper($row));
|
||||||
|
|
||||||
|
|
||||||
# #########################################################################
|
# #########################################################################
|
||||||
# exec_nibble callback and explain_sth
|
# exec_nibble callback and explain_sth
|
||||||
# #########################################################################
|
# #########################################################################
|
||||||
|
@@ -14,6 +14,7 @@ use Test::More;
|
|||||||
use PerconaTest;
|
use PerconaTest;
|
||||||
use Sandbox;
|
use Sandbox;
|
||||||
require "$trunk/bin/pt-online-schema-change";
|
require "$trunk/bin/pt-online-schema-change";
|
||||||
|
require VersionParser;
|
||||||
|
|
||||||
use Time::HiRes qw(sleep);
|
use Time::HiRes qw(sleep);
|
||||||
use Data::Dumper;
|
use Data::Dumper;
|
||||||
@@ -72,23 +73,27 @@ sub get_ids {
|
|||||||
my @lines = <$fh>;
|
my @lines = <$fh>;
|
||||||
close $fh;
|
close $fh;
|
||||||
|
|
||||||
my %ids;
|
my %ids = (
|
||||||
|
updated => '',
|
||||||
|
deleted => '',
|
||||||
|
inserted => '',
|
||||||
|
);
|
||||||
foreach my $line ( @lines ) {
|
foreach my $line ( @lines ) {
|
||||||
my ($stmt, $ids) = split(':', $line);
|
my ($stmt, $ids) = split(':', $line);
|
||||||
chomp $ids;
|
chomp $ids;
|
||||||
$ids{$stmt} = $ids;
|
$ids{$stmt} = $ids || '';
|
||||||
}
|
}
|
||||||
|
|
||||||
return \%ids;
|
return \%ids;
|
||||||
}
|
}
|
||||||
|
|
||||||
sub check_ids {
|
sub check_ids {
|
||||||
my ( $db, $tbl, $pkcol, $ids ) = @_;
|
my ( $db, $tbl, $pkcol, $ids, $test ) = @_;
|
||||||
my $rows;
|
my $rows;
|
||||||
|
|
||||||
my $n_updated = $ids->{updated} ? ($ids->{updated} =~ tr/,//) : 0;
|
my $n_updated = $ids->{updated} ? ($ids->{updated} =~ tr/,//) : 0;
|
||||||
my $n_deleted = $ids->{deleted} ? ($ids->{deleted} =~ tr/,//) : 0;
|
my $n_deleted = $ids->{deleted} ? ($ids->{deleted} =~ tr/,//) : 0;
|
||||||
my $n_inserted = ($ids->{inserted} =~ tr/,//);
|
my $n_inserted = $ids->{inserted} ?($ids->{inserted} =~ tr/,//) : 0;
|
||||||
|
|
||||||
# "1,1"=~tr/,// returns 1 but is 2 values
|
# "1,1"=~tr/,// returns 1 but is 2 values
|
||||||
$n_updated++ if $n_updated;
|
$n_updated++ if $n_updated;
|
||||||
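A quick illustration of the off-by-one the comment above is guarding against (values are made up):

    my $ids = '1,5,9';
    my $n   = ($ids =~ tr/,//);   # $n == 2, but the list holds 3 ids,
    $n++ if $n;                   # hence the increment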
@@ -100,16 +105,16 @@ sub check_ids {
|
|||||||
is(
|
is(
|
||||||
$rows->[0],
|
$rows->[0],
|
||||||
500 + $n_inserted - $n_deleted,
|
500 + $n_inserted - $n_deleted,
|
||||||
"New table row count: 500 original + $n_inserted inserted - $n_deleted deleted"
|
"$test: new table rows: 500 original + $n_inserted inserted - $n_deleted deleted"
|
||||||
) or print Dumper($rows);
|
) or diag(Dumper($rows));
|
||||||
|
|
||||||
$rows = $master_dbh->selectall_arrayref(
|
$rows = $master_dbh->selectall_arrayref(
|
||||||
"SELECT $pkcol FROM $db.$tbl WHERE $pkcol > 500 AND $pkcol NOT IN ($ids->{inserted})");
|
"SELECT $pkcol FROM $db.$tbl WHERE $pkcol > 500 AND $pkcol NOT IN ($ids->{inserted})");
|
||||||
is_deeply(
|
is_deeply(
|
||||||
$rows,
|
$rows,
|
||||||
[],
|
[],
|
||||||
"No extra rows inserted in new table"
|
"$test: no extra rows inserted in new table"
|
||||||
) or print Dumper($rows);
|
) or diag(Dumper($rows));
|
||||||
|
|
||||||
if ( $n_deleted ) {
|
if ( $n_deleted ) {
|
||||||
$rows = $master_dbh->selectall_arrayref(
|
$rows = $master_dbh->selectall_arrayref(
|
||||||
@@ -117,13 +122,13 @@ sub check_ids {
|
|||||||
is_deeply(
|
is_deeply(
|
||||||
$rows,
|
$rows,
|
||||||
[],
|
[],
|
||||||
"No deleted rows present in new table"
|
"$test: no deleted rows present in new table"
|
||||||
) or print Dumper($rows);
|
) or diag(Dumper($rows));
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
ok(
|
ok(
|
||||||
1,
|
1,
|
||||||
"No rows deleted"
|
"$test: no rows deleted"
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -134,13 +139,13 @@ sub check_ids {
|
|||||||
is_deeply(
|
is_deeply(
|
||||||
$rows,
|
$rows,
|
||||||
[],
|
[],
|
||||||
"Updated rows correct in new table"
|
"$test: updated rows correct in new table"
|
||||||
) or print Dumper($rows);
|
) or diag(Dumper($rows));
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
ok(
|
ok(
|
||||||
1,
|
1,
|
||||||
"No rows updated"
|
"$test: no rows updated"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,18 +156,19 @@ sub check_ids {
|
|||||||
# Attempt to alter a table while another process is changing it.
|
# Attempt to alter a table while another process is changing it.
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
|
|
||||||
# Load 500 rows.
|
my $db_flavor = VersionParser->new($master_dbh)->flavor();
|
||||||
diag('Loading sample dataset...');
|
if ( $db_flavor =~ m/XtraDB Cluster/ ) {
|
||||||
$sb->load_file('master', "$sample/basic_no_fks.sql");
|
$sb->load_file('master', "$sample/basic_no_fks_innodb.sql");
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
$sb->load_file('master', "$sample/basic_no_fks.sql");
|
||||||
|
}
|
||||||
$master_dbh->do("USE pt_osc");
|
$master_dbh->do("USE pt_osc");
|
||||||
$master_dbh->do("TRUNCATE TABLE t");
|
$master_dbh->do("TRUNCATE TABLE t");
|
||||||
$master_dbh->do("LOAD DATA INFILE '$trunk/t/pt-online-schema-change/samples/basic_no_fks.data' INTO TABLE t");
|
$master_dbh->do("LOAD DATA INFILE '$trunk/t/pt-online-schema-change/samples/basic_no_fks.data' INTO TABLE t");
|
||||||
$master_dbh->do("ANALYZE TABLE t");
|
$master_dbh->do("ANALYZE TABLE t");
|
||||||
$sb->wait_for_slaves();
|
$sb->wait_for_slaves();
|
||||||
|
|
||||||
$rows = $master_dbh->selectrow_hashref('show master status');
|
|
||||||
diag('Binlog position before altering table: ', $rows->{file}, '/', $rows->{position});
|
|
||||||
|
|
||||||
# Start inserting, updating, and deleting rows at random.
|
# Start inserting, updating, and deleting rows at random.
|
||||||
start_query_table(qw(pt_osc t id));
|
start_query_table(qw(pt_osc t id));
|
||||||
|
|
||||||
@@ -178,29 +184,31 @@ start_query_table(qw(pt_osc t id));
|
|||||||
# Stop changing the table's data.
|
# Stop changing the table's data.
|
||||||
stop_query_table();
|
stop_query_table();
|
||||||
|
|
||||||
like($output, qr/Successfully altered `pt_osc`.`t`/, 'Altered OK');
|
like(
|
||||||
|
$output,
|
||||||
|
qr/Successfully altered `pt_osc`.`t`/,
|
||||||
|
'Change engine: altered OK'
|
||||||
|
);
|
||||||
|
|
||||||
$rows = $master_dbh->selectall_hashref('SHOW TABLE STATUS FROM pt_osc', 'name');
|
$rows = $master_dbh->selectall_hashref('SHOW TABLE STATUS FROM pt_osc', 'name');
|
||||||
is(
|
is(
|
||||||
$rows->{t}->{engine},
|
$rows->{t}->{engine},
|
||||||
'InnoDB',
|
'InnoDB',
|
||||||
"New table ENGINE=InnoDB"
|
"Change engine: new table ENGINE=InnoDB"
|
||||||
) or warn Dumper($rows);
|
) or warn Dumper($rows);
|
||||||
|
|
||||||
is(
|
is(
|
||||||
scalar keys %$rows,
|
scalar keys %$rows,
|
||||||
1,
|
1,
|
||||||
"Dropped old table"
|
"Change engine: dropped old table"
|
||||||
);
|
);
|
||||||
|
|
||||||
is(
|
is(
|
||||||
$exit,
|
$exit,
|
||||||
0,
|
0,
|
||||||
"Exit status 0"
|
"Change engine: exit status 0"
|
||||||
);
|
);
|
||||||
|
|
||||||
check_ids(qw(pt_osc t id), get_ids());
|
|
||||||
|
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
# Check that triggers work when renaming a column
|
# Check that triggers work when renaming a column
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
@@ -211,8 +219,6 @@ $master_dbh->do("LOAD DATA INFILE '$trunk/t/pt-online-schema-change/samples/basi
|
|||||||
$master_dbh->do("ANALYZE TABLE t");
|
$master_dbh->do("ANALYZE TABLE t");
|
||||||
$sb->wait_for_slaves();
|
$sb->wait_for_slaves();
|
||||||
|
|
||||||
my $orig_rows = $master_dbh->selectall_arrayref(qq{SELECT id,d FROM pt_osc.t});
|
|
||||||
|
|
||||||
# Start inserting, updating, and deleting rows at random.
|
# Start inserting, updating, and deleting rows at random.
|
||||||
start_query_table(qw(pt_osc t id));
|
start_query_table(qw(pt_osc t id));
|
||||||
|
|
||||||
@@ -230,33 +236,19 @@ start_query_table(qw(pt_osc t id));
|
|||||||
# Stop changing the table's data.
|
# Stop changing the table's data.
|
||||||
stop_query_table();
|
stop_query_table();
|
||||||
|
|
||||||
like($output, qr/Successfully altered `pt_osc`.`t`/, 'Altered OK');
|
like(
|
||||||
|
$output,
|
||||||
|
qr/Successfully altered `pt_osc`.`t`/,
|
||||||
|
'Rename column: altered OK'
|
||||||
|
);
|
||||||
|
|
||||||
is(
|
is(
|
||||||
$exit,
|
$exit,
|
||||||
0,
|
0,
|
||||||
"Exit status 0"
|
"Rename columnn: exit status 0"
|
||||||
);
|
);
|
||||||
|
|
||||||
my $ids = get_ids();
|
check_ids(qw(pt_osc t id), get_ids(), "Rename column");
|
||||||
my %deleted_ids = map { $_ => 1 } split /,/, $ids->{deleted};
|
|
||||||
my %updated_ids = map { $_ => 1 } split /,/, $ids->{updated};
|
|
||||||
|
|
||||||
$rows = $master_dbh->selectall_arrayref(
|
|
||||||
qq{SELECT id,q FROM pt_osc.t WHERE id}
|
|
||||||
. ($ids->{updated} ? qq{ AND id NOT IN ($ids->{updated})} : '')
|
|
||||||
. ($ids->{inserted} ? qq{ AND id NOT IN ($ids->{inserted})} : '')
|
|
||||||
);
|
|
||||||
|
|
||||||
my @filtered_orig_rows = grep {
|
|
||||||
!$deleted_ids{$_->[0]} && !$updated_ids{$_->[0]}
|
|
||||||
} @$orig_rows;
|
|
||||||
|
|
||||||
is_deeply(
|
|
||||||
$rows,
|
|
||||||
\@filtered_orig_rows,
|
|
||||||
"Triggers work if renaming a column"
|
|
||||||
);
|
|
||||||
|
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
# Done.
|
# Done.
|
||||||
|
@@ -15,6 +15,7 @@ use Time::HiRes qw(sleep);
|
|||||||
use PerconaTest;
|
use PerconaTest;
|
||||||
use Sandbox;
|
use Sandbox;
|
||||||
require "$trunk/bin/pt-online-schema-change";
|
require "$trunk/bin/pt-online-schema-change";
|
||||||
|
require VersionParser;
|
||||||
|
|
||||||
use Data::Dumper;
|
use Data::Dumper;
|
||||||
$Data::Dumper::Indent = 1;
|
$Data::Dumper::Indent = 1;
|
||||||
@@ -46,7 +47,7 @@ my $rows;
|
|||||||
# Tool shouldn't run without --execute (bug 933232).
|
# Tool shouldn't run without --execute (bug 933232).
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
|
|
||||||
$sb->load_file('master', "$sample/basic_no_fks.sql");
|
$sb->load_file('master', "$sample/basic_no_fks_innodb.sql");
|
||||||
|
|
||||||
($output, $exit) = full_output(
|
($output, $exit) = full_output(
|
||||||
sub { pt_online_schema_change::main(@args, "$dsn,D=pt_osc,t=t",
|
sub { pt_online_schema_change::main(@args, "$dsn,D=pt_osc,t=t",
|
||||||
@@ -57,7 +58,7 @@ like(
|
|||||||
$output,
|
$output,
|
||||||
qr/neither --dry-run nor --execute was specified/,
|
qr/neither --dry-run nor --execute was specified/,
|
||||||
"Doesn't run without --execute (bug 933232)"
|
"Doesn't run without --execute (bug 933232)"
|
||||||
) or warn $output;
|
) or diag($output);
|
||||||
|
|
||||||
my $ddl = $master_dbh->selectrow_arrayref("show create table pt_osc.t");
|
my $ddl = $master_dbh->selectrow_arrayref("show create table pt_osc.t");
|
||||||
like(
|
like(
|
||||||
@@ -100,9 +101,9 @@ sub test_alter_table {
|
|||||||
my $tbl_struct = $tp->parse($ddl);
|
my $tbl_struct = $tp->parse($ddl);
|
||||||
|
|
||||||
my $cols = '*';
|
my $cols = '*';
|
||||||
if ( $test_type eq 'drop_col' && !grep { $_ eq '--dry-run' } @$cmds ) {
|
if ( $test_type =~ m/(?:add|drop)_col/ && !grep { $_ eq '--dry-run' } @$cmds ) {
|
||||||
# Don't select the column being dropped.
|
# Don't select the column being dropped.
|
||||||
my $col = $args{drop_col};
|
my $col = $args{drop_col} || $args{new_col};
|
||||||
die "I need a drop_col argument" unless $col;
|
die "I need a drop_col argument" unless $col;
|
||||||
$cols = join(', ', grep { $_ ne $col } @{$tbl_struct->{cols}});
|
$cols = join(', ', grep { $_ ne $col } @{$tbl_struct->{cols}});
|
||||||
}
|
}
|
||||||
@@ -147,6 +148,7 @@ sub test_alter_table {
|
|||||||
);
|
);
|
||||||
|
|
||||||
my $new_ddl = $tp->get_create_table($master_dbh, $db, $tbl);
|
my $new_ddl = $tp->get_create_table($master_dbh, $db, $tbl);
|
||||||
|
my $new_tbl_struct = $tp->parse($new_ddl);
|
||||||
my $fail = 0;
|
my $fail = 0;
|
||||||
|
|
||||||
is(
|
is(
|
||||||
@@ -164,7 +166,7 @@ sub test_alter_table {
|
|||||||
) or $fail = 1;
|
) or $fail = 1;
|
||||||
|
|
||||||
# Rows in the original and new table should be identical.
|
# Rows in the original and new table should be identical.
|
||||||
my $new_rows = $master_dbh->selectall_arrayref("SELECT * FROM $table ORDER BY `$pk_col`");
|
my $new_rows = $master_dbh->selectall_arrayref("SELECT $cols FROM $table ORDER BY `$pk_col`");
|
||||||
is_deeply(
|
is_deeply(
|
||||||
$new_rows,
|
$new_rows,
|
||||||
$orig_rows,
|
$orig_rows,
|
||||||
@@ -173,7 +175,7 @@ sub test_alter_table {
|
|||||||
|
|
||||||
if ( grep { $_ eq '--no-drop-new-table' } @$cmds ) {
|
if ( grep { $_ eq '--no-drop-new-table' } @$cmds ) {
|
||||||
$new_rows = $master_dbh->selectall_arrayref(
|
$new_rows = $master_dbh->selectall_arrayref(
|
||||||
"SELECT * FROM `$db`.`$new_tbl` ORDER BY `$pk_col`");
|
"SELECT $cols FROM `$db`.`$new_tbl` ORDER BY `$pk_col`");
|
||||||
is_deeply(
|
is_deeply(
|
||||||
$new_rows,
|
$new_rows,
|
||||||
$orig_rows,
|
$orig_rows,
|
||||||
@@ -216,6 +218,18 @@ sub test_alter_table {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
elsif ( $test_type eq 'add_col' ) {
|
elsif ( $test_type eq 'add_col' ) {
|
||||||
|
if ( $args{no_change} ) {
|
||||||
|
ok(
|
||||||
|
!$new_tbl_struct->{is_col}->{$args{new_col}},
|
||||||
|
"$name $args{new_col} not added"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
ok(
|
||||||
|
$new_tbl_struct->{is_col}->{$args{new_col}},
|
||||||
|
"$name $args{new_col} added"
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
elsif ( $test_type eq 'new_engine' ) {
|
elsif ( $test_type eq 'new_engine' ) {
|
||||||
my $new_engine = lc($args{new_engine});
|
my $new_engine = lc($args{new_engine});
|
||||||
@@ -305,36 +319,71 @@ sub test_alter_table {
|
|||||||
# The most basic: alter a small table with no fks that's not active.
|
# The most basic: alter a small table with no fks that's not active.
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
|
|
||||||
test_alter_table(
|
my $db_flavor = VersionParser->new($master_dbh)->flavor();
|
||||||
name => "Basic no fks --dry-run",
|
if ( $db_flavor =~ m/XtraDB Cluster/ ) {
|
||||||
table => "pt_osc.t",
|
test_alter_table(
|
||||||
file => "basic_no_fks.sql",
|
name => "Basic no fks --dry-run",
|
||||||
max_id => 20,
|
table => "pt_osc.t",
|
||||||
test_type => "new_engine",
|
file => "basic_no_fks_innodb.sql",
|
||||||
new_engine => "MyISAM",
|
max_id => 20,
|
||||||
cmds => [qw(--dry-run --alter ENGINE=InnoDB)],
|
test_type => "drop_col",
|
||||||
);
|
drop_col => "d",
|
||||||
|
cmds => [qw(--dry-run --alter), 'DROP COLUMN d'],
|
||||||
|
);
|
||||||
|
|
||||||
test_alter_table(
|
test_alter_table(
|
||||||
name => "Basic no fks --execute",
|
name => "Basic no fks --execute",
|
||||||
table => "pt_osc.t",
|
table => "pt_osc.t",
|
||||||
# The previous test should not have modified the table.
|
# The previous test should not have modified the table.
|
||||||
# file => "basic_no_fks.sql",
|
# file => "basic_no_fks_innodb.sql",
|
||||||
# max_id => 20,
|
# max_id => 20,
|
||||||
test_type => "new_engine",
|
test_type => "drop_col",
|
||||||
new_engine => "InnoDB",
|
drop_col => "d",
|
||||||
cmds => [qw(--execute --alter ENGINE=InnoDB)],
|
cmds => [qw(--execute --alter), 'DROP COLUMN d'],
|
||||||
);
|
);
|
||||||
|
|
||||||
test_alter_table(
|
test_alter_table(
|
||||||
name => "--execute but no --alter",
|
name => "--execute but no --alter",
|
||||||
table => "pt_osc.t",
|
table => "pt_osc.t",
|
||||||
file => "basic_no_fks.sql",
|
file => "basic_no_fks_innodb.sql",
|
||||||
max_id => 20,
|
max_id => 20,
|
||||||
test_type => "new_engine",
|
test_type => "new_engine", # When there's no change, we just check
|
||||||
new_engine => "MyISAM",
|
new_engine => "InnoDB", # the engine as a NOP. Any other
|
||||||
cmds => [qw(--execute)],
|
cmds => [qw(--execute)], # unintended changes are still detected.
|
||||||
);
|
);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
test_alter_table(
|
||||||
|
name => "Basic no fks --dry-run",
|
||||||
|
table => "pt_osc.t",
|
||||||
|
file => "basic_no_fks.sql",
|
||||||
|
max_id => 20,
|
||||||
|
test_type => "new_engine",
|
||||||
|
new_engine => "MyISAM",
|
||||||
|
cmds => [qw(--dry-run --alter ENGINE=InnoDB)],
|
||||||
|
);
|
||||||
|
|
||||||
|
test_alter_table(
|
||||||
|
name => "Basic no fks --execute",
|
||||||
|
table => "pt_osc.t",
|
||||||
|
# The previous test should not have modified the table.
|
||||||
|
# file => "basic_no_fks.sql",
|
||||||
|
# max_id => 20,
|
||||||
|
test_type => "new_engine",
|
||||||
|
new_engine => "InnoDB",
|
||||||
|
cmds => [qw(--execute --alter ENGINE=InnoDB)],
|
||||||
|
);
|
||||||
|
|
||||||
|
test_alter_table(
|
||||||
|
name => "--execute but no --alter",
|
||||||
|
table => "pt_osc.t",
|
||||||
|
file => "basic_no_fks.sql",
|
||||||
|
max_id => 20,
|
||||||
|
test_type => "new_engine",
|
||||||
|
new_engine => "MyISAM",
|
||||||
|
cmds => [qw(--execute)],
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
# ############################################################################
|
# ############################################################################
|
||||||
# Alter a table with foreign keys.
|
# Alter a table with foreign keys.
|
||||||
@@ -520,7 +569,7 @@ SKIP: {
|
|||||||
);
|
);
|
||||||
|
|
||||||
# Restore the original fks.
|
# Restore the original fks.
|
||||||
diag('Restoring original Sakila foreign keys...');
|
diag('Restoring sakila...');
|
||||||
diag(`$trunk/sandbox/load-sakila-db 12345`);
|
diag(`$trunk/sandbox/load-sakila-db 12345`);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -528,23 +577,18 @@ SKIP: {
|
|||||||
# --alter-foreign-keys-method=none. This intentionally breaks fks because
|
# --alter-foreign-keys-method=none. This intentionally breaks fks because
|
||||||
# they're not updated so they'll point to the old table that is dropped.
|
# they're not updated so they'll point to the old table that is dropped.
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
diag('Loading file and waiting for replication...');
|
|
||||||
$sb->load_file('master', "$sample/basic_with_fks.sql");
|
|
||||||
|
|
||||||
# Specify --alter-foreign-keys-method for a table with no child tables.
|
# Specify --alter-foreign-keys-method for a table with no child tables.
|
||||||
test_alter_table(
|
test_alter_table(
|
||||||
name => "Update fk method none",
|
name => "Update fk method none",
|
||||||
|
file => "basic_with_fks.sql",
|
||||||
table => "pt_osc.country",
|
table => "pt_osc.country",
|
||||||
pk_col => "country_id",
|
pk_col => "country_id",
|
||||||
file => "basic_with_fks.sql",
|
max_id => 20,
|
||||||
test_type => "new_engine",
|
test_type => "new_engine",
|
||||||
new_engine => "innodb",
|
new_engine => "innodb",
|
||||||
cmds => [
|
cmds => [
|
||||||
qw(
|
qw(--execute --alter-foreign-keys-method none --alter ENGINE=INNODB)
|
||||||
--execute
|
|
||||||
--alter-foreign-keys-method none
|
|
||||||
),
|
|
||||||
'--alter', 'ENGINE=INNODB',
|
|
||||||
],
|
],
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -613,22 +657,28 @@ test_table(
|
|||||||
test_alter_table(
|
test_alter_table(
|
||||||
name => "--no-swap-tables",
|
name => "--no-swap-tables",
|
||||||
table => "pt_osc.t",
|
table => "pt_osc.t",
|
||||||
file => "basic_no_fks.sql",
|
file => "basic_no_fks_innodb.sql",
|
||||||
max_id => 20,
|
max_id => 20,
|
||||||
test_type => "new_engine", # Engine doesn't actually change
|
test_type => "add_col",
|
||||||
new_engine => "MyISAM", # because the tables aren't swapped
|
new_col => "foo",
|
||||||
cmds => [qw(--execute --alter ENGINE=InnoDB --no-swap-tables)],
|
no_change => 1,
|
||||||
|
cmds => [
|
||||||
|
qw(--execute --no-swap-tables), '--alter', 'ADD COLUMN foo INT'
|
||||||
|
],
|
||||||
);
|
);
|
||||||
|
|
||||||
test_alter_table(
|
test_alter_table(
|
||||||
name => "--no-swap-tables --no-drop-new-table",
|
name => "--no-swap-tables --no-drop-new-table",
|
||||||
table => "pt_osc.t",
|
table => "pt_osc.t",
|
||||||
file => "basic_no_fks.sql",
|
file => "basic_no_fks_innodb.sql",
|
||||||
max_id => 20,
|
max_id => 20,
|
||||||
test_type => "new_engine", # Engine doesn't actually change
|
test_type => "add_col",
|
||||||
new_engine => "MyISAM", # because the tables aren't swapped
|
new_col => "foo",
|
||||||
cmds => [qw(--execute --alter ENGINE=InnoDB --no-swap-tables),
|
no_change => 1,
|
||||||
qw(--no-drop-new-table)],
|
cmds => [
|
||||||
|
qw(--execute --no-swap-tables), '--alter', 'ADD COLUMN foo INT',
|
||||||
|
qw(--no-drop-new-table),
|
||||||
|
],
|
||||||
);
|
);
|
||||||
|
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
@@ -654,7 +704,7 @@ ok(
|
|||||||
'--execute', '--statistics',
|
'--execute', '--statistics',
|
||||||
'--alter', "modify column val ENUM('M','E','H') NOT NULL")
|
'--alter', "modify column val ENUM('M','E','H') NOT NULL")
|
||||||
},
|
},
|
||||||
($sandbox_version ge '5.5'
|
($sandbox_version ge '5.5' && $db_flavor !~ m/XtraDB Cluster/
|
||||||
? "$sample/stats-execute-5.5.txt"
|
? "$sample/stats-execute-5.5.txt"
|
||||||
: "$sample/stats-execute.txt"),
|
: "$sample/stats-execute.txt"),
|
||||||
),
|
),
|
||||||
@@ -689,7 +739,6 @@ SKIP: {
|
|||||||
# #############################################################################
|
# #############################################################################
|
||||||
# Done.
|
# Done.
|
||||||
# #############################################################################
|
# #############################################################################
|
||||||
$master_dbh->do("UPDATE mysql.proc SET created='2012-06-05 00:00:00', modified='2012-06-05 00:00:00'");
|
|
||||||
$sb->wipe_clean($master_dbh);
|
$sb->wipe_clean($master_dbh);
|
||||||
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
|
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
|
||||||
done_testing;
|
done_testing;
|
||||||
@@ -25,8 +25,11 @@ sub test_func {
 die "No renamed_cols arg" unless $renamed_cols;
 (my $show_alter = $alter) =~ s/\n/\\n/g;

-my %got_renamed_cols = eval {
-   pt_online_schema_change::find_renamed_cols($alter, $tp);
+my $got_renamed_cols = eval {
+   pt_online_schema_change::find_renamed_cols(
+      alter => $alter,
+      TableParser => $tp,
+   );
 };
 if ( $EVAL_ERROR ) {
 is_deeply(
@@ -37,10 +40,10 @@ sub test_func {
 }
 else {
 is_deeply(
-   \%got_renamed_cols,
+   $got_renamed_cols,
    $renamed_cols,
    $show_alter,
-) or diag(Dumper(\%got_renamed_cols));
+) or diag(Dumper($got_renamed_cols));
 }
 }

@@ -209,15 +212,13 @@ test_func(
 },
 );

-TODO: {
-   local $::TODO = "We don't parse the entire alter statement, what looks like a CHANGE COLUMNS";
-   # Not really an alter, pathological
-   test_func(
-      "MODIFY `CHANGE a z VARCHAR(255) NOT NULL` FLOAT",
-      {
-      },
-   );
-}
+# TODO
+## Not really an alter, pathological
+#test_func(
+# "MODIFY `CHANGE a z VARCHAR(255) NOT NULL` FLOAT",
+# {
+# },
+#);

 # #############################################################################
 # Done.
@@ -38,9 +38,6 @@ elsif ( !$slave2_dbh ) {
 elsif ( !@{$master_dbh->selectall_arrayref("show databases like 'sakila'")} ) {
 plan skip_all => 'sakila database is not loaded';
 }
-else {
-   plan tests => 3;
-}

 # The sandbox servers run with lock_wait_timeout=3 and it's not dynamic
 # so we need to specify --lock-wait-timeout=3 else the tool will die.
@@ -58,7 +55,7 @@ my $sample = "t/pt-online-schema-change/samples/";
 diag(`/tmp/12345/use -u root < $trunk/$sample/osc-user.sql`);
 PerconaTest::wait_for_table($slave1_dbh, "mysql.tables_priv", "user='osc_user'");

-$sb->load_file('master', "$sample/basic_no_fks.sql");
+$sb->load_file('master', "$sample/basic_no_fks_innodb.sql");

 ($output, $exit_status) = full_output(
 sub { $exit_status = pt_online_schema_change::main(@args,
@@ -97,4 +94,4 @@ wait_until(
 # #############################################################################
 $sb->wipe_clean($master_dbh);
 ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
-exit;
+done_testing;
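Aside, not part of the commit: the hunks above drop the fixed test plan ("plan tests => 3" and the final "exit") in favour of Test::More's open-ended style, which tolerates tests being added or skipped depending on the platform. The two idioms, for reference (illustrative only):

    # Fixed plan: the declared count must match the number of tests run.
    plan tests => 3;

    # Open-ended plan: the count is checked when the script finishes.
    done_testing();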
 t/pt-online-schema-change/pxc.t | 143 lines (new file)
@@ -0,0 +1,143 @@
+#!/usr/bin/env perl
+
+BEGIN {
+   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
+      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
+   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
+};
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+use Test::More;
+use Data::Dumper;
+
+# Hostnames make testing less accurate. Tests need to see
+# that such-and-such happened on specific slave hosts, but
+# the sandbox servers are all on one host so all slaves have
+# the same hostname.
+$ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1;
+
+use PerconaTest;
+use Sandbox;
+
+require "$trunk/bin/pt-online-schema-change";
+# Do this after requiring ptc, since it uses Mo
+require VersionParser;
+
+my $dp = new DSNParser(opts=>$dsn_opts);
+my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
+my $node1 = $sb->get_dbh_for('node1');
+my $node2 = $sb->get_dbh_for('node2');
+my $node3 = $sb->get_dbh_for('node3');
+
+if ( !$node1 ) {
+   plan skip_all => 'Cannot connect to cluster node1';
+}
+elsif ( !$node2 ) {
+   plan skip_all => 'Cannot connect to cluster node2';
+}
+elsif ( !$node3 ) {
+   plan skip_all => 'Cannot connect to cluster node3';
+}
+
+my $db_flavor = VersionParser->new($node1)->flavor();
+if ( $db_flavor !~ /XtraDB Cluster/ ) {
+   plan skip_all => "PXC tests";
+}
+
+# The sandbox servers run with lock_wait_timeout=3 and it's not dynamic
+# so we need to specify --lock-wait-timeout=3 else the tool will die.
+my $node1_dsn = $sb->dsn_for('node1');
+my $output;
+my $exit;
+my $sample = "t/pt-online-schema-change/samples/";
+
+# #############################################################################
+# Can't alter a MyISAM table.
+# #############################################################################
+
+$sb->load_file('node1', "$sample/basic_no_fks.sql");
+
+($output, $exit) = full_output(
+   sub { pt_online_schema_change::main(
+      "$node1_dsn,D=pt_osc,t=t",
+      qw(--lock-wait-timeout 5),
+      qw(--print --execute --alter ENGINE=InnoDB)) },
+   stderr => 1,
+);
+
+ok(
+   $exit,
+   "Table is MyISAM: non-zero exit"
+) or diag($output);
+
+like(
+   $output,
+   qr/is a cluster node and the table is MyISAM/,
+   "Table is MyISAM: error message"
+);
+
+# #############################################################################
+# Can't alter a table converted to MyISAM.
+# #############################################################################
+
+$sb->load_file('node1', "$sample/basic_no_fks_innodb.sql");
+
+($output, $exit) = full_output(
+   sub { pt_online_schema_change::main(
+      "$node1_dsn,D=pt_osc,t=t",
+      qw(--lock-wait-timeout 5),
+      qw(--print --execute --alter ENGINE=MyISAM)) },
+   stderr => 1,
+);
+
+ok(
+   $exit,
+   "Convert table to MyISAM: non-zero exit"
+) or diag($output);
+
+like(
+   $output,
+   qr/is a cluster node and the table is being converted to MyISAM/,
+   "Convert table to MyISAM: error message"
+);
+
+# #############################################################################
+# Require wsrep_OSU_method=TOI
+# #############################################################################
+
+$node1->do("SET GLOBAL wsrep_OSU_method='RSU'");
+
+($output, $exit) = full_output(
+   sub { pt_online_schema_change::main(
+      "$node1_dsn,D=pt_osc,t=t",
+      qw(--lock-wait-timeout 5),
+      qw(--print --execute --alter ENGINE=MyISAM)) },
+   stderr => 1,
+);
+
+ok(
+   $exit,
+   "wsrep_OSU_method=RSU: non-zero exit"
+) or diag($output);
+print $output;
+like(
+   $output,
+   qr/wsrep_OSU_method=TOI is required.+?currently set to RSU/,
+   "wsrep_OSU_method=RSU: error message"
+);
+
+$node1->do("SET GLOBAL wsrep_OSU_method='TOI'");
+is_deeply(
+   $node1->selectrow_arrayref("SHOW VARIABLES LIKE 'wsrep_OSU_method'"),
+   [qw(wsrep_OSU_method TOI)],
+   "Restored wsrep_OSU_method=TOI"
+) or BAIL_OUT("Failed to restore wsrep_OSU_method=TOI");
+
+# #############################################################################
+# Done.
+# #############################################################################
+$sb->wipe_clean($node1);
+ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
+done_testing;
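Aside, not part of the commit: pxc.t expects the tool to refuse to run unless wsrep_OSU_method is TOI on the node it connects to. A minimal sketch of that kind of check using plain DBI (hypothetical helper, not pt-online-schema-change's actual code):

    # Hypothetical: return true only if the node uses TOI schema upgrades.
    sub wsrep_osu_method_is_toi {
       my ($dbh) = @_;
       my $row = $dbh->selectrow_arrayref(
          "SHOW VARIABLES LIKE 'wsrep_OSU_method'");
       return $row && uc($row->[1] || '') eq 'TOI';
    }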
@@ -51,11 +51,10 @@ $sb->load_file('master', "$sample/data-loss-bug-1068562.sql");
 qw(--execute)) },
 );

-is(
+ok(
    $exit_status,
-   255,
    "Die if --execute without --no-check-alter"
-);
+) or diag($output);

 like(
    $output,
@@ -95,7 +94,7 @@ is(
    $exit_status,
    0,
    "sakila.city: Exit status 0",
-);
+) or diag($output);

 my $mod = $master_dbh->selectall_arrayref(q{SELECT some_cities FROM sakila.city});

@@ -177,7 +176,7 @@ is(

 like(
    $output,
-   qr/first_name to first_name_mod, last_name to last_name_mod/ms,
+   qr/first_name to first_name_mod.+?last_name to last_name_mod/ms,
    "--dry-run warns about renaming columns"
 );

@@ -8,23 +8,23 @@ CREATE TABLE t (
 unique index (c(32))
 ) ENGINE=MyISAM;
 INSERT INTO pt_osc.t VALUES
-(null, 'a', now()),
-(null, 'b', now()),
-(null, 'c', now()),
-(null, 'd', now()),
-(null, 'e', now()),
-(null, 'f', now()),
-(null, 'g', now()),
-(null, 'h', now()),
-(null, 'i', now()),
-(null, 'j', now()), -- 10
-(null, 'k', now()),
-(null, 'l', now()),
-(null, 'm', now()),
-(null, 'n', now()),
-(null, 'o', now()),
-(null, 'p', now()),
-(null, 'q', now()),
-(null, 'r', now()),
-(null, 's', now()),
-(null, 't', now()); -- 20
+(1, 'a', now()),
+(2, 'b', now()),
+(3, 'c', now()),
+(4, 'd', now()),
+(5, 'e', now()),
+(6, 'f', now()),
+(7, 'g', now()),
+(8, 'h', now()),
+(9, 'i', now()),
+(10, 'j', now()), -- 10
+(11, 'k', now()),
+(12, 'l', now()),
+(13, 'm', now()),
+(14, 'n', now()),
+(15, 'o', now()),
+(16, 'p', now()),
+(17, 'q', now()),
+(18, 'r', now()),
+(19, 's', now()),
+(20, 't', now()); -- 20
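Aside, not part of the commit: the sample data above stops letting auto_increment assign the ids (null) and inserts explicit primary-key values 1..20 instead, presumably so the fixture rows are identical no matter how the server hands out auto-increment values; on a Percona XtraDB Cluster node the increment step is normally the cluster size rather than 1. The relevant settings can be inspected with (illustrative SQL):

    SHOW VARIABLES LIKE 'auto_increment%';
    SHOW VARIABLES LIKE 'wsrep_auto_increment_control';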
 t/pt-online-schema-change/samples/basic_no_fks_innodb.sql | 30 lines (new file)
@@ -0,0 +1,30 @@
+DROP DATABASE IF EXISTS pt_osc;
+CREATE DATABASE pt_osc;
+USE pt_osc;
+CREATE TABLE t (
+id int auto_increment primary key,
+c char(32),
+d date,
+unique index (c(32))
+) ENGINE=InnoDB;
+INSERT INTO pt_osc.t VALUES
+(1, 'a', now()),
+(2, 'b', now()),
+(3, 'c', now()),
+(4, 'd', now()),
+(5, 'e', now()),
+(6, 'f', now()),
+(7, 'g', now()),
+(8, 'h', now()),
+(9, 'i', now()),
+(10, 'j', now()), -- 10
+(11, 'k', now()),
+(12, 'l', now()),
+(13, 'm', now()),
+(14, 'n', now()),
+(15, 'o', now()),
+(16, 'p', now()),
+(17, 'q', now()),
+(18, 'r', now()),
+(19, 's', now()),
+(20, 't', now()); -- 20
@@ -40,17 +40,17 @@ INSERT INTO pt_osc.country VALUES
 (5, 'Spain', null);

 INSERT INTO pt_osc.city VALUES
-(null, 'Montréal', 1, null),
-(null, 'New York', 2, null),
-(null, 'Durango', 3, null),
-(null, 'Paris', 4, null),
-(null, 'Madrid', 5, null);
+(1, 'Montréal', 1, null),
+(2, 'New York', 2, null),
+(3, 'Durango', 3, null),
+(4, 'Paris', 4, null),
+(5, 'Madrid', 5, null);

 INSERT INTO pt_osc.address VALUES
-(null, 'addy 1', 1, '10000', null),
-(null, 'addy 2', 2, '20000', null),
-(null, 'addy 3', 3, '30000', null),
-(null, 'addy 4', 4, '40000', null),
-(null, 'addy 5', 5, '50000', null);
+(1, 'addy 1', 1, '10000', null),
+(2, 'addy 2', 2, '20000', null),
+(3, 'addy 3', 3, '30000', null),
+(4, 'addy 4', 4, '40000', null),
+(5, 'addy 5', 5, '50000', null);

 SET foreign_key_checks=1;
@@ -4,9 +4,12 @@ use strict;
 use warnings FATAL => 'all';
 use English qw(-no_match_vars);
 use DBI;
-use Time::HiRes qw(usleep time);
+use Time::HiRes qw(sleep time);
+use Test::More qw();

-my ($host, $port, $db, $tbl, $pkcol, $stop_file, $pid_file, $sleep_time) = @ARGV;
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+my ($host, $port, $db, $tbl, $pkcol, $stop_file, $pid_file, $sleep) = @ARGV;
+
 die "I need a pid_file argument" unless $pid_file;
 open my $fh, '>', $pid_file or die $OS_ERROR;
@@ -19,63 +22,109 @@ my $dbh = DBI->connect(
 {RaiseError => 1, AutoCommit => 0, ShowErrorStatement => 1, PrintError => 0},
 );

-my $sleep = ($sleep_time || 0.001) * 1_000_000;
-my $cnt = 0;
-my @del;
-my @upd;
-my @ins;
+my $del_sql = "DELETE FROM $db.$tbl WHERE $pkcol=?";
+my $ins_sql = "INSERT INTO $db.$tbl ($pkcol, c) VALUES (?, ?)";
+my $upd_sql = "UPDATE $db.$tbl SET c=? WHERE $pkcol=?";

-my $start_xa = "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */";
-$dbh->do($start_xa);
+my $del_sth = $dbh->prepare($del_sql);
+my $ins_sth = $dbh->prepare($ins_sql);
+my $upd_sth = $dbh->prepare($upd_sql);

-for my $i ( 1..5_000 ) {
-   last if -f $stop_file;
+$sleep ||= 0.01;

+use constant TYPE_DELETE => 1;
+use constant TYPE_UPDATE => 2;
+
+my (@del, %del);
+my (@upd, %upd);
+my (@ins, %ins);
+my $cnt = 0;
+my $id = 0;
+my $type = 0;
+
+sub reset_counters {
+   @del = ();
+   @ins = ();
+   @upd = ();
+   $cnt = 0;
+}
+
+sub commit {
    eval {
-      # We do roughly 25% DELETE, 25% UPDATE and 50% INSERT.
-      my $x = int(rand(5));
-      if ($x == 1) {
-         my $id = int(rand(500)) || 1;
-         $dbh->do("delete from $db.$tbl where $pkcol=$id");
-         # To challenge the tool, we *do* (or can) delete the same id twice.
-         # But to keep the numbers straight, we only record each deleted
-         # id once.
-         push @del, $id unless grep { $_ == $id } @del;
-      }
-      elsif ($x == 2) {
-         my $id = int(rand(500)) || 1;
-         if ( !grep { $_ == $id } @del ) {
-            my $t=time;
-            $dbh->do("update $db.$tbl set c='updated row $t' where $pkcol=$id");
-            push @upd, $id;
-         }
-      }
-      else {
-         my $id = 500 + $i;
-         my $t=time;
-         $dbh->do("insert ignore into $db.$tbl ($pkcol, c) values ($id, 'new row $t')");
-         push @ins, $id;
-      }
-
-      # COMMIT every N statements
-      if ( $cnt++ > 5 ) {
-         $dbh->do('COMMIT');
-         $cnt = 0;
-         usleep($sleep);
-         $dbh->do($start_xa);
-      }
+      $dbh->commit;
    };
    if ( $EVAL_ERROR ) {
-      warn $EVAL_ERROR;
-      last;
+      #Test::More::diag($EVAL_ERROR);
+      #Test::More::diag("lost deleted: @del");
+      #Test::More::diag("lost updated: @upd");
+      #Test::More::diag("lost inserted: @ins");
+   }
+   else {
+      map { $del{$_}++ } @del;
+      map { $ins{$_}++ } @ins;
+      map { $upd{$_}++ } @upd;
    }
 }

-$dbh->do('COMMIT');
+$dbh->do("START TRANSACTION");
+
+for my $i ( 1..5_000 ) {
+   last if -f $stop_file;
+   eval {
+      my $type = int(rand(5)); # roughly 25% DELETE, 25% UPDATE, 50% INSERT
+
+      if ( $type == TYPE_DELETE ) {
+         $id = int(rand(500)) || 1;
+         $del_sth->execute($id);
+         push @del, $id if $del_sth->rows;
+      }
+      elsif ( $type == TYPE_UPDATE ) {
+         $id = int(rand(500)) || 1;
+         if ( !$del{$id} && ($id <= 500 || $ins{$id}) ) {
+            my $t = time;
+            $upd_sth->execute("updated row $t", $id);
+            push @upd, $id;
+         }
+      }
+      else { # INSERT
+         $id = 500 + $i;
+         my $t = time;
+         $ins_sth->execute($id, "new row $t");
+         push @ins, $id;
+      }
+   };
+   if ( $EVAL_ERROR ) {
+      #Test::More::diag($EVAL_ERROR);
+      #Test::More::diag("lost deleted: @del");
+      #Test::More::diag("lost updated: @upd");
+      #Test::More::diag("lost inserted: @ins");
+      reset_counters();
+      sleep $sleep;
+      $dbh->do("START TRANSACTION");
+   }
+
+   # COMMIT every N statements. With PXC this can fail.
+   if ( ++$cnt >= 5 ) {
+      commit();
+      reset_counters();
+      sleep $sleep;
+      # TODO: somehow this can fail if called very near when
+      # the old table is dropped.
+      eval { $dbh->do("START TRANSACTION"); };
+      if ( $EVAL_ERROR ) {
+         #Test::More::diag($EVAL_ERROR);
+      }
+   }
+   else {
+      sleep 0.001;
+   }
+}
+
+commit();
 $dbh->disconnect();

-print "deleted:" . join(',', @del) . "\n";
-print "updated:" . join(',', @upd) . "\n";
-print "inserted:" . join(',', @ins) . "\n";
+print "deleted:" . join(',', sort keys %del) . "\n";
+print "updated:" . join(',', sort keys %upd) . "\n";
+print "inserted:" . join(',', sort keys %ins) . "\n";

 exit 0;
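Aside, not part of the commit: the rewritten helper above only folds a batch of ids into %del, %ins and %upd after the transaction is known to have committed, so rows whose transaction was rolled back (for example by a wsrep certification failure on a cluster) are never reported as modified. The core of that pattern, distilled from the code above (sketch only):

    # Count a batch only once COMMIT is known to have succeeded.
    eval { $dbh->commit };
    if ( $EVAL_ERROR ) {
       reset_counters();      # the batch may have been rolled back
    }
    else {
       $del{$_}++ for @del;   # durable; record what actually changed
       $ins{$_}++ for @ins;
       $upd{$_}++ for @upd;
    }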
@@ -28,9 +28,6 @@ my $slave_dbh = $sb->get_dbh_for('slave1');
 if ( !$master_dbh ) {
 plan skip_all => 'Cannot connect to sandbox master';
 }
-else {
-   plan tests => 6;
-}

 my $q = new Quoter();
 my $tp = new TableParser(Quoter => $q);
@@ -66,7 +63,7 @@ like( $output,
 "Original table must exist"
 );

-$sb->load_file('master', "$sample/basic_no_fks.sql");
+$sb->load_file('master', "$sample/basic_no_fks_innodb.sql");
 $master_dbh->do("USE pt_osc");
 $slave_dbh->do("USE pt_osc");

@@ -100,7 +97,7 @@ like( $output,
 # Checks for the new table.
 # #############################################################################

-$sb->load_file('master', "$sample/basic_no_fks.sql");
+$sb->load_file('master', "$sample/basic_no_fks_innodb.sql");
 $master_dbh->do("USE pt_osc");
 $slave_dbh->do("USE pt_osc");

@@ -126,4 +123,4 @@ like(
 # #############################################################################
 $sb->wipe_clean($master_dbh);
 ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
-exit;
+done_testing;
@@ -10,7 +10,7 @@ ERRORS DIFFS ROWS SKIPPED TABLE
 0 0 0 0 mysql.host
 0 0 0 0 mysql.ndb_binlog_index
 0 0 0 0 mysql.plugin
-0 0 6 0 mysql.proc
+0 0 0 0 mysql.proc
 0 0 0 0 mysql.procs_priv
 0 0 0 0 mysql.servers
 0 0 0 0 mysql.tables_priv
@@ -10,7 +10,7 @@ ERRORS DIFFS ROWS CHUNKS SKIPPED TABLE
 0 0 0 1 0 mysql.host
 0 0 0 1 0 mysql.ndb_binlog_index
 0 0 0 1 0 mysql.plugin
-0 0 6 1 0 mysql.proc
+0 0 0 1 0 mysql.proc
 0 0 0 1 0 mysql.procs_priv
 0 0 0 1 0 mysql.servers
 0 0 0 1 0 mysql.tables_priv