Merge pt-archiver-pxc-tests

This commit is contained in:
Daniel Nichter
2012-11-30 13:02:56 -07:00
16 changed files with 580 additions and 83 deletions


@@ -6332,6 +6332,41 @@ in long table scans if you're trying to nibble from the end of the table by an
index other than the one it prefers. See L<"--source"> and read the
documentation on the C<i> part if this applies to you.
=head1 Percona XtraDB Cluster
pt-archiver works with Percona XtraDB Cluster (PXC) 5.5.28-23.7 and newer,
but there are three limitations you should consider before archiving on
a cluster:
=over
=item Error on commit
pt-archiver does not check for errors when it commits transactions.
Commits on PXC can fail, but the tool does not yet detect a failed commit
or retry the transaction. If a commit fails, the tool dies; see the
sketch after this list.
=item MyISAM tables
Archiving MyISAM tables works, but MyISAM support in PXC is still
experimental at the time of this release. There are several known bugs with
PXC, MyISAM tables, and C<AUTO_INCREMENT> columns. Therefore, you must ensure
that archiving will not directly or indirectly result in the use of default
C<AUTO_INCREMENT> values for a MyISAM table. For example, this happens with
L<"--dest"> if L<"--columns"> is used and the C<AUTO_INCREMENT> column is not
included; see the example after this list. The tool does not check for this!
=item Non-cluster options
Certain options may not work on a cluster. For example, if a cluster node
is not also a slave, then L<"--check-slave-lag"> does not work. And since PXC
tables are usually InnoDB, and InnoDB does not support C<INSERT DELAYED>,
L<"--delayed-insert"> does not work. Other options may also not work; the
tool does not check for them, so test archiving on a test cluster before
archiving on your real cluster.
=back
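The commit limitation can be worked around by the caller. Below is a minimal,
illustrative sketch (not pt-archiver code): because PXC can roll back a
transaction at commit time, the whole transaction must be replayed, not just
the final C<COMMIT>.

   # Hypothetical helper, for illustration only.  $txn is a code ref
   # that (re)executes the transaction's statements.
   sub txn_with_retry {
      my ($dbh, $txn, $tries) = @_;
      $tries ||= 3;
      for my $try ( 1 .. $tries ) {
         eval {
            $txn->();        # replay the statements
            $dbh->commit();  # PXC reports certification conflicts here
         };
         return 1 unless $@;
         warn "Transaction failed (attempt $try of $tries): $@";
      }
      die "Transaction failed after $tries attempts";
   }

The MyISAM pitfall looks like this hypothetical command, where the MyISAM
destination table has an C<AUTO_INCREMENT> column C<id> that L<"--columns">
omits, so the destination generates default auto-increment values:

   pt-archiver --source h=host1,D=db,t=src --dest t=dst_myisam \
      --columns c1,c2 --where '1=1'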
=head1 OUTPUT
If you specify L<"--progress">, the output is a header row, plus status output


@@ -331,9 +331,9 @@ sub ok {
# Dings a heartbeat on the master, and waits until the slave catches up fully.
sub wait_for_slaves {
- my ($self, $slave) = @_;
- my $master_dbh = $self->get_dbh_for('master');
- my $slave2_dbh = $self->get_dbh_for($slave || 'slave2');
+ my ($self, %args) = @_;
+ my $master_dbh = $self->get_dbh_for($args{master} || 'master');
+ my $slave2_dbh = $self->get_dbh_for($args{slave} || 'slave2');
my ($ping) = $master_dbh->selectrow_array("SELECT MD5(RAND())");
$master_dbh->do("UPDATE percona_test.sentinel SET ping='$ping' WHERE id=1");
PerconaTest::wait_until(
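# With the named-args signature above, callers can now pick both ends of
# the wait.  Example calls (a sketch; the cluster-slave form appears in
# the pt-table-checksum test later in this commit):
#
#   $sb->wait_for_slaves();                                        # master -> slave2
#   $sb->wait_for_slaves(master => 'node1', slave => 'cslave1');   # node1 -> its async slave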
@@ -409,19 +409,24 @@ sub clear_genlogs {
return;
}
+ sub is_cluster_mode {
+    my ($self) = @_;
+    return 0 unless $self->is_cluster_node('node1');
+    return 0 unless $self->is_cluster_node('node2');
+    return 0 unless $self->is_cluster_node('node3');
+    return 1;
+ }
sub is_cluster_node {
my ($self, $server) = @_;
my $sql = "SHOW VARIABLES LIKE 'wsrep_on'";
PTDEBUG && _d($sql);
my $row = $self->use($server, qq{-ss -e "$sql"});
PTDEBUG && _d($row);
$row = [split " ", $row];
- return $row && $row->[1]
-    ? ($row->[1] eq 'ON' || $row->[1] eq '1')
-    : 0;
+ return $row && $row->[1] && ($row->[1] eq 'ON' || $row->[1] eq '1');
}
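# These two checks are how the test files in this commit gate their plans:
# PXC-only files skip unless the whole sandbox is a cluster, and
# replication-specific files skip when it is.  For example:
#
#   plan skip_all => 'PXC tests'   unless $sb->is_cluster_mode;
#   plan skip_all => 'Not for PXC' if     $sb->is_cluster_mode;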
sub can_load_data {
@@ -531,6 +536,12 @@ sub port_for {
return $port_for{$server};
}
+ sub config_file_for {
+    my ($self, $server) = @_;
+    my $port = $self->port_for($server);
+    return "/tmp/$port/my.sandbox.cnf";
+ }
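# The returned path feeds the F= (defaults-file) part of a DSN, as the new
# pxc.t below does.  Example (sketch):
#
#   my $node1_cnf = $sb->config_file_for('node1');
#   pt_archiver::main('--where', '1=1',
#      '--source', "D=test,t=table_1,F=$node1_cnf", '--purge');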
sub _d {
my ($package, undef, $line) = caller 0;
@_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }


@@ -187,20 +187,21 @@ SKIP: {
$sb->load_file('master', "t/pt-archiver/samples/bulk_regular_insert.sql");
$sb->wait_for_slaves();
- my $original_rows = $slave1_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+ my $original_rows  = $slave1_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+ my $original_no_id = $slave1_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
  is_deeply(
-    $original_rows,
+    $original_no_id,
     [
-       [1, 'aa', '11:11:11'],
-       [2, 'bb', '11:11:12'],
-       [3, 'cc', '11:11:13'],
-       [4, 'dd', '11:11:14'],
-       [5, 'ee', '11:11:15'],
-       [6, 'ff', '11:11:16'],
-       [7, 'gg', '11:11:17'],
-       [8, 'hh', '11:11:18'],
-       [9, 'ii', '11:11:19'],
-       [10,'jj', '11:11:10'],
+       ['aa', '11:11:11'],
+       ['bb', '11:11:12'],
+       ['cc', '11:11:13'],
+       ['dd', '11:11:14'],
+       ['ee', '11:11:15'],
+       ['ff', '11:11:16'],
+       ['gg', '11:11:17'],
+       ['hh', '11:11:18'],
+       ['ii', '11:11:19'],
+       ['jj', '11:11:10'],
     ],
     "Bug 903387: slave has rows"
  );
@@ -213,11 +214,11 @@ SKIP: {
qw(--limit 10)) },
);
- $rows = $master_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+ $rows = $master_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
is_deeply(
$rows,
[
-    [10,'jj', '11:11:10'],
+    ['jj', '11:11:10'],
],
"Bug 903387: rows deleted on master"
) or diag(Dumper($rows));


@@ -59,6 +59,10 @@ like($output, qr/copy\s+$chks/, 'copy checksum');
# Issue 1260: mk-archiver --bulk-insert data loss
# ############################################################################
$sb->load_file('master', 't/pt-archiver/samples/bulk_regular_insert.sql');
+ my $orig_rows = $dbh->selectall_arrayref('select id from bri.t order by id');
+ my $lt_8 = [ grep { $_->[0] < 8 } @$orig_rows ];
+ my $ge_8 = [ grep { $_->[0] >= 8 } @$orig_rows ];
$output = output(
sub { pt_archiver::main(
'--where', "id < 8", qw(--limit 100000 --txn-size 1000),
@@ -69,14 +73,14 @@ $output = output(
$rows = $dbh->selectall_arrayref('select id from bri.t order by id');
is_deeply(
$rows,
-    [[8],[9],[10]],
+    $ge_8,
"--bulk-insert left 3 rows (issue 1260)"
);
$rows = $dbh->selectall_arrayref('select id from bri.t_arch order by id');
is_deeply(
$rows,
-    [[1],[2],[3],[4],[5],[6],[7]],
+    $lt_8,
"--bulk-insert archived 7 rows (issue 1260)"
);


@@ -39,27 +39,27 @@ output(
sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1", qw(--dest t=t_arch --where 1=1 --bulk-insert --limit 3)) },
);
- my $t_rows = $dbh->selectall_arrayref('select * from t order by id');
- my $t_arch_rows = $dbh->selectall_arrayref('select * from t_arch order by id');
+ my $t_rows = $dbh->selectall_arrayref('select c,t from t order by id');
+ my $t_arch_rows = $dbh->selectall_arrayref('select c,t from t_arch order by id');
is_deeply(
$t_rows,
-    [ ['10', 'jj', '11:11:10'] ],
+    [ ['jj', '11:11:10'] ],
"Table after normal bulk insert"
);
is_deeply(
$t_arch_rows,
[
-    ['1','aa','11:11:11'],
-    ['2','bb','11:11:12'],
-    ['3','cc','11:11:13'],
-    ['4','dd','11:11:14'],
-    ['5','ee','11:11:15'],
-    ['6','ff','11:11:16'],
-    ['7','gg','11:11:17'],
-    ['8','hh','11:11:18'],
-    ['9','ii','11:11:19'],
+    ['aa','11:11:11'],
+    ['bb','11:11:12'],
+    ['cc','11:11:13'],
+    ['dd','11:11:14'],
+    ['ee','11:11:15'],
+    ['ff','11:11:16'],
+    ['gg','11:11:17'],
+    ['hh','11:11:18'],
+    ['ii','11:11:19'],
],
"Archive table after normal bulk insert"
);
@@ -72,8 +72,8 @@ $dbh->do('use bri');
`$cmd --source F=$cnf,D=bri,t=t,L=1 --dest t=t_arch,m=bulk_regular_insert --where "1=1" --bulk-insert --limit 3`;
- my $bri_t_rows = $dbh->selectall_arrayref('select * from t order by id');
- my $bri_t_arch_rows = $dbh->selectall_arrayref('select * from t_arch order by id');
+ my $bri_t_rows = $dbh->selectall_arrayref('select c,t from t order by id');
+ my $bri_t_arch_rows = $dbh->selectall_arrayref('select c,t from t_arch order by id');
is_deeply(
$bri_t_rows,


@@ -26,6 +26,9 @@ if ( !$dbh ) {
elsif ( !$dbh2 ) {
plan skip_all => 'Cannot connect to sandbox slave';
}
+ elsif ( $sb->is_cluster_mode ) {
+    plan skip_all => 'Not for PXC';
+ }
else {
plan tests => 7;
}


@@ -10,6 +10,7 @@ use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;
use Data::Dumper;
use PerconaTest;
use Sandbox;
@@ -22,9 +23,6 @@ my $dbh = $sb->get_dbh_for('master');
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
}
- else {
-    plan tests => 14;
- }
my $output;
my $rows;
@@ -56,16 +54,34 @@ ok(scalar @$rows == 0, 'Purged all rows ok');
# This test has been changed. I manually examined the tables before
# and after the archive operation and I am convinced that the original
# expected output was incorrect.
$rows = $dbh->selectall_arrayref("select * from test.table_2", { Slice => {}});
is_deeply(
$rows,
[ { a => '1', b => '2', c => '3', d => undef },
my ($sql, $expect_rows);
if ( $sb->is_cluster_node('master') ) {
# PXC nodes have auto-inc offsets, so rather than see what they are
# and account for them, we just don't select the auto-inc col, a.
# This test is really about b, c, and d anyway.
$sql = "SELECT b, c, d FROM test.table_2 ORDER BY a";
$expect_rows = [
{ b => '2', c => '3', d => undef },
{ b => undef, c => '3', d => undef },
{ b => '2', c => '3', d => undef },
{ b => '2', c => '3', d => undef },
];
}
else {
# The original, non-PXC values.
$sql = "SELECT * FROM test.table_2 ORDER BY a";
$expect_rows = [
{ a => '1', b => '2', c => '3', d => undef },
{ a => '2', b => undef, c => '3', d => undef },
{ a => '3', b => '2', c => '3', d => undef },
{ a => '4', b => '2', c => '3', d => undef },
],
'Found rows in new table OK when archiving only some columns to another table');
];
}
$rows = $dbh->selectall_arrayref($sql, { Slice => {}});
is_deeply(
$rows,
$expect_rows,
'Found rows in new table OK when archiving only some columns to another table') or diag(Dumper($rows));
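# (Aside, a sketch that is not part of this test: the offsets mentioned in
# the comment above exist because Galera's auto-increment control gives each
# node its own auto_increment_offset and auto_increment_increment, so the
# value of the auto-inc column `a` depends on which node ran the INSERT.
# They can be inspected with:)
#
#   my $vars = $dbh->selectall_arrayref(
#      "SHOW VARIABLES LIKE 'auto_increment%'");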
# Archive to another table with autocommit
$sb->load_file('master', 't/pt-archiver/samples/tables1-4.sql');
@@ -102,4 +118,4 @@ is($output + 0, 10, 'Rows got archived');
# #############################################################################
$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
- exit;
+ done_testing;


@@ -34,11 +34,11 @@ my $output;
$sb->load_file('master', 't/pt-archiver/samples/issue_1225.sql');
$dbh->do('set names "utf8"');
- my $original_rows = $dbh->selectall_arrayref('select * from issue_1225.t where i in (1, 2)');
+ my $original_rows = $dbh->selectall_arrayref('select c from issue_1225.t limit 2');
  is_deeply(
     $original_rows,
-    [ [ 1, 'が'], # Your terminal must be UTF8 to see this Japanese character.
-      [ 2, 'が'],
+    [ ['が'], # Your terminal must be UTF8 to see this Japanese character.
+      ['が'],
],
"Inserted UTF8 data"
);
@@ -61,10 +61,10 @@ $output = output(
},
);
- my $archived_rows = $dbh->selectall_arrayref('select * from issue_1225.a where i in (1, 2)');
+ my $archived_rows = $dbh->selectall_arrayref('select c from issue_1225.a limit 2');
  ok(
-    $original_rows->[0]->[1] ne $archived_rows->[0]->[1],
+    $original_rows->[0]->[0] ne $archived_rows->[0]->[0],
"UTF8 characters lost when cxn isn't also UTF8"
);
@@ -78,7 +78,7 @@ $output = output(
},
);
- $archived_rows = $dbh->selectall_arrayref('select * from issue_1225.a where i in (1, 2)');
+ $archived_rows = $dbh->selectall_arrayref('select c from issue_1225.a limit 2');
is_deeply(
$original_rows,


@@ -10,6 +10,7 @@ use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;
use Data::Dumper;
use PerconaTest;
use Sandbox;
@@ -26,9 +27,6 @@ elsif ( $DBD::mysql::VERSION lt '4' ) {
plan skip_all => "DBD::mysql version $DBD::mysql::VERSION has utf8 bugs. "
. "See https://bugs.launchpad.net/percona-toolkit/+bug/932327";
}
- else {
-    plan tests => 3;
- }
my $output;
my $rows;
@@ -42,14 +40,14 @@ my $file = "/tmp/mk-archiver-file.txt";
$sb->load_file('master', 't/pt-archiver/samples/issue_1225.sql');
$dbh->do('set names "utf8"');
- my $original_rows = $dbh->selectall_arrayref('select * from issue_1225.t where i in (1, 2)');
+ my $original_rows = $dbh->selectall_arrayref('select c from issue_1225.t where i in (1, 2)');
  is_deeply(
     $original_rows,
-    [ [ 1, 'が'], # Your terminal must be UTF8 to see this Japanese character.
-      [ 2, 'が'],
+    [ [ 'が'], # Your terminal must be UTF8 to see this Japanese character.
+      [ 'が'],
     ],
     "Inserted UTF8 data"
- );
+ ) or diag(Dumper($original_rows));
diag(`rm -rf $file >/dev/null`);
@@ -62,7 +60,8 @@ $output = output(
stderr => 1,
);
- my $got = `cat $file`;
+ my $got = slurp_file($file);
+ $got =~ s/^\d+//gsm;
ok(
no_diff(
$got,
@@ -79,4 +78,4 @@ diag(`rm -rf $file >/dev/null`);
# #############################################################################
$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
- exit;
+ done_testing;

t/pt-archiver/pxc.t (new file, 408 lines)

@@ -0,0 +1,408 @@
#!/usr/bin/env perl
BEGIN {
die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};
use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;
use Time::HiRes qw(time);
# Hostnames make testing less accurate. Tests need to see
# that such-and-such happened on specific slave hosts, but
# the sandbox servers are all on one host so all slaves have
# the same hostname.
$ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1;
use PerconaTest;
use Sandbox;
use Data::Dumper;
require "$trunk/bin/pt-archiver";
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $node1_dbh = $sb->get_dbh_for('node1');
my $node2_dbh = $sb->get_dbh_for('node2');
my $node3_dbh = $sb->get_dbh_for('node3');
if ( !$node1_dbh ) {
plan skip_all => 'Cannot connect to cluster node1';
}
elsif ( !$node2_dbh ) {
plan skip_all => 'Cannot connect to cluster node2';
}
elsif ( !$node3_dbh ) {
plan skip_all => 'Cannot connect to cluster node3';
}
elsif ( !$sb->is_cluster_mode ) {
plan skip_all => "PXC tests";
}
my $output;
my $count;
my $sql;
my $rows;
my $node1_cnf = $sb->config_file_for("node1");
my $node2_cnf = $sb->config_file_for("node2");
my @args = qw(--where 1=1);
$sb->create_dbs($node1_dbh, ['test']);
sub check_rows {
my (%args) = @_;
my @required_args = qw(name sql expect);
foreach my $arg ( @required_args ) {
die "I need a $arg argument" unless $args{$arg};
}
my ($name, $sql, $expect) = @args{@required_args};
$sb->wait_for_slaves;
my $rows = $node1_dbh->selectall_arrayref($sql);
is_deeply(
$rows,
$expect,
"$name on node1"
) or diag(Dumper($rows));
$rows = $node2_dbh->selectall_arrayref($sql);
is_deeply(
$rows,
$expect,
"$name on node2"
) or diag(Dumper($rows));
$rows = $node3_dbh->selectall_arrayref($sql);
is_deeply(
$rows,
$expect,
"$name on node3"
) or diag(Dumper($rows));
}
# ###########################################################################
# Purge rows.
# ###########################################################################
$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
$node1_dbh->do("INSERT INTO test.table_2 SELECT * FROM test.table_1");
# Since there's no auto-inc column, all rows should be purged on all nodes.
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
qw(--purge))
},
stderr => 1,
);
check_rows(
name => "Purged all rows",
sql => "SELECT * FROM test.table_1 ORDER BY a",
expect => [],
);
# table_2 has an auto-inc, so all rows less the max auto-inc row
# should be purged on all nodes. This is due to --[no]safe-auto-increment.
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_2,F=$node1_cnf",
qw(--purge))
},
stderr => 1,
);
check_rows(
name => "Purged rows less max auto-inc",
sql => "SELECT * FROM test.table_2 ORDER BY a",
expect => [[qw(4 2 3), "\n"]],
);
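# (Sketch, not run here: disabling the safeguard with --nosafe-auto-increment
# would purge the max auto-inc row too, so the expected result would be [].)
#
#   pt_archiver::main(@args, '--source', "D=test,t=table_2,F=$node1_cnf",
#      qw(--purge --nosafe-auto-increment));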
# ###########################################################################
# Do not purge rows.
# ###########################################################################
$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
my $expected_rows = $node1_dbh->selectall_arrayref(
"SELECT * FROM test.table_1 ORDER BY a");
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
qw(--no-delete --purge))
},
stderr => 1,
);
check_rows(
name => "--no-delete left all rows",
sql => "SELECT * FROM test.table_1 ORDER BY a",
expect => $expected_rows,
);
# #############################################################################
# Archive rows to another table
# #############################################################################
# Assumes the previous test left tables1-4.sql loaded and that
# $expected_rows still holds the real, expected rows.
# Same node
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
qw(--dest t=table_2))
},
stderr => 1,
);
check_rows(
name => "Rows purged from table_1 (same node)",
sql => "SELECT * FROM test.table_1 ORDER BY a",
expect => [],
);
check_rows(
name => "Rows archived to table_2 (same node)",
sql => "SELECT * FROM test.table_2 ORDER BY a",
expect => $expected_rows,
);
# To another node
$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
$expected_rows = $node1_dbh->selectall_arrayref(
"SELECT * FROM test.table_1 ORDER BY a");
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
'--dest', "F=$node2_cnf,D=test,t=table_2")
},
stderr => 1,
);
check_rows(
name => "Rows purged from table_1 (cross-node)",
sql => "SELECT * FROM test.table_1 ORDER BY a",
expect => [],
);
check_rows(
name => "Rows archived to table_2 (cross-node)",
sql => "SELECT * FROM test.table_2 ORDER BY a",
expect => $expected_rows,
);
# #############################################################################
# --bulk-insert
# #############################################################################
# Same node
$sb->load_file('node1', "t/pt-archiver/samples/bulk_regular_insert.sql");
$expected_rows = $node1_dbh->selectall_arrayref(
"SELECT * FROM bri.t ORDER BY id");
# The max auto-inc col won't be archived, so:
my $max_auto_inc_row = pop @$expected_rows;
output(
sub {
pt_archiver::main(@args, '--source', "F=$node1_cnf,D=bri,t=t,L=1",
qw(--dest t=t_arch --bulk-insert --limit 3))
},
stderr => 1,
);
check_rows(
name => "--bulk-insert source table (same node)",
sql => "select * from bri.t order by id",
expect => [ $max_auto_inc_row ],
);
check_rows(
name => "--bulk-insert dest table (same node)",
sql => "select * from bri.t_arch order by id",
expect => $expected_rows,
);
# To another node
$sb->load_file('node1', "t/pt-archiver/samples/bulk_regular_insert.sql");
output(
sub {
pt_archiver::main(@args, '--source', "F=$node1_cnf,D=bri,t=t,L=1",
'--dest', "F=$node2_cnf,t=t_arch", qw(--bulk-insert --limit 3))
},
stderr => 1,
);
check_rows(
name => "--bulk-insert source table (cross-node)",
sql => "select * from bri.t order by id",
expect => [ $max_auto_inc_row ],
);
check_rows(
name => "--bulk-insert dest table (cross-node)",
sql => "select * from bri.t_arch order by id",
expect => $expected_rows,
);
# #############################################################################
# --bulk-delete
# #############################################################################
# Same node
$sb->load_file('node2', 't/pt-archiver/samples/table5.sql');
$expected_rows = $node1_dbh->selectall_arrayref(
"SELECT * FROM test.table_5 ORDER BY a,b,c,d");
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_5,F=$node1_cnf",
qw(--no-ascend --limit 50 --bulk-delete),
qw(--statistics --dest t=table_5_dest))
},
stderr => 1,
);
check_rows(
name => "--bulk-delete source table (same node)",
sql => "select * from test.table_5",
expect => [],
);
check_rows(
name => "--bulk-delete dest table (same node)",
sql => "select * from test.table_5_dest order by a,b,c,d",
expect => $expected_rows,
);
# To another node
$sb->load_file('node2', 't/pt-archiver/samples/table5.sql');
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_5,F=$node1_cnf",
qw(--no-ascend --limit 50 --bulk-delete),
qw(--statistics), '--dest', "F=$node2_cnf,t=table_5_dest")
},
stderr => 1,
);
check_rows(
name => "--bulk-delete source table (cross-node)",
sql => "select * from test.table_5",
expect => [],
);
check_rows(
name => "--bulk-delete dest table (cross-node)",
sql => "select * from test.table_5_dest order by a,b,c,d",
expect => $expected_rows,
);
# #############################################################################
# Repeat some of the above tests with MyISAM.
# #############################################################################
$sb->load_file('node1', 't/pt-archiver/samples/table14.sql');
$expected_rows = $node1_dbh->selectall_arrayref(
"SELECT * FROM test.table_1 ORDER BY a");
$node1_dbh->do("INSERT INTO test.table_2 SELECT * FROM test.table_1");
# Since there's no auto-inc column, all rows should be purged on all nodes.
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
qw(--purge))
},
stderr => 1,
);
check_rows(
name => "MyISAM: Purged all rows",
sql => "SELECT * FROM test.table_1 ORDER BY a",
expect => [],
);
# table_2 has an auto-inc, so all rows less the max auto-inc row
# should be purged on all nodes. This is due to --[no]safe-auto-increment.
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_2,F=$node1_cnf",
qw(--purge))
},
stderr => 1,
);
check_rows(
name => "MyISAM: Purged rows less max auto-inc",
sql => "SELECT * FROM test.table_2 ORDER BY a",
expect => [[qw(4 2 3), "\n"]],
);
# Archive rows to another MyISAM table.
# Same node
$sb->load_file('node1', 't/pt-archiver/samples/table14.sql');
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
qw(--dest t=table_2))
},
stderr => 1,
);
check_rows(
name => "MyISAM: Rows purged from table_1 (same node)",
sql => "SELECT * FROM test.table_1 ORDER BY a",
expect => [],
);
check_rows(
name => "MyISAM: Rows archived to table_2 (same node)",
sql => "SELECT * FROM test.table_2 ORDER BY a",
expect => $expected_rows,
);
# To another node
$sb->load_file('node1', 't/pt-archiver/samples/table14.sql');
$output = output(
sub {
pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
'--dest', "F=$node2_cnf,D=test,t=table_2")
},
stderr => 1,
);
check_rows(
name => "MyISAM: Rows purged from table_1 (cross-node)",
sql => "SELECT * FROM test.table_1 ORDER BY a",
expect => [],
);
check_rows(
name => "MyISAM: Rows archived to table_2 (cross-node)",
sql => "SELECT * FROM test.table_2 ORDER BY a",
expect => $expected_rows,
);
# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($node1_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;


@@ -11,6 +11,6 @@ create table a (
c char(16)
) charset=utf8;
insert into t values
(null, ""),
(null, ""),
(null, "");
(1, ""),
(2, ""),
(3, "");


@@ -1,2 +1,2 @@
1
2


@@ -4,7 +4,7 @@ CREATE TABLE issue_131_src (
id INT AUTO_INCREMENT PRIMARY KEY,
name varchar(8)
);
- INSERT INTO issue_131_src VALUES (null,'aaa'),(null,'bbb'),(null,'zzz');
+ INSERT INTO issue_131_src VALUES (1,'aaa'),(2,'bbb'),(3,'zzz');
DROP TABLE IF EXISTS issue_131_dst;
CREATE TABLE issue_131_dst (


@@ -2,6 +2,6 @@ use test;
-- This test uses an auto_increment column to test --safeautoinc.
drop table if exists table_12;
- create table table_12( a int not null auto_increment primary key, b int);
- insert into table_12(b) values(1),(1),(1);
+ create table table_12( a int not null auto_increment primary key, b int) engine=innodb;
+ insert into table_12(a,b) values(1,1),(2,1),(3,1);


@@ -0,0 +1,25 @@
use test;
drop table if exists table_1;
drop table if exists table_2;
create table table_1(
a int not null primary key,
b int,
c int not null,
d varchar(50),
key(b)
) engine=myisam;
create table table_2(
a int not null primary key auto_increment,
b int,
c int not null,
d varchar(50)
) engine=myisam;
insert into table_1 values
(1, 2, 3, 4),
(2, null, 3, 4),
(3, 2, 3, "\t"),
(4, 2, 3, "\n");


@@ -20,10 +20,7 @@ $ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1;
use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-table-checksum";
- # Do this after requiring ptc, since it uses Mo
- require VersionParser;
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
@@ -31,12 +28,7 @@ my $node1 = $sb->get_dbh_for('node1');
my $node2 = $sb->get_dbh_for('node2');
my $node3 = $sb->get_dbh_for('node3');
- my $db_flavor = VersionParser->new($node1)->flavor();
- if ( $db_flavor !~ /XtraDB Cluster/ ) {
-    plan skip_all => "PXC tests";
- }
- elsif ( !$node1 ) {
+ if ( !$node1 ) {
plan skip_all => 'Cannot connect to cluster node1';
}
elsif ( !$node2 ) {
@@ -45,6 +37,9 @@ elsif ( !$node2 ) {
elsif ( !$node3 ) {
plan skip_all => 'Cannot connect to cluster node3';
}
+ elsif ( !$sb->is_cluster_mode ) {
+    plan skip_all => "PXC tests";
+ }
# The sandbox servers run with lock_wait_timeout=3 and it's not dynamic
# so we need to specify --lock-wait-timeout=3 else the tool will die.
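# For example, every run in this file must pass it along these lines (a
# sketch; the DSN variable here is illustrative):
#
#   pt_table_checksum::main($node1_dsn, qw(--lock-wait-timeout 3));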
@@ -202,7 +197,7 @@ $node2->do("set sql_log_bin=1");
# Wait for the slave to apply the binlogs from node1 (its master).
# Then change it so it's not consistent.
PerconaTest::wait_for_table($slave_dbh, 'test.t');
- $sb->wait_for_slaves('cslave1');
+ $sb->wait_for_slaves(master => 'node1', slave => 'cslave1');
$slave_dbh->do("update test.t set c='zebra' where c='z'");
# Another quick test first: the tool should complain about the slave's
@@ -262,7 +257,7 @@ $sb->stop_sandbox('cslave1');
# Wait for the slave to apply the binlogs from node2 (its master).
# Then change it so it's not consistent.
PerconaTest::wait_for_table($slave_dbh, 'test.t');
- $sb->wait_for_slaves('cslave1');
+ $sb->wait_for_slaves(master => 'node1', slave => 'cslave1');
$slave_dbh->do("update test.t set c='zebra' where c='z'");
($row) = $slave_dbh->selectrow_array("select c from test.t order by c desc limit 1");