mirror of https://github.com/percona/percona-toolkit.git
synced 2025-09-11 05:29:30 +00:00

Fix pt-archiver tests to work with PXC in cluster mode. dest.t is still a work in progress.
@@ -409,19 +409,24 @@ sub clear_genlogs {
    return;
 }
 
+sub is_cluster_mode {
+   my ($self) = @_;
+   return 0 unless $self->is_cluster_node('node1');
+   return 0 unless $self->is_cluster_node('node2');
+   return 0 unless $self->is_cluster_node('node3');
+   return 1;
+}
+
 sub is_cluster_node {
    my ($self, $server) = @_;
 
    my $sql = "SHOW VARIABLES LIKE 'wsrep_on'";
    PTDEBUG && _d($sql);
    my $row = $self->use($server, qq{-ss -e "$sql"});
    PTDEBUG && _d($row);
    $row = [split " ", $row];
 
-   return $row && $row->[1]
-      ? ($row->[1] eq 'ON' || $row->[1] eq '1')
-      : 0;
+   return $row && $row->[1] && ($row->[1] eq 'ON' || $row->[1] eq '1');
 }
 
 sub can_load_data {
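For readers of the Sandbox change above: a node counts as part of the cluster only when the Galera variable wsrep_on reports ON (or 1), and the new is_cluster_mode simply requires that all three standing sandbox nodes pass that probe. A minimal standalone sketch of the same check using DBI directly, instead of the sandbox's mysql-client wrapper (host, port, and credentials are illustrative assumptions, not values from this commit):

#!/usr/bin/env perl
# Sketch of the wsrep_on probe behind Sandbox::is_cluster_node.
# Connection values are assumed sandbox defaults, not from the commit.
use strict;
use warnings;
use DBI;

my $dbh = DBI->connect(
   "DBI:mysql:host=127.0.0.1;port=12345",
   "msandbox", "msandbox",
   { RaiseError => 1 },
);

# SHOW VARIABLES returns (Variable_name, Value); on a PXC node Value is ON or 1.
my ($var, $val) = $dbh->selectrow_array("SHOW VARIABLES LIKE 'wsrep_on'");
my $is_cluster_node = ($val && ($val eq 'ON' || $val eq '1')) ? 1 : 0;
print "cluster node: $is_cluster_node\n";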
@@ -26,6 +26,9 @@ if ( !$dbh ) {
 elsif ( !$dbh2 ) {
    plan skip_all => 'Cannot connect to sandbox slave';
 }
+elsif ( $sb->is_cluster_mode ) {
+   plan skip_all => 'Not for PXC';
+}
 else {
    plan tests => 7;
 }
@@ -10,6 +10,7 @@ use strict;
 use warnings FATAL => 'all';
 use English qw(-no_match_vars);
 use Test::More;
+use Data::Dumper;
 
 use PerconaTest;
 use Sandbox;
@@ -22,9 +23,6 @@ my $dbh = $sb->get_dbh_for('master');
 if ( !$dbh ) {
    plan skip_all => 'Cannot connect to sandbox master';
 }
-else {
-   plan tests => 14;
-}
 
 my $output;
 my $rows;
@@ -64,8 +62,7 @@ is_deeply(
       { a => '3', b => '2', c => '3', d => undef },
       { a => '4', b => '2', c => '3', d => undef },
    ],
-   'Found rows in new table OK when archiving only some columns to another table');
+   'Found rows in new table OK when archiving only some columns to another table') or diag(Dumper($rows));
 
 # Archive to another table with autocommit
 $sb->load_file('master', 't/pt-archiver/samples/tables1-4.sql');
@@ -102,4 +99,4 @@ is($output + 0, 10, 'Rows got archived');
 # #############################################################################
 $sb->wipe_clean($dbh);
 ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
-exit;
+done_testing;
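A pattern worth noting across these test-file diffs: the fixed `plan tests => N` counts are dropped and the trailing `exit;` becomes `done_testing;`, because the number of tests that actually run now depends on whether the sandbox is a PXC cluster. A minimal sketch of the idiom, assuming nothing beyond Test::More (the environment check is a stand-in for the real preconditions):

use strict;
use warnings;
use Test::More;

# Skip the whole file when a precondition fails, as the diffs above do for PXC...
if ( $ENV{PRECONDITION_FAILED} ) {   # stand-in for the real checks
   plan skip_all => 'precondition not met';
}

# ...otherwise run however many tests apply and count them at the end.
ok(1, "first test");
ok(1 + 1 == 2, "second test");

done_testing;   # replaces both "plan tests => N" and the trailing "exit;"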
@@ -10,6 +10,7 @@ use strict;
 use warnings FATAL => 'all';
 use English qw(-no_match_vars);
 use Test::More;
+use Data::Dumper;
 
 use PerconaTest;
 use Sandbox;
@@ -26,9 +27,6 @@ elsif ( $DBD::mysql::VERSION lt '4' ) {
    plan skip_all => "DBD::mysql version $DBD::mysql::VERSION has utf8 bugs. "
       . "See https://bugs.launchpad.net/percona-toolkit/+bug/932327";
 }
-else {
-   plan tests => 3;
-}
 
 my $output;
 my $rows;
@@ -49,7 +47,7 @@ is_deeply(
       [ 'が'],
    ],
    "Inserted UTF8 data"
-);
+) or diag(Dumper($original_rows));
 
 diag(`rm -rf $file >/dev/null`);
@@ -80,4 +78,4 @@ diag(`rm -rf $file >/dev/null`);
 # #############################################################################
 $sb->wipe_clean($dbh);
 ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
-exit;
+done_testing;
@@ -12,256 +12,309 @@ use English qw(-no_match_vars);
 use Test::More;
 use Time::HiRes qw(time);
 
+# Hostnames make testing less accurate.  Tests need to see
+# that such-and-such happened on specific slave hosts, but
+# the sandbox servers are all on one host so all slaves have
+# the same hostname.
+$ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1;
+
 use PerconaTest;
 use Sandbox;
 use Data::Dumper;
 require "$trunk/bin/pt-archiver";
 
 my $dp = new DSNParser(opts=>$dsn_opts);
 my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
+my $node1_dbh = $sb->get_dbh_for('node1');
+my $node2_dbh = $sb->get_dbh_for('node2');
+my $node3_dbh = $sb->get_dbh_for('node3');
 
-my $node1 = $sb->get_dbh_for('node1');
-my $db_flavor = VersionParser->new($node1)->flavor();
-
-if ( $db_flavor !~ /XtraDB Cluster/ ) {
+if ( !$node1_dbh ) {
+   plan skip_all => 'Cannot connect to cluster node1';
+}
+elsif ( !$node2_dbh ) {
+   plan skip_all => 'Cannot connect to cluster node2';
+}
+elsif ( !$node3_dbh ) {
+   plan skip_all => 'Cannot connect to cluster node3';
+}
+elsif ( !$sb->is_cluster_mode ) {
    plan skip_all => "PXC tests";
 }
 
-my $c = $sb->start_cluster(
-   nodes => [qw(node4 node5)],
-   env   => q/CLUSTER_NAME="pt_archiver_cluster"/,
-);
-
-my $node4_dbh = $c->{node4}->{dbh};
-my $node5_dbh = $c->{node5}->{dbh};
-
-# Set this up so ->wait_for_slaves works
-$node4_dbh->do("CREATE DATABASE IF NOT EXISTS percona_test");
-$node4_dbh->do("CREATE TABLE IF NOT EXISTS percona_test.sentinel(id int primary key, ping varchar(64) not null default '')");
-my ($ping) = $node4_dbh->selectrow_array("SELECT MD5(RAND())");
-$node4_dbh->do("INSERT INTO percona_test.sentinel(id, ping) values(1, '$ping') ON DUPLICATE KEY UPDATE ping='$ping'");
-sleep 1 until eval { $node5_dbh->selectrow_array("SELECT * FROM percona_test.sentinel") };
-
 my $output;
 my $count;
 my $sql;
-my $cnf = $sb->config_file_for("node4");
+my $rows;
+my $node1_cnf = $sb->config_file_for("node1");
+my $node2_cnf = $sb->config_file_for("node2");
 my @args = qw(--where 1=1);
 
-$sb->create_dbs($node4_dbh, ['test']);
+$sb->create_dbs($node1_dbh, ['test']);
 
-# ###########################################################################
-# These are roughly the same tests as basics.t, but we also check that the
-# other ndoes got the right data.
-# ###########################################################################
-
-# Test --why-quit and --statistics output
-$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-$output = output(sub {pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$cnf", qw(--purge --why-quit --statistics)) });
-like($output, qr/Started at \d/, 'Start timestamp');
-like($output, qr/Source:/, 'source');
-like($output, qr/SELECT 4\nINSERT 0\nDELETE 4\n/, 'row counts');
-like($output, qr/Exiting because there are no more rows/, 'Exit reason');
-
-$sql = "SELECT * FROM test.table_1";
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-my ($m, $n);
-is_deeply(
-   $m = $node4_dbh->selectall_arrayref($sql),
-   $n = $node5_dbh->selectall_arrayref($sql),
-   "Node4 & Node5 remain the same after --purge"
-);
-
-# Test --no-delete.
-$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
-output(sub {pt_archiver::main(@args, qw(--no-delete --purge --source), "D=test,t=table_1,F=$cnf") });
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-is_deeply(
-   $node4_dbh->selectall_arrayref($sql),
-   $node5_dbh->selectall_arrayref($sql),
-   "Node4 & Node5 remain the same after --dest"
-);
-
-# --dest
-$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
-output(sub {pt_archiver::main(@args, qw(--statistics --source), "D=test,t=table_1,F=$cnf", qw(--dest t=table_2)) });
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-$sql = "SELECT * FROM test.table_1, test.table_2";
-is_deeply(
-   $node4_dbh->selectall_arrayref($sql),
-   $node5_dbh->selectall_arrayref($sql),
-   "Node4 & Node5 remain the same after --dest"
-);
-
-# #############################################################################
-# Bug 903387: pt-archiver doesn't honor b=1 flag to create SQL_LOG_BIN statement
-# #############################################################################
-SKIP: {
-   $sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
-   $sb->wait_for_slaves(master => 'node4', slave => 'node5');
-
-   my $original_rows = $node5_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
-   my $original_no_id = $node5_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
-   is_deeply(
-      $original_no_id,
-      [
-         ['aa', '11:11:11'],
-         ['bb', '11:11:12'],
-         ['cc', '11:11:13'],
-         ['dd', '11:11:14'],
-         ['ee', '11:11:15'],
-         ['ff', '11:11:16'],
-         ['gg', '11:11:17'],
-         ['hh', '11:11:18'],
-         ['ii', '11:11:19'],
-         ['jj', '11:11:10'],
-      ],
-      "Bug 903387: node5 has rows"
-   );
-
-   $output = output(
-      sub { pt_archiver::main(
-         '--source', "D=bri,L=1,t=t,F=$cnf,b=1",
-         '--dest', "D=bri,t=t_arch",
-         qw(--where 1=1 --replace --commit-each --bulk-insert --bulk-delete),
-         qw(--limit 10)) },
-   );
-
-   $sb->wait_for_slaves(master => 'node4', slave => 'node5');
-
-   my $rows = $node4_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
+sub check_rows {
+   my (%args) = @_;
+   my @required_args = qw(name sql expect);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($name, $sql, $expect) = @args{@required_args};
+
+   $sb->wait_for_slaves;
+
+   my $rows = $node1_dbh->selectall_arrayref($sql);
    is_deeply(
       $rows,
-      [
-         ['jj', '11:11:10'],
-      ],
-      "Bug 903387: rows deleted on node4"
+      $expect,
+      "$name on node1"
    ) or diag(Dumper($rows));
 
-   $rows = $node5_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+   $rows = $node2_dbh->selectall_arrayref($sql);
    is_deeply(
       $rows,
-      $original_rows,
-      "Bug 903387: node5 still has rows"
+      $expect,
+      "$name on node2"
    ) or diag(Dumper($rows));
 
-   $sql = "SELECT * FROM bri.t_arch ORDER BY id";
+   $rows = $node3_dbh->selectall_arrayref($sql);
    is_deeply(
-      $node5_dbh->selectall_arrayref($sql),
-      $node4_dbh->selectall_arrayref($sql),
-      "Bug 903387: node5 has t_arch"
-   );
-
-   $sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
-   $sb->wait_for_slaves(master => 'node4', slave => 'node5');
-   output(
-      sub { pt_archiver::main(
-         '--source', "D=bri,L=1,t=t,F=$cnf,b=1",
-         '--dest', "D=bri,t=t_arch,b=1",
-         qw(--where 1=1 --replace --commit-each --bulk-insert --bulk-delete),
-         qw(--limit 10)) },
-   );
-
-   is_deeply(
-      $node5_dbh->selectall_arrayref("SELECT * FROM bri.t_arch ORDER BY id"),
-      [],
-      "Bug 903387: ...unless b=1 was also specified for --dest"
-   );
+      $rows,
+      $expect,
+      "$name on node3"
+   ) or diag(Dumper($rows));
 }
 
+# ###########################################################################
+# Purge rows.
+# ###########################################################################
+
+$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
+$node1_dbh->do("INSERT INTO test.table_2 SELECT * FROM test.table_1");
+
+# Since there's no auto-inc column, all rows should be purged on all nodes.
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         qw(--purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Purged all rows",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => [],
+);
+
+# table_2 has an auto-inc, so all rows less the max auto-inc row
+# should be purged on all nodes.  This is due to --[no]safe-auto-increment.
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_2,F=$node1_cnf",
+         qw(--purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Purged rows less max auto-inc",
+   sql    => "SELECT * FROM test.table_2 ORDER BY a",
+   expect => [[qw(4 2 3), "\n"]],
+);
+
+# ###########################################################################
+# Do not purge rows.
+# ###########################################################################
+
+$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
+my $expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM test.table_1 ORDER BY a");
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         qw(--no-delete --purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "--no-delete left all rows",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => $expected_rows,
+);
+
 # #############################################################################
-# Test --bulk-insert
+# Archive rows to another table
 # #############################################################################
 
-$sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
-output(
-   sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1", qw(--dest t=t_arch --where 1=1 --bulk-insert --limit 3)) },
-);
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-
-$sql = 'select * from bri.t order by id';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "--bulk-insert works as expected on the source table"
-);
-
-$sql = 'select * from bri.t_arch order by id';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "...and on the dest table"
-);
-
-# #############################################################################
-# Test --bulk-delete
-# #############################################################################
-
-$sb->load_file('node4', 't/pt-archiver/samples/table5.sql');
-$output = output(
-   sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-delete --where 1=1), "--source", "D=test,t=table_5,F=$cnf", qw(--statistics --dest t=table_5_dest)) },
-);
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-
-$sql = 'select * from test.table_5';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "--bulk-delete works as expected on the source table"
-);
-
-$sql = 'select * from test.table_5_dest';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "...and on the dest table"
-);
-
-# Same as above, but with a twist: --dest points to the second node. We should
-# get the archieved rows in the first node as well
-
-my $node5_dsn = $sb->dsn_for('node5');
-my $node5_cnf = $sb->config_file_for('node5');
-
-$sb->load_file('node4', 't/pt-archiver/samples/table5.sql');
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-$output = output(
-   sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-delete --where 1=1),
-      "--source", "D=test,t=table_5,F=$cnf", qw(--statistics),
-      "--dest", "$node5_dsn,D=test,t=table_5_dest,F=$node5_cnf") },
-);
-# Wait for the --dest table to replicate back
-$sb->wait_for_slaves(master => 'node5', slave => 'node4');
-
-$sql = 'select * from test.table_5_dest';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "--bulk-delete with --dest on the second node, archive ends up in node1 as well"
-);
-
-$sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-output(
-   sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1",
-      "--dest", "$node5_dsn,D=bri,t=t_arch,F=$node5_cnf",
-      qw(--where 1=1 --bulk-insert --limit 3)) },
-);
-$sb->wait_for_slaves(master => 'node5', slave => 'node4');
-
-$sql = 'select * from bri.t_arch';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "--bulk-insert with --dest on the second node, archive ends up in node1 as well"
-);
+# Presume the previous test ^ left tables1-4.sql loaded and that $expected_rows
+# is still the real, expected rows.
+
+# Same node
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         qw(--dest t=table_2))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Rows purged from table_1 (same node)",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => [],
+);
+
+check_rows(
+   name   => "Rows archived to table_2 (same node)",
+   sql    => "SELECT * FROM test.table_2 ORDER BY a",
+   expect => $expected_rows,
+);
+
+# To another node
+
+$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
+$expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM test.table_1 ORDER BY a");
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         '--dest', "F=$node2_cnf,D=test,t=table_2")
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Rows purged from table_1 (cross-node)",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => [],
+);
+
+check_rows(
+   name   => "Rows archived to table_2 (cross-node)",
+   sql    => "SELECT * FROM test.table_2 ORDER BY a",
+   expect => $expected_rows,
+);
+
+# #############################################################################
+# --bulk-insert
+# #############################################################################
+
+# Same node
+
+$sb->load_file('node1', "t/pt-archiver/samples/bulk_regular_insert.sql");
+$expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM bri.t ORDER BY id");
+# The max auto-inc col won't be archived, so:
+my $max_auto_inc_row = pop @$expected_rows;
+
+output(
+   sub {
+      pt_archiver::main(@args, '--source', "F=$node1_cnf,D=bri,t=t,L=1",
+         qw(--dest t=t_arch --bulk-insert --limit 3))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "--bulk-insert source table (same node)",
+   sql    => "select * from bri.t order by id",
+   expect => [ $max_auto_inc_row ],
+);
+
+check_rows(
+   name   => "--bulk-insert dest table (same node)",
+   sql    => "select * from bri.t_arch order by id",
+   expect => $expected_rows,
+);
+
+# To another node
+
+$sb->load_file('node1', "t/pt-archiver/samples/bulk_regular_insert.sql");
+
+output(
+   sub {
+      pt_archiver::main(@args, '--source', "F=$node1_cnf,D=bri,t=t,L=1",
+         '--dest', "F=$node2_cnf,t=t_arch", qw(--bulk-insert --limit 3))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "--bulk-insert source table (cross-node)",
+   sql    => "select * from bri.t order by id",
+   expect => [ $max_auto_inc_row ],
+);
+
+check_rows(
+   name   => "--bulk-insert dest table (cross-node)",
+   sql    => "select * from bri.t_arch order by id",
+   expect => $expected_rows,
+);
+
+# #############################################################################
+# --bulk-delete
+# #############################################################################
+
+# Same node
+
+$sb->load_file('node2', 't/pt-archiver/samples/table5.sql');
+$expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM test.table_5 ORDER BY a,b,c,d");
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_5,F=$node1_cnf",
+         qw(--no-ascend --limit 50 --bulk-delete),
+         qw(--statistics --dest t=table_5_dest))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "--bulk-delete source table (same node)",
+   sql    => "select * from test.table_5",
+   expect => [],
+);
+
+check_rows(
+   name   => "--bulk-delete dest table (same node)",
+   sql    => "select * from test.table_5_dest order by a,b,c,d",
+   expect => $expected_rows,
+);
+
+# To another node
+
+$sb->load_file('node2', 't/pt-archiver/samples/table5.sql');
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_5,F=$node1_cnf",
+         qw(--no-ascend --limit 50 --bulk-delete),
+         qw(--statistics), '--dest', "F=$node2_cnf,t=table_5_dest")
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "--bulk-delete source table (cross-node)",
+   sql    => "select * from test.table_5",
+   expect => [],
+);
+
+check_rows(
+   name   => "--bulk-delete dest table (cross-node)",
+   sql    => "select * from test.table_5_dest order by a,b,c,d",
+   expect => $expected_rows,
+);
 
 # #############################################################################
 # Done.
 # #############################################################################
-$sb->stop_sandbox(qw(node4 node5));
+$sb->wipe_clean($node1_dbh);
 ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
 
 done_testing;
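The check_rows helper introduced above replaces the old pairwise node4/node5 is_deeply comparisons: it calls $sb->wait_for_slaves, then asserts that node1, node2, and node3 all return the expected result set, dumping the offending rows on failure. Callers use it like this (the call is taken directly from the hunk above; the comments are editorial):

check_rows(
   name   => "Purged all rows",                        # label, reported per node
   sql    => "SELECT * FROM test.table_1 ORDER BY a",  # run on all three nodes
   expect => [],                                       # rows every node must return
);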
@@ -11,6 +11,6 @@ create table a (
    c char(16)
 ) charset=utf8;
 insert into t values
-   (null, "が"),
-   (null, "が"),
-   (null, "が");
+   (1, "が"),
+   (2, "が"),
+   (3, "が");
@@ -4,7 +4,7 @@ CREATE TABLE issue_131_src (
    id INT AUTO_INCREMENT PRIMARY KEY,
    name varchar(8)
 );
-INSERT INTO issue_131_src VALUES (null,'aaa'),(null,'bbb'),(null,'zzz');
+INSERT INTO issue_131_src VALUES (1,'aaa'),(2,'bbb'),(3,'zzz');
 
 DROP TABLE IF EXISTS issue_131_dst;
 CREATE TABLE issue_131_dst (
@@ -2,6 +2,6 @@ use test;
 
 -- This test uses an auto_increment colum to test --safeautoinc.
 drop table if exists table_12;
-create table table_12( a int not null auto_increment primary key, b int);
-insert into table_12(b) values(1),(1),(1);
+create table table_12( a int not null auto_increment primary key, b int) engine=innodb;
+insert into table_12(a,b) values(1,1),(2,1),(3,1);
@@ -20,10 +20,7 @@ $ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1;
 
 use PerconaTest;
 use Sandbox;
 
 require "$trunk/bin/pt-table-checksum";
-# Do this after requiring ptc, since it uses Mo
-require VersionParser;
 
 my $dp = new DSNParser(opts=>$dsn_opts);
 my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
@@ -31,12 +28,7 @@ my $node1 = $sb->get_dbh_for('node1');
 my $node2 = $sb->get_dbh_for('node2');
 my $node3 = $sb->get_dbh_for('node3');
 
-my $db_flavor = VersionParser->new($node1)->flavor();
-
-if ( $db_flavor !~ /XtraDB Cluster/ ) {
-   plan skip_all => "PXC tests";
-}
-elsif ( !$node1 ) {
+if ( !$node1 ) {
    plan skip_all => 'Cannot connect to cluster node1';
 }
 elsif ( !$node2 ) {
@@ -45,6 +37,9 @@ elsif ( !$node2 ) {
 elsif ( !$node3 ) {
    plan skip_all => 'Cannot connect to cluster node3';
 }
+elsif ( !$sb->is_cluster_mode ) {
+   plan skip_all => "PXC tests";
+}
 
 # The sandbox servers run with lock_wait_timeout=3 and it's not dynamic
 # so we need to specify --lock-wait-timeout=3 else the tool will die.
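Per that closing comment, the sandbox servers run with lock_wait_timeout=3 and the variable is not dynamic, so every invocation in this pt-table-checksum test must pass --lock-wait-timeout=3. A hedged sketch of such a call in the style of this test suite (the DSN is an assumed sandbox value, not from the commit):

# Illustrative only: the DSN below is an assumed sandbox default.
my $node1_dsn = 'h=127.0.0.1,P=12345,u=msandbox,p=msandbox';
$output = output(
   sub { pt_table_checksum::main($node1_dsn, qw(--lock-wait-timeout=3)) },
   stderr => 1,
);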