From 72a129bce125e8553dbb20a64d599fd0eec847a9 Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Fri, 30 Nov 2012 18:13:04 +0000 Subject: [PATCH 1/3] Add is_cluster_mode() to lib/Sandbox.pm. --- lib/Sandbox.pm | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/Sandbox.pm b/lib/Sandbox.pm index a7e5a733..263be500 100644 --- a/lib/Sandbox.pm +++ b/lib/Sandbox.pm @@ -409,19 +409,24 @@ sub clear_genlogs { return; } +sub is_cluster_mode { + my ($self) = @_; + return 0 unless $self->is_cluster_node('node1'); + return 0 unless $self->is_cluster_node('node2'); + return 0 unless $self->is_cluster_node('node3'); + return 1; +} sub is_cluster_node { my ($self, $server) = @_; - + my $sql = "SHOW VARIABLES LIKE 'wsrep_on'"; PTDEBUG && _d($sql); my $row = $self->use($server, qq{-ss -e "$sql"}); PTDEBUG && _d($row); $row = [split " ", $row]; - - return $row && $row->[1] - ? ($row->[1] eq 'ON' || $row->[1] eq '1') - : 0; + + return $row && $row->[1] && ($row->[1] eq 'ON' || $row->[1] eq '1'); } sub can_load_data { From a9ccca199d785b6e7ef113181f9d8ae083ee9f42 Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Fri, 30 Nov 2012 18:13:25 +0000 Subject: [PATCH 2/3] Rewrite t/pt-archiver/pxc.t. --- t/pt-archiver/pxc.t | 457 ++++++++++++++++++++++++-------------------- 1 file changed, 255 insertions(+), 202 deletions(-) diff --git a/t/pt-archiver/pxc.t b/t/pt-archiver/pxc.t index 7d8f71c1..15c45921 100644 --- a/t/pt-archiver/pxc.t +++ b/t/pt-archiver/pxc.t @@ -12,256 +12,309 @@ use English qw(-no_match_vars); use Test::More; use Time::HiRes qw(time); +# Hostnames make testing less accurate. Tests need to see +# that such-and-such happened on specific slave hosts, but +# the sandbox servers are all on one host so all slaves have +# the same hostname. 
+$ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1; + use PerconaTest; use Sandbox; use Data::Dumper; require "$trunk/bin/pt-archiver"; -my $dp = new DSNParser(opts=>$dsn_opts); -my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); +my $dp = new DSNParser(opts=>$dsn_opts); +my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); +my $node1_dbh = $sb->get_dbh_for('node1'); +my $node2_dbh = $sb->get_dbh_for('node2'); +my $node3_dbh = $sb->get_dbh_for('node3'); -my $node1 = $sb->get_dbh_for('node1'); -my $db_flavor = VersionParser->new($node1)->flavor(); - -if ( $db_flavor !~ /XtraDB Cluster/ ) { +if ( !$node1_dbh ) { + plan skip_all => 'Cannot connect to cluster node1'; +} +elsif ( !$node2_dbh ) { + plan skip_all => 'Cannot connect to cluster node2'; +} +elsif ( !$node3_dbh ) { + plan skip_all => 'Cannot connect to cluster node3'; +} +elsif ( !$sb->is_cluster_mode ) { plan skip_all => "PXC tests"; } -my $c = $sb->start_cluster( - nodes => [qw(node4 node5)], - env => q/CLUSTER_NAME="pt_archiver_cluster"/, -); - -my $node4_dbh = $c->{node4}->{dbh}; -my $node5_dbh = $c->{node5}->{dbh}; - -# Set this up so ->wait_for_slaves works -$node4_dbh->do("CREATE DATABASE IF NOT EXISTS percona_test"); -$node4_dbh->do("CREATE TABLE IF NOT EXISTS percona_test.sentinel(id int primary key, ping varchar(64) not null default '')"); -my ($ping) = $node4_dbh->selectrow_array("SELECT MD5(RAND())"); -$node4_dbh->do("INSERT INTO percona_test.sentinel(id, ping) values(1, '$ping') ON DUPLICATE KEY UPDATE ping='$ping'"); -sleep 1 until eval { $node5_dbh->selectrow_array("SELECT * FROM percona_test.sentinel") }; - my $output; my $count; my $sql; -my $cnf = $sb->config_file_for("node4"); +my $rows; +my $node1_cnf = $sb->config_file_for("node1"); +my $node2_cnf = $sb->config_file_for("node2"); my @args = qw(--where 1=1); -$sb->create_dbs($node4_dbh, ['test']); +$sb->create_dbs($node1_dbh, ['test']); -# ########################################################################### -# These are 
roughly the same tests as basics.t, but we also check that the -# other ndoes got the right data. -# ########################################################################### +sub check_rows { + my (%args) = @_; + my @required_args = qw(name sql expect); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($name, $sql, $expect) = @args{@required_args}; -# Test --why-quit and --statistics output -$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql'); -$sb->wait_for_slaves(master => 'node4', slave => 'node5'); -$output = output(sub {pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$cnf", qw(--purge --why-quit --statistics)) }); -like($output, qr/Started at \d/, 'Start timestamp'); -like($output, qr/Source:/, 'source'); -like($output, qr/SELECT 4\nINSERT 0\nDELETE 4\n/, 'row counts'); -like($output, qr/Exiting because there are no more rows/, 'Exit reason'); + $sb->wait_for_slaves; -$sql = "SELECT * FROM test.table_1"; -$sb->wait_for_slaves(master => 'node4', slave => 'node5'); -my ($m, $n); -is_deeply( - $m = $node4_dbh->selectall_arrayref($sql), - $n = $node5_dbh->selectall_arrayref($sql), - "Node4 & Node5 remain the same after --purge" -); - -# Test --no-delete. 
-$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql'); -output(sub {pt_archiver::main(@args, qw(--no-delete --purge --source), "D=test,t=table_1,F=$cnf") }); -$sb->wait_for_slaves(master => 'node4', slave => 'node5'); -is_deeply( - $node4_dbh->selectall_arrayref($sql), - $node5_dbh->selectall_arrayref($sql), - "Node4 & Node5 remain the same after --dest" -); - -# --dest -$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql'); -output(sub {pt_archiver::main(@args, qw(--statistics --source), "D=test,t=table_1,F=$cnf", qw(--dest t=table_2)) }); -$sb->wait_for_slaves(master => 'node4', slave => 'node5'); -$sql = "SELECT * FROM test.table_1, test.table_2"; -is_deeply( - $node4_dbh->selectall_arrayref($sql), - $node5_dbh->selectall_arrayref($sql), - "Node4 & Node5 remain the same after --dest" -); - -# ############################################################################# -# Bug 903387: pt-archiver doesn't honor b=1 flag to create SQL_LOG_BIN statement -# ############################################################################# -SKIP: { - $sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql"); - $sb->wait_for_slaves(master => 'node4', slave => 'node5'); - - my $original_rows = $node5_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id"); - my $original_no_id = $node5_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id"); - is_deeply( - $original_no_id, - [ - ['aa', '11:11:11'], - ['bb', '11:11:12'], - ['cc', '11:11:13'], - ['dd', '11:11:14'], - ['ee', '11:11:15'], - ['ff', '11:11:16'], - ['gg', '11:11:17'], - ['hh', '11:11:18'], - ['ii', '11:11:19'], - ['jj', '11:11:10'], - ], - "Bug 903387: node5 has rows" - ); - - $output = output( - sub { pt_archiver::main( - '--source', "D=bri,L=1,t=t,F=$cnf,b=1", - '--dest', "D=bri,t=t_arch", - qw(--where 1=1 --replace --commit-each --bulk-insert --bulk-delete), - qw(--limit 10)) }, - ); - - $sb->wait_for_slaves(master => 'node4', slave => 'node5'); - - my $rows = 
$node4_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id"); + my $rows = $node1_dbh->selectall_arrayref($sql); is_deeply( $rows, - [ - ['jj', '11:11:10'], - ], - "Bug 903387: rows deleted on node4" + $expect, + "$name on node1" ) or diag(Dumper($rows)); - $rows = $node5_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id"); + $rows = $node2_dbh->selectall_arrayref($sql); is_deeply( $rows, - $original_rows, - "Bug 903387: node5 still has rows" + $expect, + "$name on node2" ) or diag(Dumper($rows)); - $sql = "SELECT * FROM bri.t_arch ORDER BY id"; + $rows = $node3_dbh->selectall_arrayref($sql); is_deeply( - $node5_dbh->selectall_arrayref($sql), - $node4_dbh->selectall_arrayref($sql), - "Bug 903387: node5 has t_arch" - ); - - $sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql"); - $sb->wait_for_slaves(master => 'node4', slave => 'node5'); - output( - sub { pt_archiver::main( - '--source', "D=bri,L=1,t=t,F=$cnf,b=1", - '--dest', "D=bri,t=t_arch,b=1", - qw(--where 1=1 --replace --commit-each --bulk-insert --bulk-delete), - qw(--limit 10)) }, - ); - - is_deeply( - $node5_dbh->selectall_arrayref("SELECT * FROM bri.t_arch ORDER BY id"), - [], - "Bug 903387: ...unless b=1 was also specified for --dest" - ); + $rows, + $expect, + "$name on node3" + ) or diag(Dumper($rows)); } +# ########################################################################### +# Purge rows. +# ########################################################################### + +$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql'); +$node1_dbh->do("INSERT INTO test.table_2 SELECT * FROM test.table_1"); + +# Since there's no auto-inc column, all rows should be purged on all nodes. 
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         qw(--purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Purged all rows",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => [],
+);
+
+# table_2 has an auto-inc, so all rows less the max auto-inc row
+# should be purged on all nodes. This is due to --[no]safe-auto-increment.
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_2,F=$node1_cnf",
+         qw(--purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Purged rows less max auto-inc",
+   sql    => "SELECT * FROM test.table_2 ORDER BY a",
+   expect => [[qw(4 2 3), "\n"]],
+);
+
+# ###########################################################################
+# Do not purge rows.
+# ###########################################################################
+
+$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
+my $expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM test.table_1 ORDER BY a");
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         qw(--no-delete --purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "--no-delete left all rows",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => $expected_rows,
+);
+
+# #############################################################################
+# Archive rows to another table
+# #############################################################################
+
+# Presume the previous test ^ left tables1-4.sql loaded and that $expected_rows
+# is still the real, expected rows. 
+ +# Same node + +$output = output( + sub { + pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf", + qw(--dest t=table_2)) + }, + stderr => 1, +); + +check_rows( + name => "Rows purged from table_1 (same node)", + sql => "SELECT * FROM test.table_1 ORDER BY a", + expect => [], +); + +check_rows( + name => "Rows archived to table_2 (same node)", + sql => "SELECT * FROM test.table_2 ORDER BY a", + expect => $expected_rows, +); + +# To another node + +$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql'); +$expected_rows = $node1_dbh->selectall_arrayref( + "SELECT * FROM test.table_1 ORDER BY a"); + +$output = output( + sub { + pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf", + '--dest', "F=$node2_cnf,D=test,t=table_2") + }, + stderr => 1, +); + +check_rows( + name => "Rows purged from table_1 (cross-node)", + sql => "SELECT * FROM test.table_1 ORDER BY a", + expect => [], +); + +check_rows( + name => "Rows archived to table_2 (cross-node)", + sql => "SELECT * FROM test.table_2 ORDER BY a", + expect => $expected_rows, +); + +# ############################################################################# +# --bulk-insert +# ############################################################################# + +# Same node + +$sb->load_file('node1', "t/pt-archiver/samples/bulk_regular_insert.sql"); +$expected_rows = $node1_dbh->selectall_arrayref( + "SELECT * FROM bri.t ORDER BY id"); +# The max auto-inc col won't be archived, so: +my $max_auto_inc_row = pop @$expected_rows; output( - sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1", qw(--dest t=t_arch --where 1=1 --bulk-insert --limit 3)) }, -); -$sb->wait_for_slaves(master => 'node4', slave => 'node5'); - -$sql = 'select * from bri.t order by id'; -is_deeply( - $node5_dbh->selectall_arrayref($sql), - $node4_dbh->selectall_arrayref($sql), - "--bulk-insert works as expected on the source table" + sub { + pt_archiver::main(@args, '--source', "F=$node1_cnf,D=bri,t=t,L=1", 
+ qw(--dest t=t_arch --bulk-insert --limit 3)) + }, + stderr => 1, ); -$sql = 'select * from bri.t_arch order by id'; -is_deeply( - $node5_dbh->selectall_arrayref($sql), - $node4_dbh->selectall_arrayref($sql), - "...and on the dest table" +check_rows( + name => "--bulk-insert source table (same node)", + sql => "select * from bri.t order by id", + expect => [ $max_auto_inc_row ], ); -# ############################################################################# -# Test --bulk-delete -# ############################################################################# - -$sb->load_file('node4', 't/pt-archiver/samples/table5.sql'); -$output = output( - sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-delete --where 1=1), "--source", "D=test,t=table_5,F=$cnf", qw(--statistics --dest t=table_5_dest)) }, -); -$sb->wait_for_slaves(master => 'node4', slave => 'node5'); - -$sql = 'select * from test.table_5'; -is_deeply( - $node5_dbh->selectall_arrayref($sql), - $node4_dbh->selectall_arrayref($sql), - "--bulk-delete works as expected on the source table" +check_rows( + name => "--bulk-insert dest table (same node)", + sql => "select * from bri.t_arch order by id", + expect => $expected_rows, ); -$sql = 'select * from test.table_5_dest'; -is_deeply( - $node5_dbh->selectall_arrayref($sql), - $node4_dbh->selectall_arrayref($sql), - "...and on the dest table" -); +# To another node -# Same as above, but with a twist: --dest points to the second node. 
We should -# get the archieved rows in the first node as well +$sb->load_file('node1', "t/pt-archiver/samples/bulk_regular_insert.sql"); -my $node5_dsn = $sb->dsn_for('node5'); -my $node5_cnf = $sb->config_file_for('node5'); - -$sb->load_file('node4', 't/pt-archiver/samples/table5.sql'); -$sb->wait_for_slaves(master => 'node4', slave => 'node5'); -$output = output( - sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-delete --where 1=1), - "--source", "D=test,t=table_5,F=$cnf", qw(--statistics), - "--dest", "$node5_dsn,D=test,t=table_5_dest,F=$node5_cnf") }, -); -# Wait for the --dest table to replicate back -$sb->wait_for_slaves(master => 'node5', slave => 'node4'); - -$sql = 'select * from test.table_5_dest'; -is_deeply( - $node5_dbh->selectall_arrayref($sql), - $node4_dbh->selectall_arrayref($sql), - "--bulk-delete with --dest on the second node, archive ends up in node1 as well" -); - -$sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql"); -$sb->wait_for_slaves(master => 'node4', slave => 'node5'); output( - sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1", - "--dest", "$node5_dsn,D=bri,t=t_arch,F=$node5_cnf", - qw(--where 1=1 --bulk-insert --limit 3)) }, + sub { + pt_archiver::main(@args, '--source', "F=$node1_cnf,D=bri,t=t,L=1", + '--dest', "F=$node2_cnf,t=t_arch", qw(--bulk-insert --limit 3)) + }, + stderr => 1, ); -$sb->wait_for_slaves(master => 'node5', slave => 'node4'); -$sql = 'select * from bri.t_arch'; -is_deeply( - $node5_dbh->selectall_arrayref($sql), - $node4_dbh->selectall_arrayref($sql), - "--bulk-insert with --dest on the second node, archive ends up in node1 as well" +check_rows( + name => "--bulk-insert source table (cross-node)", + sql => "select * from bri.t order by id", + expect => [ $max_auto_inc_row ], +); + +check_rows( + name => "--bulk-insert dest table (cross-node)", + sql => "select * from bri.t_arch order by id", + expect => $expected_rows, +); + + +# 
############################################################################# +# --bulk-delete +# ############################################################################# + +# Same node + +$sb->load_file('node2', 't/pt-archiver/samples/table5.sql'); +$expected_rows = $node1_dbh->selectall_arrayref( + "SELECT * FROM test.table_5 ORDER BY a,b,c,d"); + +$output = output( + sub { + pt_archiver::main(@args, '--source', "D=test,t=table_5,F=$node1_cnf", + qw(--no-ascend --limit 50 --bulk-delete), + qw(--statistics --dest t=table_5_dest)) + }, + stderr => 1, +); + +check_rows( + name => "--bulk-delete source table (same node)", + sql => "select * from test.table_5", + expect => [], +); + +check_rows( + name => "--bulk-delete dest table (same node)", + sql => "select * from test.table_5_dest order by a,b,c,d", + expect => $expected_rows, +); + +# To another node + +$sb->load_file('node2', 't/pt-archiver/samples/table5.sql'); + +$output = output( + sub { + pt_archiver::main(@args, '--source', "D=test,t=table_5,F=$node1_cnf", + qw(--no-ascend --limit 50 --bulk-delete), + qw(--statistics), '--dest', "F=$node2_cnf,t=table_5_dest") + }, + stderr => 1, +); + +check_rows( + name => "--bulk-delete source table (cross-node)", + sql => "select * from test.table_5", + expect => [], +); + +check_rows( + name => "--bulk-delete dest table (cross-node)", + sql => "select * from test.table_5_dest order by a,b,c,d", + expect => $expected_rows, ); # ############################################################################# # Done. # ############################################################################# -$sb->stop_sandbox(qw(node4 node5)); +$sb->wipe_clean($node1_dbh); ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); - done_testing; From f6fd2b7470dc9875ff33ef46121d93bd254e6660 Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Fri, 30 Nov 2012 18:14:06 +0000 Subject: [PATCH 3/3] Only run t/pt-table-checksum/pxc.t if in cluster mode. 
--- t/pt-table-checksum/pxc.t | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/t/pt-table-checksum/pxc.t b/t/pt-table-checksum/pxc.t index 46a2d422..e6c3407c 100644 --- a/t/pt-table-checksum/pxc.t +++ b/t/pt-table-checksum/pxc.t @@ -20,10 +20,7 @@ $ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1; use PerconaTest; use Sandbox; - require "$trunk/bin/pt-table-checksum"; -# Do this after requiring ptc, since it uses Mo -require VersionParser; my $dp = new DSNParser(opts=>$dsn_opts); my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); @@ -31,12 +28,7 @@ my $node1 = $sb->get_dbh_for('node1'); my $node2 = $sb->get_dbh_for('node2'); my $node3 = $sb->get_dbh_for('node3'); -my $db_flavor = VersionParser->new($node1)->flavor(); - -if ( $db_flavor !~ /XtraDB Cluster/ ) { - plan skip_all => "PXC tests"; -} -elsif ( !$node1 ) { +if ( !$node1 ) { plan skip_all => 'Cannot connect to cluster node1'; } elsif ( !$node2 ) { @@ -45,6 +37,9 @@ elsif ( !$node2 ) { elsif ( !$node3 ) { plan skip_all => 'Cannot connect to cluster node3'; } +elsif ( !$sb->is_cluster_mode ) { + plan skip_all => "PXC tests"; +} # The sandbox servers run with lock_wait_timeout=3 and it's not dynamic # so we need to specify --lock-wait-timeout=3 else the tool will die.