From 771059a35e3765ad696894851d9756ab82e25987 Mon Sep 17 00:00:00 2001
From: Brian Fraser
Date: Wed, 21 Nov 2012 13:21:30 -0300
Subject: [PATCH 01/11] t/pt-archiver/basics.t: Stop checking hardcoded indexes, to account for PXC's use of an autoinc offset

---
 t/pt-archiver/basics.t | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/t/pt-archiver/basics.t b/t/pt-archiver/basics.t
index 6b10304a..b0101468 100644
--- a/t/pt-archiver/basics.t
+++ b/t/pt-archiver/basics.t
@@ -187,20 +187,21 @@ SKIP: {
    $sb->load_file('master', "t/pt-archiver/samples/bulk_regular_insert.sql");
    $sb->wait_for_slaves();
 
-   my $original_rows = $slave1_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+   my $original_rows  = $slave1_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+   my $original_no_id = $slave1_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
    is_deeply(
-      $original_rows,
+      $original_no_id,
       [
-         [1, 'aa', '11:11:11'],
-         [2, 'bb', '11:11:12'],
-         [3, 'cc', '11:11:13'],
-         [4, 'dd', '11:11:14'],
-         [5, 'ee', '11:11:15'],
-         [6, 'ff', '11:11:16'],
-         [7, 'gg', '11:11:17'],
-         [8, 'hh', '11:11:18'],
-         [9, 'ii', '11:11:19'],
-         [10,'jj', '11:11:10'],
+         ['aa', '11:11:11'],
+         ['bb', '11:11:12'],
+         ['cc', '11:11:13'],
+         ['dd', '11:11:14'],
+         ['ee', '11:11:15'],
+         ['ff', '11:11:16'],
+         ['gg', '11:11:17'],
+         ['hh', '11:11:18'],
+         ['ii', '11:11:19'],
+         ['jj', '11:11:10'],
       ],
       "Bug 903387: slave has rows"
    );
@@ -213,11 +214,11 @@ SKIP: {
          qw(--limit 10)) },
    );
 
-   $rows = $master_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+   $rows = $master_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
    is_deeply(
       $rows,
       [
-         [10,'jj', '11:11:10'],
+         ['jj', '11:11:10'],
       ],
       "Bug 903387: rows deleted on master"
    ) or diag(Dumper($rows));

From 7efb2dbf375bc289a651a150fbfc53a281385293 Mon Sep 17 00:00:00 2001
From: Brian Fraser
Date: Wed, 21 Nov 2012 13:21:56 -0300
Subject: [PATCH 02/11] t/pt-archiver/bulk_insert.t: Stop checking hardcoded indexes

---
 t/pt-archiver/bulk_insert.t | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/t/pt-archiver/bulk_insert.t b/t/pt-archiver/bulk_insert.t
index 00409975..e76be09d 100644
--- a/t/pt-archiver/bulk_insert.t
+++ b/t/pt-archiver/bulk_insert.t
@@ -59,6 +59,10 @@ like($output, qr/copy\s+$chks/, 'copy checksum');
 # Issue 1260: mk-archiver --bulk-insert data loss
 # ############################################################################
 $sb->load_file('master', 't/pt-archiver/samples/bulk_regular_insert.sql');
+my $orig_rows = $dbh->selectall_arrayref('select id from bri.t order by id');
+my $lt_8 = [ grep { $_->[0] < 8 } @$orig_rows ];
+my $ge_8 = [ grep { $_->[0] >= 8 } @$orig_rows ];
+
 $output = output(
    sub { pt_archiver::main(
       '--where', "id < 8", qw(--limit 100000 --txn-size 1000),
@@ -69,14 +73,14 @@ $output = output(
 $rows = $dbh->selectall_arrayref('select id from bri.t order by id');
 is_deeply(
    $rows,
-   [[8],[9],[10]],
+   $ge_8,
    "--bulk-insert left 3 rows (issue 1260)"
 );
 
 $rows = $dbh->selectall_arrayref('select id from bri.t_arch order by id');
 is_deeply(
    $rows,
-   [[1],[2],[3],[4],[5],[6],[7]],
+   $lt_8,
    "--bulk-insert archived 7 rows (issue 1260)"
 );

From ef458d6bc2929a52f0b014f480eac178f606e74f Mon Sep 17 00:00:00 2001
From: Brian Fraser
Date: Wed, 21 Nov 2012 13:22:25 -0300
Subject: [PATCH 03/11] t/pt-archiver/bulk_regular_insert.t: Stop checking hardcoded indexes

---
 t/pt-archiver/bulk_regular_insert.t | 28 ++++++++++++++--------------
 1 file changed, 14
insertions(+), 14 deletions(-) diff --git a/t/pt-archiver/bulk_regular_insert.t b/t/pt-archiver/bulk_regular_insert.t index 6c1e6705..70bde6b0 100644 --- a/t/pt-archiver/bulk_regular_insert.t +++ b/t/pt-archiver/bulk_regular_insert.t @@ -39,27 +39,27 @@ output( sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1", qw(--dest t=t_arch --where 1=1 --bulk-insert --limit 3)) }, ); -my $t_rows = $dbh->selectall_arrayref('select * from t order by id'); -my $t_arch_rows = $dbh->selectall_arrayref('select * from t_arch order by id'); +my $t_rows = $dbh->selectall_arrayref('select c,t from t order by id'); +my $t_arch_rows = $dbh->selectall_arrayref('select c,t from t_arch order by id'); is_deeply( $t_rows, - [ ['10', 'jj', '11:11:10'] ], + [ ['jj', '11:11:10'] ], "Table after normal bulk insert" ); is_deeply( $t_arch_rows, [ - ['1','aa','11:11:11'], - ['2','bb','11:11:12'], - ['3','cc','11:11:13'], - ['4','dd','11:11:14'], - ['5','ee','11:11:15'], - ['6','ff','11:11:16'], - ['7','gg','11:11:17'], - ['8','hh','11:11:18'], - ['9','ii','11:11:19'], + ['aa','11:11:11'], + ['bb','11:11:12'], + ['cc','11:11:13'], + ['dd','11:11:14'], + ['ee','11:11:15'], + ['ff','11:11:16'], + ['gg','11:11:17'], + ['hh','11:11:18'], + ['ii','11:11:19'], ], "Archive table after normal bulk insert" ); @@ -72,8 +72,8 @@ $dbh->do('use bri'); `$cmd --source F=$cnf,D=bri,t=t,L=1 --dest t=t_arch,m=bulk_regular_insert --where "1=1" --bulk-insert --limit 3`; -my $bri_t_rows = $dbh->selectall_arrayref('select * from t order by id'); -my $bri_t_arch_rows = $dbh->selectall_arrayref('select * from t_arch order by id'); +my $bri_t_rows = $dbh->selectall_arrayref('select c,t from t order by id'); +my $bri_t_arch_rows = $dbh->selectall_arrayref('select c,t from t_arch order by id'); is_deeply( $bri_t_rows, From 98190d25e74a67a6bdd37271dfd2aa9652e7ad9f Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Wed, 21 Nov 2012 13:23:05 -0300 Subject: [PATCH 04/11] t/pt-archiver/issue_1225.t: Don't assume that the first two values in an autoinc table will have indexes 1 & 2 --- t/pt-archiver/issue_1225.t | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/t/pt-archiver/issue_1225.t b/t/pt-archiver/issue_1225.t index f99489ec..857e3b1b 100644 --- a/t/pt-archiver/issue_1225.t +++ b/t/pt-archiver/issue_1225.t @@ -34,11 +34,11 @@ my $output; $sb->load_file('master', 't/pt-archiver/samples/issue_1225.sql'); $dbh->do('set names "utf8"'); -my $original_rows = $dbh->selectall_arrayref('select * from issue_1225.t where i in (1, 2)'); +my $original_rows = $dbh->selectall_arrayref('select c from issue_1225.t limit 2'); is_deeply( $original_rows, - [ [ 1, 'が'], # Your terminal must be UTF8 to see this Japanese character. - [ 2, 'が'], + [ ['が'], # Your terminal must be UTF8 to see this Japanese character. 
+ ['が'], ], "Inserted UTF8 data" ); @@ -61,10 +61,10 @@ $output = output( }, ); -my $archived_rows = $dbh->selectall_arrayref('select * from issue_1225.a where i in (1, 2)'); +my $archived_rows = $dbh->selectall_arrayref('select c from issue_1225.a limit 2'); ok( - $original_rows->[0]->[1] ne $archived_rows->[0]->[1], + $original_rows->[0]->[0] ne $archived_rows->[0]->[0], "UTF8 characters lost when cxn isn't also UTF8" ); @@ -78,7 +78,7 @@ $output = output( }, ); -$archived_rows = $dbh->selectall_arrayref('select * from issue_1225.a where i in (1, 2)'); +$archived_rows = $dbh->selectall_arrayref('select c from issue_1225.a limit 2'); is_deeply( $original_rows, From 514feb296c9ccf205e4f3436a5945a20801f8c72 Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Wed, 21 Nov 2012 13:23:26 -0300 Subject: [PATCH 05/11] t/pt-archiver/issue_1229.t: Stop checking hardcoded indexes --- t/pt-archiver/issue_1229.t | 9 +++++---- t/pt-archiver/samples/issue_1229_file.txt | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/t/pt-archiver/issue_1229.t b/t/pt-archiver/issue_1229.t index a292688c..598de37e 100644 --- a/t/pt-archiver/issue_1229.t +++ b/t/pt-archiver/issue_1229.t @@ -42,11 +42,11 @@ my $file = "/tmp/mk-archiver-file.txt"; $sb->load_file('master', 't/pt-archiver/samples/issue_1225.sql'); $dbh->do('set names "utf8"'); -my $original_rows = $dbh->selectall_arrayref('select * from issue_1225.t where i in (1, 2)'); +my $original_rows = $dbh->selectall_arrayref('select c from issue_1225.t where i in (1, 2)'); is_deeply( $original_rows, - [ [ 1, 'が'], # Your terminal must be UTF8 to see this Japanese character. - [ 2, 'が'], + [ [ 'が'], # Your terminal must be UTF8 to see this Japanese character. + [ 'が'], ], "Inserted UTF8 data" ); @@ -62,7 +62,8 @@ $output = output( stderr => 1, ); -my $got = `cat $file`; +my $got = slurp_file($file); +$got =~ s/^\d+//gsm; ok( no_diff( $got, diff --git a/t/pt-archiver/samples/issue_1229_file.txt b/t/pt-archiver/samples/issue_1229_file.txt index 105422c6..2dd44874 100644 --- a/t/pt-archiver/samples/issue_1229_file.txt +++ b/t/pt-archiver/samples/issue_1229_file.txt @@ -1,2 +1,2 @@ -1 が -2 が + が + が From d496fbcf11bf977394f4d04beafb225855aac27c Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Thu, 29 Nov 2012 07:29:24 -0300 Subject: [PATCH 06/11] Tests for pt-archiver + PXC --- lib/Sandbox.pm | 12 +- t/pt-archiver/pxc.t | 267 ++++++++++++++++++++++++++++++++++++++ t/pt-table-checksum/pxc.t | 4 +- 3 files changed, 278 insertions(+), 5 deletions(-) create mode 100644 t/pt-archiver/pxc.t diff --git a/lib/Sandbox.pm b/lib/Sandbox.pm index a29c9528..210bdfe4 100644 --- a/lib/Sandbox.pm +++ b/lib/Sandbox.pm @@ -329,9 +329,9 @@ sub ok { # Dings a heartbeat on the master, and waits until the slave catches up fully. 
sub wait_for_slaves {
-   my ($self, $slave) = @_;
-   my $master_dbh = $self->get_dbh_for('master');
-   my $slave2_dbh = $self->get_dbh_for($slave || 'slave2');
+   my ($self, %args) = @_;
+   my $master_dbh = $self->get_dbh_for($args{master} || 'master');
+   my $slave2_dbh = $self->get_dbh_for($args{slave} || 'slave2');
    my ($ping) = $master_dbh->selectrow_array("SELECT MD5(RAND())");
    $master_dbh->do("UPDATE percona_test.sentinel SET ping='$ping' WHERE id=1");
    PerconaTest::wait_until(
@@ -529,6 +529,12 @@ sub port_for {
    return $port_for{$server};
 }
 
+sub config_file_for {
+   my ($self, $server) = @_;
+   my $port = $self->port_for($server);
+   return "/tmp/$port/my.sandbox.cnf"
+}
+
 sub _d {
    my ($package, undef, $line) = caller 0;
    @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
diff --git a/t/pt-archiver/pxc.t b/t/pt-archiver/pxc.t
new file mode 100644
index 00000000..7d8f71c1
--- /dev/null
+++ b/t/pt-archiver/pxc.t
@@ -0,0 +1,267 @@
+#!/usr/bin/env perl
+
+BEGIN {
+   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
+      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
+   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
+};
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+use Test::More;
+use Time::HiRes qw(time);
+
+use PerconaTest;
+use Sandbox;
+use Data::Dumper;
+require "$trunk/bin/pt-archiver";
+
+my $dp = new DSNParser(opts=>$dsn_opts);
+my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
+
+my $node1 = $sb->get_dbh_for('node1');
+my $db_flavor = VersionParser->new($node1)->flavor();
+
+if ( $db_flavor !~ /XtraDB Cluster/ ) {
+   plan skip_all => "PXC tests";
+}
+
+my $c = $sb->start_cluster(
+   nodes => [qw(node4 node5)],
+   env   => q/CLUSTER_NAME="pt_archiver_cluster"/,
+);
+
+my $node4_dbh = $c->{node4}->{dbh};
+my $node5_dbh = $c->{node5}->{dbh};
+
+# Set this up so ->wait_for_slaves works
+$node4_dbh->do("CREATE DATABASE IF NOT EXISTS percona_test");
+$node4_dbh->do("CREATE TABLE IF NOT EXISTS percona_test.sentinel(id int primary key, ping varchar(64) not null default '')");
+my ($ping) = $node4_dbh->selectrow_array("SELECT MD5(RAND())");
+$node4_dbh->do("INSERT INTO percona_test.sentinel(id, ping) values(1, '$ping') ON DUPLICATE KEY UPDATE ping='$ping'");
+sleep 1 until eval { $node5_dbh->selectrow_array("SELECT * FROM percona_test.sentinel") };
+
+my $output;
+my $count;
+my $sql;
+my $cnf = $sb->config_file_for("node4");
+my @args = qw(--where 1=1);
+
+$sb->create_dbs($node4_dbh, ['test']);
+
+# ###########################################################################
+# These are roughly the same tests as basics.t, but we also check that the
+# other nodes got the right data.
+# ###########################################################################
+
+# Test --why-quit and --statistics output
+$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
+$sb->wait_for_slaves(master => 'node4', slave => 'node5');
+$output = output(sub {pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$cnf", qw(--purge --why-quit --statistics)) });
+like($output, qr/Started at \d/, 'Start timestamp');
+like($output, qr/Source:/, 'source');
+like($output, qr/SELECT 4\nINSERT 0\nDELETE 4\n/, 'row counts');
+like($output, qr/Exiting because there are no more rows/, 'Exit reason');
+
+$sql = "SELECT * FROM test.table_1";
+$sb->wait_for_slaves(master => 'node4', slave => 'node5');
+my ($m, $n);
+is_deeply(
+   $m = $node4_dbh->selectall_arrayref($sql),
+   $n = $node5_dbh->selectall_arrayref($sql),
+   "Node4 & Node5 remain the same after --purge"
+);
+
+# Test --no-delete.
+$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
+output(sub {pt_archiver::main(@args, qw(--no-delete --purge --source), "D=test,t=table_1,F=$cnf") });
+$sb->wait_for_slaves(master => 'node4', slave => 'node5');
+is_deeply(
+   $node4_dbh->selectall_arrayref($sql),
+   $node5_dbh->selectall_arrayref($sql),
+   "Node4 & Node5 remain the same after --no-delete"
+);
+
+# --dest
+$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
+output(sub {pt_archiver::main(@args, qw(--statistics --source), "D=test,t=table_1,F=$cnf", qw(--dest t=table_2)) });
+$sb->wait_for_slaves(master => 'node4', slave => 'node5');
+$sql = "SELECT * FROM test.table_1, test.table_2";
+is_deeply(
+   $node4_dbh->selectall_arrayref($sql),
+   $node5_dbh->selectall_arrayref($sql),
+   "Node4 & Node5 remain the same after --dest"
+);
+
+# #############################################################################
+# Bug 903387: pt-archiver doesn't honor b=1 flag to create SQL_LOG_BIN statement
+# #############################################################################
+SKIP: {
+   $sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
+   $sb->wait_for_slaves(master => 'node4', slave => 'node5');
+
+   my $original_rows  = $node5_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+   my $original_no_id = $node5_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
+   is_deeply(
+      $original_no_id,
+      [
+         ['aa', '11:11:11'],
+         ['bb', '11:11:12'],
+         ['cc', '11:11:13'],
+         ['dd', '11:11:14'],
+         ['ee', '11:11:15'],
+         ['ff', '11:11:16'],
+         ['gg', '11:11:17'],
+         ['hh', '11:11:18'],
+         ['ii', '11:11:19'],
+         ['jj', '11:11:10'],
+      ],
+      "Bug 903387: node5 has rows"
+   );
+
+   $output = output(
+      sub { pt_archiver::main(
+         '--source', "D=bri,L=1,t=t,F=$cnf,b=1",
+         '--dest', "D=bri,t=t_arch",
+         qw(--where 1=1 --replace --commit-each --bulk-insert --bulk-delete),
+         qw(--limit 10)) },
+   );
+
+   $sb->wait_for_slaves(master => 'node4', slave => 'node5');
+
+   my $rows = $node4_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
+   is_deeply(
+      $rows,
+      [
+         ['jj', '11:11:10'],
+      ],
+      "Bug 903387: rows deleted on node4"
+   ) or diag(Dumper($rows));
+
+   $rows = $node5_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+   is_deeply(
+      $rows,
+      $original_rows,
+      "Bug 903387: node5 still has rows"
+   ) or diag(Dumper($rows));
+
+   $sql = "SELECT * FROM bri.t_arch ORDER BY id";
+   is_deeply(
+      $node5_dbh->selectall_arrayref($sql),
+      $node4_dbh->selectall_arrayref($sql),
+      "Bug 903387: node5 has t_arch"
+   );
+
+   $sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
+   $sb->wait_for_slaves(master
=> 'node4', slave => 'node5'); + output( + sub { pt_archiver::main( + '--source', "D=bri,L=1,t=t,F=$cnf,b=1", + '--dest', "D=bri,t=t_arch,b=1", + qw(--where 1=1 --replace --commit-each --bulk-insert --bulk-delete), + qw(--limit 10)) }, + ); + + is_deeply( + $node5_dbh->selectall_arrayref("SELECT * FROM bri.t_arch ORDER BY id"), + [], + "Bug 903387: ...unless b=1 was also specified for --dest" + ); +} + +# ############################################################################# +# Test --bulk-insert +# ############################################################################# + +$sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql"); + +output( + sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1", qw(--dest t=t_arch --where 1=1 --bulk-insert --limit 3)) }, +); +$sb->wait_for_slaves(master => 'node4', slave => 'node5'); + +$sql = 'select * from bri.t order by id'; +is_deeply( + $node5_dbh->selectall_arrayref($sql), + $node4_dbh->selectall_arrayref($sql), + "--bulk-insert works as expected on the source table" +); + +$sql = 'select * from bri.t_arch order by id'; +is_deeply( + $node5_dbh->selectall_arrayref($sql), + $node4_dbh->selectall_arrayref($sql), + "...and on the dest table" +); + +# ############################################################################# +# Test --bulk-delete +# ############################################################################# + +$sb->load_file('node4', 't/pt-archiver/samples/table5.sql'); +$output = output( + sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-delete --where 1=1), "--source", "D=test,t=table_5,F=$cnf", qw(--statistics --dest t=table_5_dest)) }, +); +$sb->wait_for_slaves(master => 'node4', slave => 'node5'); + +$sql = 'select * from test.table_5'; +is_deeply( + $node5_dbh->selectall_arrayref($sql), + $node4_dbh->selectall_arrayref($sql), + "--bulk-delete works as expected on the source table" +); + +$sql = 'select * from test.table_5_dest'; +is_deeply( + $node5_dbh->selectall_arrayref($sql), + $node4_dbh->selectall_arrayref($sql), + "...and on the dest table" +); + +# Same as above, but with a twist: --dest points to the second node. 
We should
+# get the archived rows in the first node as well
+
+my $node5_dsn = $sb->dsn_for('node5');
+my $node5_cnf = $sb->config_file_for('node5');
+
+$sb->load_file('node4', 't/pt-archiver/samples/table5.sql');
+$sb->wait_for_slaves(master => 'node4', slave => 'node5');
+$output = output(
+   sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-delete --where 1=1),
+         "--source", "D=test,t=table_5,F=$cnf", qw(--statistics),
+         "--dest", "$node5_dsn,D=test,t=table_5_dest,F=$node5_cnf") },
+);
+# Wait for the --dest table to replicate back
+$sb->wait_for_slaves(master => 'node5', slave => 'node4');
+
+$sql = 'select * from test.table_5_dest';
+is_deeply(
+   $node5_dbh->selectall_arrayref($sql),
+   $node4_dbh->selectall_arrayref($sql),
+   "--bulk-delete with --dest on the second node, archive ends up in node4 as well"
+);
+
+$sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
+$sb->wait_for_slaves(master => 'node4', slave => 'node5');
+output(
+   sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1",
+         "--dest", "$node5_dsn,D=bri,t=t_arch,F=$node5_cnf",
+         qw(--where 1=1 --bulk-insert --limit 3)) },
+);
+$sb->wait_for_slaves(master => 'node5', slave => 'node4');
+
+$sql = 'select * from bri.t_arch';
+is_deeply(
+   $node5_dbh->selectall_arrayref($sql),
+   $node4_dbh->selectall_arrayref($sql),
+   "--bulk-insert with --dest on the second node, archive ends up in node4 as well"
+);
+
+# #############################################################################
+# Done.
+# #############################################################################
+$sb->stop_sandbox(qw(node4 node5));
+ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
+
+done_testing;
diff --git a/t/pt-table-checksum/pxc.t b/t/pt-table-checksum/pxc.t
index df833958..46a2d422 100644
--- a/t/pt-table-checksum/pxc.t
+++ b/t/pt-table-checksum/pxc.t
@@ -202,7 +202,7 @@ $node2->do("set sql_log_bin=1");
 # Wait for the slave to apply the binlogs from node1 (its master).
 # Then change it so it's not consistent.
 PerconaTest::wait_for_table($slave_dbh, 'test.t');
-$sb->wait_for_slaves('cslave1');
+$sb->wait_for_slaves(master => 'node1', slave => 'cslave1');
 $slave_dbh->do("update test.t set c='zebra' where c='z'");
 
 # Another quick test first: the tool should complain about the slave's
@@ -262,7 +262,7 @@ $sb->stop_sandbox('cslave1');
 # Wait for the slave to apply the binlogs from node2 (its master).
 # Then change it so it's not consistent.
 PerconaTest::wait_for_table($slave_dbh, 'test.t');
-$sb->wait_for_slaves('cslave1');
+$sb->wait_for_slaves(master => 'node1', slave => 'cslave1');
 $slave_dbh->do("update test.t set c='zebra' where c='z'");
 
 ($row) = $slave_dbh->selectrow_array("select c from test.t order by c desc limit 1");

From 72a129bce125e8553dbb20a64d599fd0eec847a9 Mon Sep 17 00:00:00 2001
From: Daniel Nichter
Date: Fri, 30 Nov 2012 18:13:04 +0000
Subject: [PATCH 07/11] Add is_cluster_mode() to lib/Sandbox.pm.
--- lib/Sandbox.pm | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/Sandbox.pm b/lib/Sandbox.pm index a7e5a733..263be500 100644 --- a/lib/Sandbox.pm +++ b/lib/Sandbox.pm @@ -409,19 +409,24 @@ sub clear_genlogs { return; } +sub is_cluster_mode { + my ($self) = @_; + return 0 unless $self->is_cluster_node('node1'); + return 0 unless $self->is_cluster_node('node2'); + return 0 unless $self->is_cluster_node('node3'); + return 1; +} sub is_cluster_node { my ($self, $server) = @_; - + my $sql = "SHOW VARIABLES LIKE 'wsrep_on'"; PTDEBUG && _d($sql); my $row = $self->use($server, qq{-ss -e "$sql"}); PTDEBUG && _d($row); $row = [split " ", $row]; - - return $row && $row->[1] - ? ($row->[1] eq 'ON' || $row->[1] eq '1') - : 0; + + return $row && $row->[1] && ($row->[1] eq 'ON' || $row->[1] eq '1'); } sub can_load_data { From a9ccca199d785b6e7ef113181f9d8ae083ee9f42 Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Fri, 30 Nov 2012 18:13:25 +0000 Subject: [PATCH 08/11] Rewrite t/pt-archiver/pxc.t. --- t/pt-archiver/pxc.t | 457 ++++++++++++++++++++++++-------------------- 1 file changed, 255 insertions(+), 202 deletions(-) diff --git a/t/pt-archiver/pxc.t b/t/pt-archiver/pxc.t index 7d8f71c1..15c45921 100644 --- a/t/pt-archiver/pxc.t +++ b/t/pt-archiver/pxc.t @@ -12,256 +12,309 @@ use English qw(-no_match_vars); use Test::More; use Time::HiRes qw(time); +# Hostnames make testing less accurate. Tests need to see +# that such-and-such happened on specific slave hosts, but +# the sandbox servers are all on one host so all slaves have +# the same hostname. +$ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1; + use PerconaTest; use Sandbox; use Data::Dumper; require "$trunk/bin/pt-archiver"; -my $dp = new DSNParser(opts=>$dsn_opts); -my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); +my $dp = new DSNParser(opts=>$dsn_opts); +my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); +my $node1_dbh = $sb->get_dbh_for('node1'); +my $node2_dbh = $sb->get_dbh_for('node2'); +my $node3_dbh = $sb->get_dbh_for('node3'); -my $node1 = $sb->get_dbh_for('node1'); -my $db_flavor = VersionParser->new($node1)->flavor(); - -if ( $db_flavor !~ /XtraDB Cluster/ ) { +if ( !$node1_dbh ) { + plan skip_all => 'Cannot connect to cluster node1'; +} +elsif ( !$node2_dbh ) { + plan skip_all => 'Cannot connect to cluster node2'; +} +elsif ( !$node3_dbh ) { + plan skip_all => 'Cannot connect to cluster node3'; +} +elsif ( !$sb->is_cluster_mode ) { plan skip_all => "PXC tests"; } -my $c = $sb->start_cluster( - nodes => [qw(node4 node5)], - env => q/CLUSTER_NAME="pt_archiver_cluster"/, -); - -my $node4_dbh = $c->{node4}->{dbh}; -my $node5_dbh = $c->{node5}->{dbh}; - -# Set this up so ->wait_for_slaves works -$node4_dbh->do("CREATE DATABASE IF NOT EXISTS percona_test"); -$node4_dbh->do("CREATE TABLE IF NOT EXISTS percona_test.sentinel(id int primary key, ping varchar(64) not null default '')"); -my ($ping) = $node4_dbh->selectrow_array("SELECT MD5(RAND())"); -$node4_dbh->do("INSERT INTO percona_test.sentinel(id, ping) values(1, '$ping') ON DUPLICATE KEY UPDATE ping='$ping'"); -sleep 1 until eval { $node5_dbh->selectrow_array("SELECT * FROM percona_test.sentinel") }; - my $output; my $count; my $sql; -my $cnf = $sb->config_file_for("node4"); +my $rows; +my $node1_cnf = $sb->config_file_for("node1"); +my $node2_cnf = $sb->config_file_for("node2"); my @args = qw(--where 1=1); -$sb->create_dbs($node4_dbh, ['test']); +$sb->create_dbs($node1_dbh, ['test']); -# 
###########################################################################
-# These are roughly the same tests as basics.t, but we also check that the
-# other nodes got the right data.
-# ###########################################################################
+sub check_rows {
+   my (%args) = @_;
+   my @required_args = qw(name sql expect);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($name, $sql, $expect) = @args{@required_args};
 
-# Test --why-quit and --statistics output
-$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-$output = output(sub {pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$cnf", qw(--purge --why-quit --statistics)) });
-like($output, qr/Started at \d/, 'Start timestamp');
-like($output, qr/Source:/, 'source');
-like($output, qr/SELECT 4\nINSERT 0\nDELETE 4\n/, 'row counts');
-like($output, qr/Exiting because there are no more rows/, 'Exit reason');
+   $sb->wait_for_slaves;
 
-$sql = "SELECT * FROM test.table_1";
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-my ($m, $n);
-is_deeply(
-   $m = $node4_dbh->selectall_arrayref($sql),
-   $n = $node5_dbh->selectall_arrayref($sql),
-   "Node4 & Node5 remain the same after --purge"
-);
-
-# Test --no-delete.
-$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
-output(sub {pt_archiver::main(@args, qw(--no-delete --purge --source), "D=test,t=table_1,F=$cnf") });
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-is_deeply(
-   $node4_dbh->selectall_arrayref($sql),
-   $node5_dbh->selectall_arrayref($sql),
-   "Node4 & Node5 remain the same after --no-delete"
-);
-
-# --dest
-$sb->load_file('node4', 't/pt-archiver/samples/tables1-4.sql');
-output(sub {pt_archiver::main(@args, qw(--statistics --source), "D=test,t=table_1,F=$cnf", qw(--dest t=table_2)) });
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-$sql = "SELECT * FROM test.table_1, test.table_2";
-is_deeply(
-   $node4_dbh->selectall_arrayref($sql),
-   $node5_dbh->selectall_arrayref($sql),
-   "Node4 & Node5 remain the same after --dest"
-);
-
-# #############################################################################
-# Bug 903387: pt-archiver doesn't honor b=1 flag to create SQL_LOG_BIN statement
-# #############################################################################
-SKIP: {
-   $sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
-   $sb->wait_for_slaves(master => 'node4', slave => 'node5');
-
-   my $original_rows  = $node5_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
-   my $original_no_id = $node5_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
-   is_deeply(
-      $original_no_id,
-      [
-         ['aa', '11:11:11'],
-         ['bb', '11:11:12'],
-         ['cc', '11:11:13'],
-         ['dd', '11:11:14'],
-         ['ee', '11:11:15'],
-         ['ff', '11:11:16'],
-         ['gg', '11:11:17'],
-         ['hh', '11:11:18'],
-         ['ii', '11:11:19'],
-         ['jj', '11:11:10'],
-      ],
-      "Bug 903387: node5 has rows"
-   );
-
-   $output = output(
-      sub { pt_archiver::main(
-         '--source', "D=bri,L=1,t=t,F=$cnf,b=1",
-         '--dest', "D=bri,t=t_arch",
-         qw(--where 1=1 --replace --commit-each --bulk-insert --bulk-delete),
-         qw(--limit 10)) },
-   );
-
-   $sb->wait_for_slaves(master => 'node4', slave => 'node5');
-
-   my $rows = $node4_dbh->selectall_arrayref("SELECT c,t FROM bri.t ORDER BY id");
+   my $rows = $node1_dbh->selectall_arrayref($sql);
    is_deeply(
       $rows,
-      [
-         ['jj', '11:11:10'],
-      ],
-      "Bug 903387: rows deleted on node4"
+      $expect,
+      "$name on node1"
    ) or diag(Dumper($rows));
 
-   $rows = $node5_dbh->selectall_arrayref("SELECT * FROM bri.t ORDER BY id");
+   $rows = $node2_dbh->selectall_arrayref($sql);
    is_deeply(
       $rows,
-      $original_rows,
-      "Bug 903387: node5 still has rows"
+      $expect,
+      "$name on node2"
    ) or diag(Dumper($rows));
 
-   $sql = "SELECT * FROM bri.t_arch ORDER BY id";
+   $rows = $node3_dbh->selectall_arrayref($sql);
    is_deeply(
-      $node5_dbh->selectall_arrayref($sql),
-      $node4_dbh->selectall_arrayref($sql),
-      "Bug 903387: node5 has t_arch"
-   );
-
-   $sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
-   $sb->wait_for_slaves(master => 'node4', slave => 'node5');
-   output(
-      sub { pt_archiver::main(
-         '--source', "D=bri,L=1,t=t,F=$cnf,b=1",
-         '--dest', "D=bri,t=t_arch,b=1",
-         qw(--where 1=1 --replace --commit-each --bulk-insert --bulk-delete),
-         qw(--limit 10)) },
-   );
-
-   is_deeply(
-      $node5_dbh->selectall_arrayref("SELECT * FROM bri.t_arch ORDER BY id"),
-      [],
-      "Bug 903387: ...unless b=1 was also specified for --dest"
-   );
+      $rows,
+      $expect,
+      "$name on node3"
+   ) or diag(Dumper($rows));
 }
 
+# ###########################################################################
+# Purge rows.
+# ###########################################################################
+
+$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
+$node1_dbh->do("INSERT INTO test.table_2 SELECT * FROM test.table_1");
+
+# Since there's no auto-inc column, all rows should be purged on all nodes.
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         qw(--purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Purged all rows",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => [],
+);
+
+# table_2 has an auto-inc, so all rows less the max auto-inc row
+# should be purged on all nodes. This is due to --[no]safe-auto-increment.
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_2,F=$node1_cnf",
+         qw(--purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Purged rows less max auto-inc",
+   sql    => "SELECT * FROM test.table_2 ORDER BY a",
+   expect => [[qw(4 2 3), "\n"]],
+);
+
+# ###########################################################################
+# Do not purge rows.
+# ###########################################################################
+
+$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
+my $expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM test.table_1 ORDER BY a");
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         qw(--no-delete --purge))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "--no-delete left all rows",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => $expected_rows,
+);
+
+# #############################################################################
+# Archive rows to another table
+# #############################################################################
+
+# Presume the previous test ^ left tables1-4.sql loaded and that $expected_rows
+# still holds the real, expected rows.
+
+# Same node
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         qw(--dest t=table_2))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Rows purged from table_1 (same node)",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => [],
+);
+
+check_rows(
+   name   => "Rows archived to table_2 (same node)",
+   sql    => "SELECT * FROM test.table_2 ORDER BY a",
+   expect => $expected_rows,
+);
+
+# To another node
+
+$sb->load_file('node1', 't/pt-archiver/samples/tables1-4.sql');
+$expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM test.table_1 ORDER BY a");
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf",
+         '--dest', "F=$node2_cnf,D=test,t=table_2")
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "Rows purged from table_1 (cross-node)",
+   sql    => "SELECT * FROM test.table_1 ORDER BY a",
+   expect => [],
+);
+
+check_rows(
+   name   => "Rows archived to table_2 (cross-node)",
+   sql    => "SELECT * FROM test.table_2 ORDER BY a",
+   expect => $expected_rows,
+);
+
+# #############################################################################
+# --bulk-insert
+# #############################################################################
+
+# Same node
+
+$sb->load_file('node1', "t/pt-archiver/samples/bulk_regular_insert.sql");
+$expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM bri.t ORDER BY id");
+# The max auto-inc row won't be archived, so:
+my $max_auto_inc_row = pop @$expected_rows;
 
 output(
-   sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1", qw(--dest t=t_arch --where 1=1 --bulk-insert --limit 3)) },
-);
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-
-$sql = 'select * from bri.t order by id';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "--bulk-insert works as expected on the source table"
+   sub {
+      pt_archiver::main(@args, '--source', "F=$node1_cnf,D=bri,t=t,L=1",
+         qw(--dest t=t_arch --bulk-insert --limit 3))
+   },
+   stderr => 1,
 );
 
-$sql = 'select * from bri.t_arch order by id';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "...and on the dest table"
+check_rows(
+   name   => "--bulk-insert source table (same node)",
+   sql    => "select * from bri.t order by id",
+   expect => [ $max_auto_inc_row ],
 );
 
+check_rows(
+   name   => "--bulk-insert dest table (same node)",
+   sql    => "select * from bri.t_arch order by id",
+   expect => $expected_rows,
+);
 
-# #############################################################################
-# Test --bulk-delete
-# #############################################################################
-
-$sb->load_file('node4', 't/pt-archiver/samples/table5.sql');
-$output = output(
-   sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-delete --where 1=1), "--source", "D=test,t=table_5,F=$cnf", qw(--statistics --dest t=table_5_dest)) },
-);
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-
-$sql = 'select * from test.table_5';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "--bulk-delete works as expected on the source table"
+# To another node
+
+$sb->load_file('node1', "t/pt-archiver/samples/bulk_regular_insert.sql");
+
+output(
+   sub {
+      pt_archiver::main(@args, '--source', "F=$node1_cnf,D=bri,t=t,L=1",
+         '--dest', "F=$node2_cnf,t=t_arch", qw(--bulk-insert --limit 3))
+   },
+   stderr => 1,
 );
 
-$sql = 'select * from test.table_5_dest';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "...and on the dest table"
+check_rows(
+   name   => "--bulk-insert source table (cross-node)",
+   sql    => "select * from bri.t order by id",
+   expect => [ $max_auto_inc_row ],
+);
+
+check_rows(
+   name   => "--bulk-insert dest table (cross-node)",
+   sql    => "select * from bri.t_arch order by id",
+   expect => $expected_rows,
+);
+
+
+# #############################################################################
+# --bulk-delete
+# #############################################################################
+
+# Same node
+
+$sb->load_file('node2', 't/pt-archiver/samples/table5.sql');
+$expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM test.table_5 ORDER BY a,b,c,d");
+
+$output = output(
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_5,F=$node1_cnf",
+         qw(--no-ascend --limit 50 --bulk-delete),
+         qw(--statistics --dest t=table_5_dest))
+   },
+   stderr => 1,
+);
+
+check_rows(
+   name   => "--bulk-delete source table (same node)",
+   sql    => "select * from test.table_5",
+   expect => [],
+);
+
+check_rows(
+   name   => "--bulk-delete dest table (same node)",
+   sql    => "select * from test.table_5_dest order by a,b,c,d",
+   expect => $expected_rows,
+);
+
+# To another node
+
+$sb->load_file('node2', 't/pt-archiver/samples/table5.sql');
+
-# Same as above, but with a twist: --dest points to the second node. We should
-# get the archived rows in the first node as well
-
-my $node5_dsn = $sb->dsn_for('node5');
-my $node5_cnf = $sb->config_file_for('node5');
-
-$sb->load_file('node4', 't/pt-archiver/samples/table5.sql');
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
-$output = output(
-   sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-delete --where 1=1),
-         "--source", "D=test,t=table_5,F=$cnf", qw(--statistics),
-         "--dest", "$node5_dsn,D=test,t=table_5_dest,F=$node5_cnf") },
-);
-# Wait for the --dest table to replicate back
-$sb->wait_for_slaves(master => 'node5', slave => 'node4');
-
-$sql = 'select * from test.table_5_dest';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "--bulk-delete with --dest on the second node, archive ends up in node4 as well"
-);
-
-$sb->load_file('node4', "t/pt-archiver/samples/bulk_regular_insert.sql");
-$sb->wait_for_slaves(master => 'node4', slave => 'node5');
 output(
-   sub { pt_archiver::main("--source", "F=$cnf,D=bri,t=t,L=1",
-         "--dest", "$node5_dsn,D=bri,t=t_arch,F=$node5_cnf",
-         qw(--where 1=1 --bulk-insert --limit 3)) },
+   sub {
+      pt_archiver::main(@args, '--source', "D=test,t=table_5,F=$node1_cnf",
+         qw(--no-ascend --limit 50 --bulk-delete),
+         qw(--statistics), '--dest', "F=$node2_cnf,t=table_5_dest")
+   },
+   stderr => 1,
 );
-$sb->wait_for_slaves(master => 'node5', slave => 'node4');
 
-$sql = 'select * from bri.t_arch';
-is_deeply(
-   $node5_dbh->selectall_arrayref($sql),
-   $node4_dbh->selectall_arrayref($sql),
-   "--bulk-insert with --dest on the second node, archive ends up in node4 as well"
+check_rows(
+   name   => "--bulk-delete source table (cross-node)",
+   sql    => "select * from test.table_5",
+   expect => [],
+);
+
+check_rows(
+   name   => "--bulk-delete dest table (cross-node)",
+   sql    => "select * from test.table_5_dest order by a,b,c,d",
+   expect => $expected_rows,
 );
 
 # #############################################################################
 # Done.
# ############################################################################# -$sb->stop_sandbox(qw(node4 node5)); +$sb->wipe_clean($node1_dbh); ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); - done_testing; From f6fd2b7470dc9875ff33ef46121d93bd254e6660 Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Fri, 30 Nov 2012 18:14:06 +0000 Subject: [PATCH 09/11] Only run t/pt-table-checksum/pxc.t if in cluster mode. --- t/pt-table-checksum/pxc.t | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/t/pt-table-checksum/pxc.t b/t/pt-table-checksum/pxc.t index 46a2d422..e6c3407c 100644 --- a/t/pt-table-checksum/pxc.t +++ b/t/pt-table-checksum/pxc.t @@ -20,10 +20,7 @@ $ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1; use PerconaTest; use Sandbox; - require "$trunk/bin/pt-table-checksum"; -# Do this after requiring ptc, since it uses Mo -require VersionParser; my $dp = new DSNParser(opts=>$dsn_opts); my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); @@ -31,12 +28,7 @@ my $node1 = $sb->get_dbh_for('node1'); my $node2 = $sb->get_dbh_for('node2'); my $node3 = $sb->get_dbh_for('node3'); -my $db_flavor = VersionParser->new($node1)->flavor(); - -if ( $db_flavor !~ /XtraDB Cluster/ ) { - plan skip_all => "PXC tests"; -} -elsif ( !$node1 ) { +if ( !$node1 ) { plan skip_all => 'Cannot connect to cluster node1'; } elsif ( !$node2 ) { @@ -45,6 +37,9 @@ elsif ( !$node2 ) { elsif ( !$node3 ) { plan skip_all => 'Cannot connect to cluster node3'; } +elsif ( !$sb->is_cluster_mode ) { + plan skip_all => "PXC tests"; +} # The sandbox servers run with lock_wait_timeout=3 and it's not dynamic # so we need to specify --lock-wait-timeout=3 else the tool will die. From 8343f6b1f7dd9eb09fb6019d6052e32d4680b99d Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Fri, 30 Nov 2012 18:34:14 +0000 Subject: [PATCH 10/11] Conditionalize dest.t for PXC. --- t/pt-archiver/dest.t | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/t/pt-archiver/dest.t b/t/pt-archiver/dest.t index 31e24a58..9de5693c 100644 --- a/t/pt-archiver/dest.t +++ b/t/pt-archiver/dest.t @@ -54,14 +54,33 @@ ok(scalar @$rows == 0, 'Purged all rows ok'); # This test has been changed. I manually examined the tables before # and after the archive operation and I am convinced that the original # expected output was incorrect. -$rows = $dbh->selectall_arrayref("select * from test.table_2", { Slice => {}}); -is_deeply( - $rows, - [ { a => '1', b => '2', c => '3', d => undef }, +my ($sql, $expect_rows); +if ( $sb->is_cluster_node('master') ) { + # PXC nodes have auto-inc offsets, so rather than see what they are + # and account for them, we just don't select the auto-inc col, a. + # This test is really about b, c, and d anyway. + $sql = "SELECT b, c, d FROM test.table_2 ORDER BY a"; + $expect_rows = [ + { b => '2', c => '3', d => undef }, + { b => undef, c => '3', d => undef }, + { b => '2', c => '3', d => undef }, + { b => '2', c => '3', d => undef }, + ]; +} +else { + # The original, non-PXC values. 
+   $sql = "SELECT * FROM test.table_2 ORDER BY a";
+   $expect_rows = [
+      { a => '1', b => '2', c => '3', d => undef },
       { a => '2', b => undef, c => '3', d => undef },
       { a => '3', b => '2', c => '3', d => undef },
       { a => '4', b => '2', c => '3', d => undef },
-   ],
+   ];
+}
+$rows = $dbh->selectall_arrayref($sql, { Slice => {}});
 is_deeply(
    $rows,
+   $expect_rows,
    'Found rows in new table OK when archiving only some columns to another table') or diag(Dumper($rows));
 
 # Archive to another table with autocommit

From e69533e4923c80fb3ec290975b3a70e15f103778 Mon Sep 17 00:00:00 2001
From: Daniel Nichter
Date: Fri, 30 Nov 2012 19:25:35 +0000
Subject: [PATCH 11/11] Test with MyISAM tables. Add PXC section to pt-archiver docs.

---
 bin/pt-archiver                   | 35 ++++++++++++
 t/pt-archiver/pxc.t               | 88 +++++++++++++++++++++++++++++++
 t/pt-archiver/samples/table14.sql | 25 +++++++++
 3 files changed, 148 insertions(+)
 create mode 100644 t/pt-archiver/samples/table14.sql

diff --git a/bin/pt-archiver b/bin/pt-archiver
index fda2d5ca..3a285648 100755
--- a/bin/pt-archiver
+++ b/bin/pt-archiver
@@ -6332,6 +6332,41 @@ in long table scans if you're trying to nibble from the end of the table by an
 index other than the one it prefers. See L<"--source"> and read the
 documentation on the C<i> part if this applies to you.
 
+=head1 Percona XtraDB Cluster
+
+pt-archiver works with Percona XtraDB Cluster (PXC) 5.5.28-23.7 and newer,
+but there are three limitations you should consider before archiving on
+a cluster:
+
+=over
+
+=item Error on commit
+
+pt-archiver does not check for error when it commits transactions.
+Commits on PXC can fail, but the tool does not yet check for or retry the
+transaction when this happens. If it happens, the tool will die.
+
+=item MyISAM tables
+
+Archiving MyISAM tables works, but MyISAM support in PXC is still
+experimental at the time of this release. There are several known bugs with
+PXC, MyISAM tables, and C<AUTO_INCREMENT> columns. Therefore, you must ensure
+that archiving will not directly or indirectly result in the use of default
+C<AUTO_INCREMENT> values for a MyISAM table. For example, this happens with
+L<"--dest"> if L<"--columns"> is used and the C<AUTO_INCREMENT> column is not
+included. The tool does not check for this!
+
+=item Non-cluster options
+
+Certain options may or may not work. For example, if a cluster node
+is not also a slave, then L<"--check-slave-lag"> does not work. And since PXC
+tables are usually InnoDB, but InnoDB doesn't support C<INSERT DELAYED>, then
+L<"--delayed-insert"> does not work. Other options may also not work, but
+the tool does not check them, therefore you should test archiving on a test
+cluster before archiving on your real cluster.
+
+=back
+
 =head1 OUTPUT
 
 If you specify L<"--progress">, the output is a header row, plus status output

diff --git a/t/pt-archiver/pxc.t b/t/pt-archiver/pxc.t
index 15c45921..663d8e75 100644
--- a/t/pt-archiver/pxc.t
+++ b/t/pt-archiver/pxc.t
@@ -312,6 +312,94 @@ check_rows(
    expect => $expected_rows,
 );
 
+# #############################################################################
+# Repeat some of the above tests with MyISAM.
+# #############################################################################
+
+$sb->load_file('node1', 't/pt-archiver/samples/table14.sql');
+$expected_rows = $node1_dbh->selectall_arrayref(
+   "SELECT * FROM test.table_1 ORDER BY a");
+$node1_dbh->do("INSERT INTO test.table_2 SELECT * FROM test.table_1");
+
+# Since there's no auto-inc column, all rows should be purged on all nodes.
+$output = output( + sub { + pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf", + qw(--purge)) + }, + stderr => 1, +); + +check_rows( + name => "MyISAM: Purged all rows", + sql => "SELECT * FROM test.table_1 ORDER BY a", + expect => [], +); + + +# table_2 has an auto-inc, so all rows less the max auto-inc row +# should be purged on all nodes. This is due to --[no]safe-auto-increment. +$output = output( + sub { + pt_archiver::main(@args, '--source', "D=test,t=table_2,F=$node1_cnf", + qw(--purge)) + }, + stderr => 1, +); + +check_rows( + name => "MyISAM: Purged rows less max auto-inc", + sql => "SELECT * FROM test.table_2 ORDER BY a", + expect => [[qw(4 2 3), "\n"]], +); + +# Archive rows to another MyISAM table. + +# Same node +$sb->load_file('node1', 't/pt-archiver/samples/table14.sql'); +$output = output( + sub { + pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf", + qw(--dest t=table_2)) + }, + stderr => 1, +); + +check_rows( + name => "MyISAM: Rows purged from table_1 (same node)", + sql => "SELECT * FROM test.table_1 ORDER BY a", + expect => [], +); + +check_rows( + name => "MyISAM: Rows archived to table_2 (same node)", + sql => "SELECT * FROM test.table_2 ORDER BY a", + expect => $expected_rows, +); + +# To another node +$sb->load_file('node1', 't/pt-archiver/samples/table14.sql'); + +$output = output( + sub { + pt_archiver::main(@args, '--source', "D=test,t=table_1,F=$node1_cnf", + '--dest', "F=$node2_cnf,D=test,t=table_2") + }, + stderr => 1, +); + +check_rows( + name => "MyISAM: Rows purged from table_1 (cross-node)", + sql => "SELECT * FROM test.table_1 ORDER BY a", + expect => [], +); + +check_rows( + name => "MyISAM: Rows archived to table_2 (cross-node)", + sql => "SELECT * FROM test.table_2 ORDER BY a", + expect => $expected_rows, +); + # ############################################################################# # Done. # ############################################################################# diff --git a/t/pt-archiver/samples/table14.sql b/t/pt-archiver/samples/table14.sql new file mode 100644 index 00000000..83766a7f --- /dev/null +++ b/t/pt-archiver/samples/table14.sql @@ -0,0 +1,25 @@ +use test; + +drop table if exists table_1; +drop table if exists table_2; + +create table table_1( + a int not null primary key, + b int, + c int not null, + d varchar(50), + key(b) +) engine=myisam; + +create table table_2( + a int not null primary key auto_increment, + b int, + c int not null, + d varchar(50) +) engine=myisam; + +insert into table_1 values + (1, 2, 3, 4), + (2, null, 3, 4), + (3, 2, 3, "\t"), + (4, 2, 3, "\n");
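
The "Error on commit" caveat added to bin/pt-archiver in PATCH 11 means that, for now, any retry logic has to live in the caller: a PXC commit can fail certification, the whole transaction is rolled back, and pt-archiver simply dies. A minimal sketch of what a wrapper script could do until the tool handles this itself — the DSN, credentials, table, and do_archive_batch() helper below are hypothetical placeholders, not anything from the patches above:

#!/usr/bin/env perl
# Illustrative sketch only, not part of the patch series above.
# On PXC a COMMIT can fail certification; the server then rolls the
# transaction back, so the unit of work must be replayed, not just the
# commit re-issued.
use strict;
use warnings;
use DBI;

# Placeholder DSN and credentials; point these at a real PXC node.
my $dbh = DBI->connect(
   "DBI:mysql:database=test;host=127.0.0.1;port=12345",
   'msandbox', 'msandbox',
   { RaiseError => 1, AutoCommit => 0 },
);

# Hypothetical unit of work; stands in for one archiving batch.
sub do_archive_batch {
   my ($dbh) = @_;
   $dbh->do("DELETE FROM test.table_1 LIMIT 10");
}

# Replay the whole transaction on failure, up to $max_tries times.
sub run_txn_with_retry {
   my ($dbh, $work, $max_tries) = @_;
   $max_tries ||= 3;
   for my $attempt ( 1 .. $max_tries ) {
      my $ok = eval {
         $work->($dbh);
         $dbh->commit();
         1;
      };
      return 1 if $ok;
      my $err = $@;
      eval { $dbh->rollback() };  # be safe even if the failed commit already rolled back
      die "Giving up after $attempt attempts: $err" if $attempt == $max_tries;
      warn "Transaction failed (attempt $attempt), retrying: $err";
   }
   return;
}

run_txn_with_retry($dbh, \&do_archive_batch);
$dbh->disconnect();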