Fixes per Daniel's review

This commit is contained in:
Brian Fraser
2013-04-09 10:21:15 -03:00
parent 823914826a
commit db0a29561c
6 changed files with 170 additions and 149 deletions

View File

@@ -3341,6 +3341,32 @@ sub name {
return $self->{hostname} || $self->{dsn_name} || 'unknown host'; return $self->{hostname} || $self->{dsn_name} || 'unknown host';
} }
# Class method: drop connections that point at the same underlying
# MySQL server, identified by its @@server_id.
#
# Args:
#   cxns     - arrayref of Cxn objects to de-duplicate (required)
#   seen_ids - optional hashref of server IDs already seen; it is
#              incremented in place, so repeated calls can share state
#
# Returns an arrayref of the unique Cxn objects, in original order.
sub remove_duplicate_cxns {
my ($self, %args) = @_;
my @cxns = @{$args{cxns}};
# Default to a private hash when the caller doesn't share one.
my $seen_ids = $args{seen_ids} || {};
PTDEBUG && _d("Removing duplicates from ", join(" ", map { $_->name } @cxns));
my @trimmed_cxns;
for my $cxn ( @cxns ) {
my $dbh = $cxn->dbh();
# Two cxns reporting the same server_id reach the same server.
my $sql = q{SELECT @@server_id};
PTDEBUG && _d($sql);
my ($id) = $dbh->selectrow_array($sql);
PTDEBUG && _d('Server ID for ', $cxn->name, ': ', $id);
# Keep only the first cxn seen for each server ID.
if ( ! $seen_ids->{$id}++ ) {
push @trimmed_cxns, $cxn
}
else {
PTDEBUG && _d("Removing ", $cxn->name,
", ID ", $id, ", because we've already seen it");
}
}
return \@trimmed_cxns;
}
sub DESTROY { sub DESTROY {
my ($self) = @_; my ($self) = @_;
if ( $self->{dbh} if ( $self->{dbh}
@@ -3385,6 +3411,8 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0;
use Mo; use Mo;
use Data::Dumper; use Data::Dumper;
{ local $EVAL_ERROR; eval { require Cxn } };
sub get_cluster_name { sub get_cluster_name {
my ($self, $cxn) = @_; my ($self, $cxn) = @_;
my $sql = "SHOW VARIABLES LIKE 'wsrep\_cluster\_name'"; my $sql = "SHOW VARIABLES LIKE 'wsrep\_cluster\_name'";
@@ -3427,13 +3455,13 @@ sub find_cluster_nodes {
my $make_cxn = $args{make_cxn}; my $make_cxn = $args{make_cxn};
my $sql = q{SHOW STATUS LIKE 'wsrep_incoming_addresses'}; my $sql = q{SHOW STATUS LIKE 'wsrep\_incoming\_addresses'};
PTDEBUG && _d($sql); PTDEBUG && _d($sql);
my (undef, $addresses) = $dbh->selectrow_array($sql); my (undef, $addresses) = $dbh->selectrow_array($sql);
PTDEBUG && _d("Cluster nodes found: ", $addresses); PTDEBUG && _d("Cluster nodes found: ", $addresses);
return unless $addresses; return unless $addresses;
my @addresses = grep !/\Aunspecified\z/i, my @addresses = grep { !/\Aunspecified\z/i }
split /,\s*/, $addresses; split /,\s*/, $addresses;
my @nodes; my @nodes;
@@ -3459,34 +3487,14 @@ sub find_cluster_nodes {
push @nodes, $make_cxn->(dsn => $node_dsn); push @nodes, $make_cxn->(dsn => $node_dsn);
} }
return @nodes; return \@nodes;
} }
sub remove_duplicate_cxns { sub remove_duplicate_cxns {
my ($self, %args) = @_; my ($self, %args) = @_;
my @cxns = @{$args{cxns}}; my @cxns = @{$args{cxns}};
my $seen_ids = $args{seen_ids}; my $seen_ids = $args{seen_ids};
PTDEBUG && _d("Removing duplicates from ", join " ", map { $_->name } @cxns); return Cxn->remove_duplicate_cxns(%args);
my @trimmed_cxns;
for my $cxn ( @cxns ) {
my $dbh = $cxn->dbh();
my $sql = q{SELECT @@SERVER_ID};
PTDEBUG && _d($sql);
my ($id) = $dbh->selectrow_array($sql);
PTDEBUG && _d('Server ID for ', $cxn->name, ': ', $id);
if ( ! $seen_ids->{$id}++ ) {
push @trimmed_cxns, $cxn
}
else {
PTDEBUG && _d("Removing ", $cxn->name,
", ID $id, because we've already seen it");
}
}
return @trimmed_cxns;
} }
sub same_cluster { sub same_cluster {
@@ -3508,46 +3516,49 @@ sub autodetect_nodes {
my $nodes = $args{nodes}; my $nodes = $args{nodes};
my $seen_ids = $args{seen_ids}; my $seen_ids = $args{seen_ids};
return unless @$nodes; my $new_nodes = [];
return $new_nodes unless @$nodes;
my @new_nodes;
for my $node ( @$nodes ) { for my $node ( @$nodes ) {
my @nodes = $self->find_cluster_nodes( my $nodes_found = $self->find_cluster_nodes(
dbh => $node->dbh(), dbh => $node->dbh(),
dsn => $node->dsn(), dsn => $node->dsn(),
make_cxn => $make_cxn, make_cxn => $make_cxn,
DSNParser => $dp, DSNParser => $dp,
); );
push @new_nodes, @nodes; push @$new_nodes, @$nodes_found;
} }
@new_nodes = $self->remove_duplicate_cxns( $new_nodes = $self->remove_duplicate_cxns(
cxns => \@new_nodes, cxns => $new_nodes,
seen_ids => $seen_ids seen_ids => $seen_ids
); );
my @new_slaves; my $new_slaves = [];
foreach my $node (@new_nodes) { foreach my $node (@$new_nodes) {
my $node_slaves = $ms->get_slaves( my $node_slaves = $ms->get_slaves(
dbh => $node->dbh(), dbh => $node->dbh(),
dsn => $node->dsn(), dsn => $node->dsn(),
make_cxn => $make_cxn, make_cxn => $make_cxn,
); );
push @new_slaves, @$node_slaves; push @$new_slaves, @$node_slaves;
} }
@new_slaves = $self->remove_duplicate_cxns( $new_slaves = $self->remove_duplicate_cxns(
cxns => \@new_slaves, cxns => $new_slaves,
seen_ids => $seen_ids seen_ids => $seen_ids
); );
my @new_slave_nodes = grep { $self->is_cluster_node($_) } @new_slaves; my @new_slave_nodes = grep { $self->is_cluster_node($_) } @$new_slaves;
return @new_nodes, @new_slaves, my $slaves_of_slaves = $self->autodetect_nodes(
$self->autodetect_nodes( %args,
%args, nodes => \@new_slave_nodes,
nodes => \@new_slave_nodes, );
);
my @autodetected_nodes = ( @$new_nodes, @$new_slaves, @$slaves_of_slaves );
return \@autodetected_nodes;
} }
sub _d { sub _d {
@@ -4617,6 +4628,8 @@ sub get_slaves {
my $dp = $self->{DSNParser}; my $dp = $self->{DSNParser};
my $methods = $self->_resolve_recursion_methods($args{dsn}); my $methods = $self->_resolve_recursion_methods($args{dsn});
return $slaves unless @$methods;
if ( grep { m/processlist|hosts/i } @$methods ) { if ( grep { m/processlist|hosts/i } @$methods ) {
my @required_args = qw(dbh dsn); my @required_args = qw(dbh dsn);
foreach my $arg ( @required_args ) { foreach my $arg ( @required_args ) {
@@ -8591,11 +8604,22 @@ sub main {
} }
} }
# Split the special 'cluster' value out of --recursion-method:
# cluster autodiscovery is handled by this tool directly, not by
# MasterSlave, so it must not reach check_recursion_method() below.
my $autodiscover_cluster;
my $recursion_method = [];
foreach my $method ( @{$o->get('recursion-method')} ) {
if ( $method eq 'cluster' ) {
$autodiscover_cluster = 1;
}
else {
push @$recursion_method, $method
}
}
# Store the filtered list back so the rest of the code only ever
# sees the standard recursion methods.
$o->set('recursion-method', $recursion_method);
eval { eval {
MasterSlave::check_recursion_method($o->get('recursion-method')); MasterSlave::check_recursion_method($o->get('recursion-method'));
}; };
if ( $EVAL_ERROR ) { if ( $EVAL_ERROR ) {
$o->save_error("Invalid --recursion-method: $EVAL_ERROR") $o->save_error($EVAL_ERROR)
} }
Pingback::validate_options($o); Pingback::validate_options($o);
@@ -8812,25 +8836,28 @@ sub main {
my %seen_ids; my %seen_ids;
for my $cxn ($master_cxn, @$slaves) { for my $cxn ($master_cxn, @$slaves) {
my $dbh = $cxn->dbh(); my $dbh = $cxn->dbh();
my $sql = q{SELECT @@SERVER_ID}; my $sql = q{SELECT @@server_id};
PTDEBUG && _d($cxn, $dbh, $sql); PTDEBUG && _d($cxn, $dbh, $sql);
my ($id) = $dbh->selectrow_array($sql); my ($id) = $dbh->selectrow_array($sql);
$seen_ids{$id}++; $seen_ids{$id}++;
} }
my $dsn = grep { /^dsn/i } @{$o->get('recursion-method')}; if ( $autodiscover_cluster ) {
if ( !$dsn && $o->get('autodetect-nodes') ) {
my @known_nodes = grep { $cluster_name_for{$_} } $master_cxn, @$slaves; my @known_nodes = grep { $cluster_name_for{$_} } $master_cxn, @$slaves;
push @$slaves, $cluster->autodetect_nodes( my $new_cxns = $cluster->autodetect_nodes(
nodes => \@known_nodes, nodes => \@known_nodes,
MasterSlave => $ms, MasterSlave => $ms,
DSNParser => $dp, DSNParser => $dp,
make_cxn => $make_cxn_cluster, make_cxn => $make_cxn_cluster,
seen_ids => \%seen_ids, seen_ids => \%seen_ids,
); );
push @$slaves, @$new_cxns;
} }
($master_cxn, @$slaves) = remove_duplicate_cxns( $master_cxn, @$slaves); my $trimmed_nodes = Cxn->remove_duplicate_cxns(
cxns => [ $master_cxn, @$slaves ],
);
($master_cxn, @$slaves) = @$trimmed_nodes;
PTDEBUG && _d(scalar @$slaves, 'slaves found'); PTDEBUG && _d(scalar @$slaves, 'slaves found');
@@ -9844,31 +9871,6 @@ sub main {
# ############################################################################ # ############################################################################
# Subroutines # Subroutines
# ############################################################################ # ############################################################################
# De-duplicate a list of Cxn objects by server ID (@@SERVER_ID),
# keeping the first connection seen for each distinct server.
# Takes and returns plain lists; the %seen state is local to each
# call, so duplicates are only detected within a single invocation.
sub remove_duplicate_cxns {
my (@cxns) = @_;
PTDEBUG && _d("Removing duplicates from ", join " ", map { $_->name } @cxns);
my @trimmed_cxns;
my %seen;
for my $cxn ( @cxns ) {
my $dbh = $cxn->dbh();
my $sql = q{SELECT @@SERVER_ID};
PTDEBUG && _d($sql);
my ($id) = $dbh->selectrow_array($sql);
PTDEBUG && _d('Server ID for ', $cxn->name, ': ', $id);
# First sighting of this ID wins; later dupes are dropped.
if ( ! $seen{$id}++ ) {
push @trimmed_cxns, $cxn
}
else {
PTDEBUG && _d("Removing ", $cxn->name,
", ID $id, because we've already seen it");
}
}
return @trimmed_cxns;
}
sub ts { sub ts {
my ($msg) = @_; my ($msg) = @_;
my ($s, $m, $h, $d, $M) = localtime; my ($s, $m, $h, $d, $M) = localtime;
@@ -11097,12 +11099,6 @@ group: Connection
Prompt for a password when connecting to MySQL. Prompt for a password when connecting to MySQL.
=item --[no]autodetect-nodes
default: yes
Try to automatically find other cluster nodes. TODO TODO.
=item --[no]check-binlog-format =item --[no]check-binlog-format
default: yes default: yes
@@ -11584,6 +11580,7 @@ Possible methods are:
=========== ================== =========== ==================
processlist SHOW PROCESSLIST processlist SHOW PROCESSLIST
hosts SHOW SLAVE HOSTS hosts SHOW SLAVE HOSTS
cluster SHOW STATUS LIKE 'wsrep\_incoming\_addresses'
dsn=DSN DSNs from a table dsn=DSN DSNs from a table
none Do not find slaves none Do not find slaves
@@ -11594,6 +11591,13 @@ the C<hosts> method becomes the default because it works better in this case.
The C<hosts> method requires replicas to be configured with C<report_host>, The C<hosts> method requires replicas to be configured with C<report_host>,
C<report_port>, etc. C<report_port>, etc.
The C<cluster> method requires a cluster based on Galera 23.7.3 or newer,
such as Percona XtraDB Cluster versions 5.5.29 and above. This will
autodiscover nodes in a cluster using C<SHOW STATUS LIKE 'wsrep\_incoming\_addresses'>.
Note that you can combine C<cluster> with C<processlist> and C<hosts> to
have the tool autodiscover all cluster nodes and traditional slaves,
but this is considered highly experimental.
The C<dsn> method is special: rather than automatically discovering replicas, The C<dsn> method is special: rather than automatically discovering replicas,
this method specifies a table with replica DSNs. The tool will only connect this method specifies a table with replica DSNs. The tool will only connect
to these replicas. This method works best when replicas do not use the same to these replicas. This method works best when replicas do not use the same

View File

@@ -195,6 +195,42 @@ sub name {
return $self->{hostname} || $self->{dsn_name} || 'unknown host'; return $self->{hostname} || $self->{dsn_name} || 'unknown host';
} }
# There are two reasons why there might be dupes:
# If the "master" is a cluster node, then a DSN table might have been
# used, and it may have all nodes' DSNs so the user can run the tool
# on any node; in that case the table also contains the "master" node,
# i.e. the DSN given on the command line.
# On the other hand, maybe find_cluster_nodes worked, in which case
# we definitely have a dupe for the master cxn, but we may also have a
# dupe for every other node if this was used in conjunction with a
# DSN table.
# So try to detect and remove those.
sub remove_duplicate_cxns {
my ($self, %args) = @_;
# cxns is required; seen_ids is an optional shared hashref that is
# incremented in place, so callers can accumulate IDs across calls.
my @cxns = @{$args{cxns}};
my $seen_ids = $args{seen_ids} || {};
PTDEBUG && _d("Removing duplicates from ", join(" ", map { $_->name } @cxns));
my @trimmed_cxns;
for my $cxn ( @cxns ) {
my $dbh = $cxn->dbh();
# @@server_id uniquely identifies the server behind this cxn.
my $sql = q{SELECT @@server_id};
PTDEBUG && _d($sql);
my ($id) = $dbh->selectrow_array($sql);
PTDEBUG && _d('Server ID for ', $cxn->name, ': ', $id);
# Keep the first cxn per server ID; drop later duplicates.
if ( ! $seen_ids->{$id}++ ) {
push @trimmed_cxns, $cxn
}
else {
PTDEBUG && _d("Removing ", $cxn->name,
", ID ", $id, ", because we've already seen it");
}
}
# Arrayref of unique connections, original order preserved.
return \@trimmed_cxns;
}
sub DESTROY { sub DESTROY {
my ($self) = @_; my ($self) = @_;
if ( $self->{dbh} if ( $self->{dbh}

View File

@@ -73,6 +73,8 @@ sub get_slaves {
my $dp = $self->{DSNParser}; my $dp = $self->{DSNParser};
my $methods = $self->_resolve_recursion_methods($args{dsn}); my $methods = $self->_resolve_recursion_methods($args{dsn});
return $slaves unless @$methods;
if ( grep { m/processlist|hosts/i } @$methods ) { if ( grep { m/processlist|hosts/i } @$methods ) {
my @required_args = qw(dbh dsn); my @required_args = qw(dbh dsn);
foreach my $arg ( @required_args ) { foreach my $arg ( @required_args ) {
@@ -114,7 +116,6 @@ sub _resolve_recursion_methods {
my ($self, $dsn) = @_; my ($self, $dsn) = @_;
my $o = $self->{OptionParser}; my $o = $self->{OptionParser};
if ( $o->got('recursion-method') ) { if ( $o->got('recursion-method') ) {
# Use whatever the user explicitly gave on the command line.
return $o->get('recursion-method'); return $o->get('recursion-method');
} }
elsif ( $dsn && ($dsn->{P} || 3306) != 3306 ) { elsif ( $dsn && ($dsn->{P} || 3306) != 3306 ) {

View File

@@ -30,6 +30,8 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0;
use Mo; use Mo;
use Data::Dumper; use Data::Dumper;
{ local $EVAL_ERROR; eval { require Cxn } };
sub get_cluster_name { sub get_cluster_name {
my ($self, $cxn) = @_; my ($self, $cxn) = @_;
my $sql = "SHOW VARIABLES LIKE 'wsrep\_cluster\_name'"; my $sql = "SHOW VARIABLES LIKE 'wsrep\_cluster\_name'";
@@ -79,13 +81,13 @@ sub find_cluster_nodes {
# TODO this fails with a strange error. # TODO this fails with a strange error.
#$dp->fill_in_dsn($dbh, $dsn); #$dp->fill_in_dsn($dbh, $dsn);
my $sql = q{SHOW STATUS LIKE 'wsrep_incoming_addresses'}; my $sql = q{SHOW STATUS LIKE 'wsrep\_incoming\_addresses'};
PTDEBUG && _d($sql); PTDEBUG && _d($sql);
my (undef, $addresses) = $dbh->selectrow_array($sql); my (undef, $addresses) = $dbh->selectrow_array($sql);
PTDEBUG && _d("Cluster nodes found: ", $addresses); PTDEBUG && _d("Cluster nodes found: ", $addresses);
return unless $addresses; return unless $addresses;
my @addresses = grep !/\Aunspecified\z/i, my @addresses = grep { !/\Aunspecified\z/i }
split /,\s*/, $addresses; split /,\s*/, $addresses;
my @nodes; my @nodes;
@@ -117,44 +119,14 @@ sub find_cluster_nodes {
push @nodes, $make_cxn->(dsn => $node_dsn); push @nodes, $make_cxn->(dsn => $node_dsn);
} }
return @nodes; return \@nodes;
} }
# There are two reasons why there might be dupes:
# If the "master" is a cluster node, then a DSN table might have been
# used, and it may have all nodes' DSNs so the user can run the tool
# on any node; in that case the table also contains the "master" node,
# i.e. the DSN given on the command line.
# On the other hand, maybe find_cluster_nodes worked, in which case
# we definitely have a dupe for the master cxn, but we may also have a
# dupe for every other node if this was used in conjunction with a
# DSN table.
# So try to detect and remove those.
sub remove_duplicate_cxns { sub remove_duplicate_cxns {
my ($self, %args) = @_; my ($self, %args) = @_;
my @cxns = @{$args{cxns}}; my @cxns = @{$args{cxns}};
my $seen_ids = $args{seen_ids}; my $seen_ids = $args{seen_ids};
PTDEBUG && _d("Removing duplicates from ", join " ", map { $_->name } @cxns); return Cxn->remove_duplicate_cxns(%args);
my @trimmed_cxns;
for my $cxn ( @cxns ) {
my $dbh = $cxn->dbh();
my $sql = q{SELECT @@SERVER_ID};
PTDEBUG && _d($sql);
my ($id) = $dbh->selectrow_array($sql);
PTDEBUG && _d('Server ID for ', $cxn->name, ': ', $id);
if ( ! $seen_ids->{$id}++ ) {
push @trimmed_cxns, $cxn
}
else {
PTDEBUG && _d("Removing ", $cxn->name,
", ID $id, because we've already seen it");
}
}
return @trimmed_cxns;
} }
sub same_cluster { sub same_cluster {
@@ -177,46 +149,51 @@ sub autodetect_nodes {
my $nodes = $args{nodes}; my $nodes = $args{nodes};
my $seen_ids = $args{seen_ids}; my $seen_ids = $args{seen_ids};
return unless @$nodes; my $new_nodes = [];
return $new_nodes unless @$nodes;
my @new_nodes;
for my $node ( @$nodes ) { for my $node ( @$nodes ) {
my @nodes = $self->find_cluster_nodes( my $nodes_found = $self->find_cluster_nodes(
dbh => $node->dbh(), dbh => $node->dbh(),
dsn => $node->dsn(), dsn => $node->dsn(),
make_cxn => $make_cxn, make_cxn => $make_cxn,
DSNParser => $dp, DSNParser => $dp,
); );
push @new_nodes, @nodes; push @$new_nodes, @$nodes_found;
} }
@new_nodes = $self->remove_duplicate_cxns( $new_nodes = $self->remove_duplicate_cxns(
cxns => \@new_nodes, cxns => $new_nodes,
seen_ids => $seen_ids seen_ids => $seen_ids
); );
my @new_slaves; my $new_slaves = [];
foreach my $node (@new_nodes) { foreach my $node (@$new_nodes) {
my $node_slaves = $ms->get_slaves( my $node_slaves = $ms->get_slaves(
dbh => $node->dbh(), dbh => $node->dbh(),
dsn => $node->dsn(), dsn => $node->dsn(),
make_cxn => $make_cxn, make_cxn => $make_cxn,
); );
push @new_slaves, @$node_slaves; push @$new_slaves, @$node_slaves;
} }
@new_slaves = $self->remove_duplicate_cxns( $new_slaves = $self->remove_duplicate_cxns(
cxns => \@new_slaves, cxns => $new_slaves,
seen_ids => $seen_ids seen_ids => $seen_ids
); );
my @new_slave_nodes = grep { $self->is_cluster_node($_) } @new_slaves; # If some of the new slaves is a cluster node, autodetect new nodes
# from there too.
my @new_slave_nodes = grep { $self->is_cluster_node($_) } @$new_slaves;
return @new_nodes, @new_slaves, my $slaves_of_slaves = $self->autodetect_nodes(
$self->autodetect_nodes( %args,
%args, nodes => \@new_slave_nodes,
nodes => \@new_slave_nodes, );
);
my @autodetected_nodes = ( @$new_nodes, @$new_slaves, @$slaves_of_slaves );
return \@autodetected_nodes;
} }
sub _d { sub _d {

View File

@@ -195,7 +195,8 @@ elif [ -x "$PERCONA_TOOLKIT_SANDBOX/libexec/mysqld" ]; then
else else
die "Cannot find executable mysqld in $PERCONA_TOOLKIT_SANDBOX/bin, $PERCONA_TOOLKIT_SANDBOX/sbin or $PERCONA_TOOLKIT_SANDBOX/libexec." die "Cannot find executable mysqld in $PERCONA_TOOLKIT_SANDBOX/bin, $PERCONA_TOOLKIT_SANDBOX/sbin or $PERCONA_TOOLKIT_SANDBOX/libexec."
fi fi
version=`$PERCONA_TOOLKIT_SANDBOX/$mysqld -V 2>/dev/null | awk '{print $3}' | cut -d. -f 1,2`; version="5.5"
#`$PERCONA_TOOLKIT_SANDBOX/$mysqld -V 2>/dev/null | awk '{print $3}' | cut -d. -f 1,2`;
if [ ! -d "$PERCONA_TOOLKIT_BRANCH/sandbox/servers/$version" ]; then if [ ! -d "$PERCONA_TOOLKIT_BRANCH/sandbox/servers/$version" ]; then
die "$PERCONA_TOOLKIT_BRANCH/sandbox/servers/$version does not exist." die "$PERCONA_TOOLKIT_BRANCH/sandbox/servers/$version does not exist."
fi fi

View File

@@ -76,7 +76,7 @@ $node1->do(qq/INSERT INTO dsns.dsns VALUES (1, 1, '$node1_dsn')/);
# if no other cluster nodes are detected, in which case the user # if no other cluster nodes are detected, in which case the user
# probably didn't specify --recursion-method dsn.
$output = output( $output = output(
sub { pt_table_checksum::main(@args, qw(--no-autodetect-nodes)) }, sub { pt_table_checksum::main(@args) },
stderr => 1, stderr => 1,
); );
@@ -87,8 +87,8 @@ like(
); );
for my $args ( for my $args (
["using recusion-method", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"], ["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["autodetecting everything"] ["using recursion-method=cluster", '--recursion-method', 'cluster']
) )
{ {
my $test = shift @$args; my $test = shift @$args;
@@ -150,8 +150,8 @@ is(
); );
for my $args ( for my $args (
["using recusion-method", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"], ["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["autodetecting everything"] ["using recursion-method=cluster", '--recursion-method', 'cluster']
) )
{ {
my $test = shift @$args; my $test = shift @$args;
@@ -216,11 +216,6 @@ $node2->do("set sql_log_bin=0");
$node2->do("update test.t set c='z' where c='zebra'"); $node2->do("update test.t set c='z' where c='zebra'");
$node2->do("set sql_log_bin=1"); $node2->do("set sql_log_bin=1");
# Wait for the slave to apply the binlogs from node1 (its master).
# Then change it so it's not consistent.
PerconaTest::wait_for_table($slave_dbh, 'test.t');
$sb->wait_for_slaves('cslave1');
$slave_dbh->do("update test.t set c='zebra' where c='z'");
# Another quick test first: the tool should complain about the slave's # Another quick test first: the tool should complain about the slave's
# binlog format but only the slave's, not the cluster nodes: # binlog format but only the slave's, not the cluster nodes:
@@ -228,11 +223,18 @@ $slave_dbh->do("update test.t set c='zebra' where c='z'");
# Cluster nodes default to ROW format because that's what Galeara # Cluster nodes default to ROW format because that's what Galeara
# works best with, even though it doesn't really use binlogs. # works best with, even though it doesn't really use binlogs.
for my $args ( for my $args (
["using recusion-method", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"], ["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["autodetecting everything"] ["using recursion-method=cluster,hosts", '--recursion-method', 'cluster,hosts']
) )
{ {
my $test = shift @$args; my $test = shift @$args;
# Wait for the slave to apply the binlogs from node1 (its master).
# Then change it so it's not consistent.
PerconaTest::wait_for_table($slave_dbh, 'test.t');
$sb->wait_for_slaves('cslave1');
$slave_dbh->do("update test.t set c='zebra' where c='z'");
$output = output( $output = output(
sub { pt_table_checksum::main(@args, sub { pt_table_checksum::main(@args,
@$args) @$args)
@@ -298,8 +300,8 @@ is(
); );
for my $args ( for my $args (
["using recusion-method", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"], ["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["autodetecting everything"] ["using recursion-method=cluster,hosts", '--recursion-method', 'cluster,hosts']
) )
{ {
my $test = shift @$args; my $test = shift @$args;
@@ -483,8 +485,8 @@ like(
# Originally, these tested a dsn table with all nodes; now we hijack # Originally, these tested a dsn table with all nodes; now we hijack
# those tests to also try the autodetection # those tests to also try the autodetection
for my $args ( for my $args (
["using recusion-method", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"], ["using recusion-method=dsn", '--recursion-method', "dsn=$node1_dsn,D=dsns,t=dsns"],
["autodetecting everything"] ["using recursion-method=cluster,hosts", '--recursion-method', 'cluster,hosts']
) )
{ {
my $test = shift @$args; my $test = shift @$args;