diff --git a/bin/pt-slave-restart b/bin/pt-slave-restart
index dfcb552a..5ea55372 100755
--- a/bin/pt-slave-restart
+++ b/bin/pt-slave-restart
@@ -4693,9 +4693,6 @@ sub watch_server {
       . $o->get('skip-count'));
    my $start = $dbh->prepare($start_sql);
    my $stop = $dbh->prepare('STOP SLAVE');
-   my $chmt = $dbh->prepare(
-      'CHANGE MASTER TO MASTER_LOG_FILE=?, MASTER_LOG_POS=?');
-
    # ########################################################################
    # Lookup tables of things to do when a problem is detected.
    # ########################################################################
@@ -4717,8 +4714,14 @@ sub watch_server {
          PTDEBUG && _d('Found relay log corruption');
          # Can't do CHANGE MASTER TO with a running slave.
          $stop->execute();
-         $chmt->execute(
-            @{$stat}{qw(relay_master_log_file exec_master_log_pos)});
+
+         # Cannot use ? placeholders for CHANGE MASTER values:
+         # https://bugs.launchpad.net/percona-toolkit/+bug/932614
+         my $sql = "CHANGE MASTER TO "
+                 . "MASTER_LOG_FILE='$stat->{relay_master_log_file}', "
+                 . "MASTER_LOG_POS=$stat->{exec_master_log_pos}";
+         PTDEBUG && _d($sql);
+         $dbh->do($sql);
       },
       skip => sub {
          my ( $stat, $dbh ) = @_;
@@ -4940,7 +4943,7 @@ pt-slave-restart - Watch and restart MySQL replication after errors.
 
 =head1 SYNOPSIS
 
-Usage: pt-slave-restart [OPTION...] [DSN]
+Usage: pt-slave-restart [OPTIONS] [DSN]
 
 pt-slave-restart watches one or more MySQL replication slaves for errors, and
 tries to restart replication if it stops.
@@ -4957,9 +4960,6 @@ it is having problems with replication. Don't be too hasty to use it unless you
 need to. If you use this tool carelessly, you might miss the chance to really
 solve the slave server's problems.
 
-At the time of this release there is a bug that causes an invalid
-C<CHANGE MASTER TO> statement to be executed.
-
 The authoritative source for updated information is always the online issue
 tracking system. Issues that affect this tool will be marked as such. You can
 see a list of such issues at the following URL:
@@ -4974,12 +4974,9 @@ statements that cause errors. It polls slaves intelligently with an
 exponentially varying sleep time. You can specify errors to skip and run the
 slaves until a certain binlog position.
 
-Note: it has come to my attention that Yahoo! had or has an internal tool
-called fix_repl, described to me by a past Yahoo! employee and mentioned in
-the first edition of High Performance MySQL. Apparently this tool does the
-same thing. Make no mistake, though: this is not a way to "fix replication."
-In fact I would not even encourage its use on a regular basis; I use it only
-when I have an error I know I just need to skip past.
+Although this tool can help a slave advance past errors, you should not
+rely on it to "fix" replication. If slave errors occur frequently or
+unexpectedly, you should identify and fix the root cause.
 
 =head1 OUTPUT
 
diff --git a/t/pt-slave-restart/pt-slave-restart.t b/t/pt-slave-restart/pt-slave-restart.t
index 7e59ab47..b33e8aa0 100644
--- a/t/pt-slave-restart/pt-slave-restart.t
+++ b/t/pt-slave-restart/pt-slave-restart.t
@@ -26,23 +26,22 @@ if ( !$master_dbh ) {
 elsif ( !$slave_dbh ) {
    plan skip_all => 'Cannot connect to sandbox slave';
 }
-else {
-   plan tests => 15;
-}
 
-$sb->create_dbs($master_dbh, ['test']);
+$master_dbh->do('DROP DATABASE IF EXISTS test');
+$master_dbh->do('CREATE DATABASE test');
 $master_dbh->do('CREATE TABLE test.t (a INT)');
-my $i = 0;
-PerconaTest::wait_for_table($slave_dbh, 'test.t');
+$sb->wait_for_slaves;
 
 # Bust replication
 $slave_dbh->do('DROP TABLE test.t');
 $master_dbh->do('INSERT INTO test.t SELECT 1');
 wait_until(
    sub {
-      !$slave_dbh->selectrow_hashref('show slave status')->{slave_sql_running};
+      my $row = $slave_dbh->selectrow_hashref('show slave status');
+      return $row->{last_sql_errno};
    }
 );
+
 my $r = $slave_dbh->selectrow_hashref('show slave status');
 like($r->{last_error}, qr/Table 'test.t' doesn't exist'/, 'It is busted');
 
@@ -140,4 +139,4 @@ diag(`rm -f /tmp/pt-slave-re*`);
 $sb->wipe_clean($master_dbh);
 $sb->wipe_clean($slave_dbh);
 ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
-exit;
+done_testing;
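The core of the bin/pt-slave-restart change is that MySQL does not accept prepared-statement ? placeholders for CHANGE MASTER TO values (bug 932614), so the statement is now built by interpolating the relay log coordinates from SHOW SLAVE STATUS and run with $dbh->do(). The following is a minimal standalone sketch of that approach, not code from the tool: the sandbox DSN and credentials are assumptions, and the use of $dbh->quote() on the file name is only illustrative (the patch itself interpolates the value inside single quotes).

  #!/usr/bin/env perl
  # Sketch only: re-point a stopped slave at the coordinates its SQL thread had
  # already executed, interpolating the values because CHANGE MASTER TO cannot
  # be prepared with ? placeholders.
  use strict;
  use warnings;
  use DBI;

  # Assumed sandbox connection details; adjust for your environment.
  my $dbh = DBI->connect(
     'DBI:mysql:host=127.0.0.1;port=12346', 'msandbox', 'msandbox',
     { RaiseError => 1, PrintError => 0, FetchHashKeyName => 'NAME_lc' });

  my $stat = $dbh->selectrow_hashref('SHOW SLAVE STATUS');

  $dbh->do('STOP SLAVE');   # CHANGE MASTER TO requires a stopped slave

  # Build the statement by interpolation: quote the log file name and force
  # the position to a number rather than binding placeholders.
  my $sql = 'CHANGE MASTER TO'
          . ' MASTER_LOG_FILE=' . $dbh->quote($stat->{relay_master_log_file})
          . ', MASTER_LOG_POS=' . (0 + $stat->{exec_master_log_pos});
  $dbh->do($sql);

  $dbh->do('START SLAVE');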
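The test change waits for SHOW SLAVE STATUS to report a nonzero Last_SQL_Errno instead of inferring breakage from Slave_SQL_Running, and it ends with done_testing so the plan no longer has to be kept in sync by hand. Below is a rough standalone sketch of that polling condition; the connection settings, timeout, and helper name are illustrative assumptions, not code from the test suite.

  #!/usr/bin/env perl
  # Sketch only: poll SHOW SLAVE STATUS until the SQL thread reports an error,
  # mirroring the wait_until() condition used in the updated test.
  use strict;
  use warnings;
  use DBI;
  use Time::HiRes qw(sleep);

  # Assumed slave connection; adjust host/port/credentials for your sandbox.
  my $slave_dbh = DBI->connect(
     'DBI:mysql:host=127.0.0.1;port=12346', 'msandbox', 'msandbox',
     { RaiseError => 1, FetchHashKeyName => 'NAME_lc' });

  # Hypothetical helper: returns the status row once last_sql_errno is nonzero,
  # or undef if the timeout expires first.
  sub wait_for_sql_error {
     my ( $dbh, $timeout ) = @_;
     my $deadline = time() + ($timeout || 30);
     while ( time() < $deadline ) {
        my $row = $dbh->selectrow_hashref('SHOW SLAVE STATUS');
        # last_sql_errno is 0 while replication is healthy; nonzero means the
        # SQL thread stopped on an error (e.g. 1146, table doesn't exist).
        return $row if $row && $row->{last_sql_errno};
        sleep 0.25;
     }
     return;
  }

  if ( my $row = wait_for_sql_error($slave_dbh) ) {
     print "Slave stopped with error $row->{last_sql_errno}: $row->{last_error}\n";
  }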