Use InnoDB instead of MyISAM in basic_no_fks.sql. Make query_table.pl handle error on COMMIT.

This commit is contained in:
vagrant
2012-11-20 17:27:54 -08:00
parent 7a51df2c35
commit 537b36c73c
2 changed files with 58 additions and 31 deletions

View File

@@ -6,7 +6,7 @@ CREATE TABLE t (
c char(32),
d date,
unique index (c(32))
) ENGINE=MyISAM;
) ENGINE=InnoDB;
INSERT INTO pt_osc.t VALUES
(null, 'a', now()),
(null, 'b', now()),

View File

@@ -4,9 +4,12 @@ use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use DBI;
use Time::HiRes qw(usleep time);
use Time::HiRes qw(sleep time);
use Test::More qw();
my ($host, $port, $db, $tbl, $pkcol, $stop_file, $pid_file, $sleep_time) = @ARGV;
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
my ($host, $port, $db, $tbl, $pkcol, $stop_file, $pid_file, $sleep) = @ARGV;
die "I need a pid_file argument" unless $pid_file;
open my $fh, '>', $pid_file or die $OS_ERROR;
@@ -19,63 +22,87 @@ my $dbh = DBI->connect(
{RaiseError => 1, AutoCommit => 0, ShowErrorStatement => 1, PrintError => 0},
);
my $sleep = ($sleep_time || 0.001) * 1_000_000;
$sleep ||= 0.001;
my $cnt = 0;
my @del;
my @upd;
my @ins;
my (@del, %del);
my (@upd, %upd);
my (@ins, %ins);
use constant TYPE_DELETE => 1;
use constant TYPE_UPDATE => 2;
my $start_xa = "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */";
$dbh->do($start_xa);
# Attempt to COMMIT the open transaction; tolerate failure (e.g. on PXC,
# where a commit can be rejected).  On success, fold the ids touched in
# this transaction from the pending arrays (@del/@upd/@ins) into the
# permanent tally hashes (%del/%upd/%ins).  On failure, diag the error
# and ROLLBACK so none of the pending ids are counted.  Either way, the
# pending arrays and the per-transaction statement counter $cnt are
# reset for the next transaction.
# NOTE(review): relies on file-scope state ($dbh, @del/%del, @upd/%upd,
# @ins/%ins, $cnt); `map` in void context below would normally be a
# `for` loop, but is left as-is here.
sub commit {
   # RaiseError is on, so a failed commit dies; trap it.
   eval {
      $dbh->commit;
   };
   if ( $EVAL_ERROR ) {
      # Commit failed: report via Test::More so it shows in TAP output,
      # then discard the transaction's changes.
      Test::More::diag($EVAL_ERROR);
      $dbh->do("ROLLBACK");
   }
   else {
      # Commit succeeded: record each pending id in the tally hashes.
      map { $del{$_}++ } @del;
      map { $ins{$_}++ } @ins;
      map { $upd{$_}++ } @upd;
   }
   # Clear per-transaction state regardless of commit outcome.
   @del = ();
   @ins = ();
   @upd = ();
   $cnt = 0;
}
for my $i ( 1..5_000 ) {
last if -f $stop_file;
my $id = 0;
my $type = '';
eval {
# We do roughly 25% DELETE, 25% UPDATE and 50% INSERT.
my $x = int(rand(5));
if ($x == 1) {
my $id = int(rand(500)) || 1;
my $type = int(rand(5));
if ($type == TYPE_DELETE) {
$id = int(rand(500)) || 1;
$dbh->do("delete from $db.$tbl where $pkcol=$id");
# To challenge the tool, we *do* (or can) delete the same id twice.
# But to keep the numbers straight, we only record each deleted
# id once.
push @del, $id unless grep { $_ == $id } @del;
push @del, $id;
}
elsif ($x == 2) {
my $id = int(rand(500)) || 1;
if ( !grep { $_ == $id } @del ) {
elsif ($type == TYPE_UPDATE) {
$id = int(rand(500)) || 1;
# Update a row if we haven't already deleted it.
if ( !$del{$id} ) {
my $t=time;
$dbh->do("update $db.$tbl set c='updated row $t' where $pkcol=$id");
push @upd, $id;
}
}
else {
my $id = 500 + $i;
my $t=time;
$id = 500 + $i;
my $t = time;
$dbh->do("insert ignore into $db.$tbl ($pkcol, c) values ($id, 'new row $t')");
push @ins, $id;
}
# COMMIT every N statements
if ( $cnt++ > 5 ) {
$dbh->do('COMMIT');
$cnt = 0;
usleep($sleep);
$dbh->do($start_xa);
}
};
if ( $EVAL_ERROR ) {
warn $EVAL_ERROR;
last;
Test::More::diag($EVAL_ERROR);
sleep $sleep;
}
# COMMIT every N statements. With PXC this can fail.
if ( $cnt++ > 5 ) {
commit();
sleep($sleep);
$dbh->do($start_xa);
}
}
$dbh->do('COMMIT');
commit();
$dbh->disconnect();
print "deleted:" . join(',', @del) . "\n";
print "updated:" . join(',', @upd) . "\n";
print "inserted:" . join(',', @ins) . "\n";
print "deleted:" . join(',', sort keys %del) . "\n";
print "updated:" . join(',', sort keys %upd) . "\n";
print "inserted:" . join(',', sort keys %ins) . "\n";
exit 0;