diff --git a/bin/pt-agent b/bin/pt-agent new file mode 100755 index 00000000..21b9c73d --- /dev/null +++ b/bin/pt-agent @@ -0,0 +1,9343 @@ +#!/usr/bin/env perl + +# This program is part of Percona Toolkit: http://www.percona.com/software/ +# See "COPYRIGHT, LICENSE, AND WARRANTY" at the end of this file for legal +# notices and disclaimers. + +use strict; +use warnings FATAL => 'all'; + +# This tool is "fat-packed": most of its dependent modules are embedded +# in this file. Setting %INC to this file for each module makes Perl aware +# of this so it will not try to load the module from @INC. See the tool's +# documentation for a full list of dependencies. +BEGIN { + $INC{$_} = __FILE__ for map { (my $pkg = "$_.pm") =~ s!::!/!g; $pkg } (qw( + Percona::Toolkit + Lmo::Utils + Lmo::Meta + Lmo::Object + Lmo::Types + Lmo + Percona::WebAPI::Representation + Percona::WebAPI::Client + Percona::WebAPI::Exception::Request + Percona::WebAPI::Exception::Resource + Percona::WebAPI::Resource::Agent + Percona::WebAPI::Resource::Config + Percona::WebAPI::Resource::Service + Percona::WebAPI::Resource::Task + Percona::WebAPI::Resource::LogEntry + VersionCheck + DSNParser + OptionParser + Cxn + Quoter + VersionParser + Daemon + Transformers + Safeguards + Percona::Agent::Logger + )); +} + +# ########################################################################### +# Percona::Toolkit package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/Toolkit.pm +# t/lib/Percona/Toolkit.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Percona::Toolkit; + +our $VERSION = '2.2.2'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Carp qw(carp cluck); +use Data::Dumper qw(); + +require Exporter; +our @ISA = qw(Exporter); +our @EXPORT_OK = qw( + have_required_args + Dumper + _d +); + +sub have_required_args { + my ($args, @required_args) = @_; + my $have_required_args = 1; + foreach my $arg ( @required_args ) { + if ( !defined $args->{$arg} ) { + $have_required_args = 0; + carp "Argument $arg is not defined"; + } + } + cluck unless $have_required_args; # print backtrace + return $have_required_args; +} + +sub Dumper { + local $Data::Dumper::Indent = 1; + local $Data::Dumper::Sortkeys = 1; + local $Data::Dumper::Quotekeys = 0; + Data::Dumper::Dumper(@_); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Percona::Toolkit package +# ########################################################################### + +# ########################################################################### +# Lmo::Utils package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Utils.pm +# t/lib/Lmo/Utils.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Lmo::Utils; + +use strict; +use warnings qw( FATAL all ); +require Exporter; +our (@ISA, @EXPORT, @EXPORT_OK); + +BEGIN { + @ISA = qw(Exporter); + @EXPORT = @EXPORT_OK = qw( + _install_coderef + _unimport_coderefs + _glob_for + _stash_for + ); +} + +{ + no strict 'refs'; + sub _glob_for { + return \*{shift()} + } + + sub _stash_for { + return \%{ shift() . "::" }; + } +} + +sub _install_coderef { + my ($to, $code) = @_; + + return *{ _glob_for $to } = $code; +} + +sub _unimport_coderefs { + my ($target, @names) = @_; + return unless @names; + my $stash = _stash_for($target); + foreach my $name (@names) { + if ($stash->{$name} and defined(&{$stash->{$name}})) { + delete $stash->{$name}; + } + } +} + +1; +} +# ########################################################################### +# End Lmo::Utils package +# ########################################################################### + +# ########################################################################### +# Lmo::Meta package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Meta.pm +# t/lib/Lmo/Meta.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Lmo::Meta; +use strict; +use warnings qw( FATAL all ); + +my %metadata_for; + +sub new { + my $class = shift; + return bless { @_ }, $class +} + +sub metadata_for { + my $self = shift; + my ($class) = @_; + + return $metadata_for{$class} ||= {}; +} + +sub class { shift->{class} } + +sub attributes { + my $self = shift; + return keys %{$self->metadata_for($self->class)} +} + +sub attributes_for_new { + my $self = shift; + my @attributes; + + my $class_metadata = $self->metadata_for($self->class); + while ( my ($attr, $meta) = each %$class_metadata ) { + if ( exists $meta->{init_arg} ) { + push @attributes, $meta->{init_arg} + if defined $meta->{init_arg}; + } + else { + push @attributes, $attr; + } + } + return @attributes; +} + +1; +} +# ########################################################################### +# End Lmo::Meta package +# ########################################################################### + +# ########################################################################### +# Lmo::Object package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Object.pm +# t/lib/Lmo/Object.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Lmo::Object; + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(blessed); + +use Lmo::Meta; +use Lmo::Utils qw(_glob_for); + +sub new { + my $class = shift; + my $args = $class->BUILDARGS(@_); + + my $class_metadata = Lmo::Meta->metadata_for($class); + + my @args_to_delete; + while ( my ($attr, $meta) = each %$class_metadata ) { + next unless exists $meta->{init_arg}; + my $init_arg = $meta->{init_arg}; + + if ( defined $init_arg ) { + $args->{$attr} = delete $args->{$init_arg}; + } + else { + push @args_to_delete, $attr; + } + } + + delete $args->{$_} for @args_to_delete; + + for my $attribute ( keys %$args ) { + if ( my $coerce = $class_metadata->{$attribute}{coerce} ) { + $args->{$attribute} = $coerce->($args->{$attribute}); + } + if ( my $isa_check = $class_metadata->{$attribute}{isa} ) { + my ($check_name, $check_sub) = @$isa_check; + $check_sub->($args->{$attribute}); + } + } + + while ( my ($attribute, $meta) = each %$class_metadata ) { + next unless $meta->{required}; + Carp::confess("Attribute ($attribute) is required for $class") + if ! 
exists $args->{$attribute} + } + + my $self = bless $args, $class; + + my @build_subs; + my $linearized_isa = mro::get_linear_isa($class); + + for my $isa_class ( @$linearized_isa ) { + unshift @build_subs, *{ _glob_for "${isa_class}::BUILD" }{CODE}; + } + my @args = %$args; + for my $sub (grep { defined($_) && exists &$_ } @build_subs) { + $sub->( $self, @args); + } + return $self; +} + +sub BUILDARGS { + shift; # No need for the classname + if ( @_ == 1 && ref($_[0]) ) { + Carp::confess("Single parameters to new() must be a HASH ref, not $_[0]") + unless ref($_[0]) eq ref({}); + return {%{$_[0]}} # We want a new reference, always + } + else { + return { @_ }; + } +} + +sub meta { + my $class = shift; + $class = Scalar::Util::blessed($class) || $class; + return Lmo::Meta->new(class => $class); +} + +1; +} +# ########################################################################### +# End Lmo::Object package +# ########################################################################### + +# ########################################################################### +# Lmo::Types package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Types.pm +# t/lib/Lmo/Types.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Lmo::Types; + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + + +our %TYPES = ( + Bool => sub { !$_[0] || (defined $_[0] && looks_like_number($_[0]) && $_[0] == 1) }, + Num => sub { defined $_[0] && looks_like_number($_[0]) }, + Int => sub { defined $_[0] && looks_like_number($_[0]) && $_[0] == int($_[0]) }, + Str => sub { defined $_[0] }, + Object => sub { defined $_[0] && blessed($_[0]) }, + FileHandle => sub { local $@; require IO::Handle; fileno($_[0]) && $_[0]->opened }, + + map { + my $type = /R/ ? $_ : uc $_; + $_ . "Ref" => sub { ref $_[0] eq $type } + } qw(Array Code Hash Regexp Glob Scalar) +); + +sub check_type_constaints { + my ($attribute, $type_check, $check_name, $val) = @_; + ( ref($type_check) eq 'CODE' + ? $type_check->($val) + : (ref $val eq $type_check + || ($val && $val eq $type_check) + || (exists $TYPES{$type_check} && $TYPES{$type_check}->($val))) + ) + || Carp::confess( + qq<Attribute ($attribute) does not pass the type constraint because: > + . qq<Validation failed for '$check_name' with value > + . (defined $val ? Lmo::Dumper($val) : 'undef') ) +} + +sub _nested_constraints { + my ($attribute, $aggregate_type, $type) = @_; + + my $inner_types; + if ( $type =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $inner_types = _nested_constraints($1, $2); + } + else { + $inner_types = $TYPES{$type}; + } + + if ( $aggregate_type eq 'ArrayRef' ) { + return sub { + my ($val) = @_; + return unless ref($val) eq ref([]); + + if ($inner_types) { + for my $value ( @{$val} ) { + return unless $inner_types->($value) + } + } + else { + for my $value ( @{$val} ) { + return unless $value && ($value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type))); + } + } + return 1; + }; + } + elsif ( $aggregate_type eq 'Maybe' ) { + return sub { + my ($value) = @_; + return 1 if ! 
defined($value); + if ($inner_types) { + return unless $inner_types->($value) + } + else { + return unless $value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type)); + } + return 1; + } + } + else { + Carp::confess("Nested aggregate types are only implemented for ArrayRefs and Maybe"); + } +} + +1; +} +# ########################################################################### +# End Lmo::Types package +# ########################################################################### + +# ########################################################################### +# Lmo package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo.pm +# t/lib/Lmo.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +BEGIN { +$INC{"Lmo.pm"} = __FILE__; +package Lmo; +our $VERSION = '0.30_Percona'; # Forked from 0.30 of Mo. 
+ + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + +use Lmo::Meta; +use Lmo::Object; +use Lmo::Types; + +use Lmo::Utils; + +my %export_for; +sub import { + warnings->import(qw(FATAL all)); + strict->import(); + + my $caller = scalar caller(); # Caller's package + my %exports = ( + extends => \&extends, + has => \&has, + with => \&with, + override => \&override, + confess => \&Carp::confess, + ); + + $export_for{$caller} = \%exports; + + for my $keyword ( keys %exports ) { + _install_coderef "${caller}::$keyword" => $exports{$keyword}; + } + + if ( !@{ *{ _glob_for "${caller}::ISA" }{ARRAY} || [] } ) { + @_ = "Lmo::Object"; + goto *{ _glob_for "${caller}::extends" }{CODE}; + } +} + +sub extends { + my $caller = scalar caller(); + for my $class ( @_ ) { + _load_module($class); + } + _set_package_isa($caller, @_); + _set_inherited_metadata($caller); +} + +sub _load_module { + my ($class) = @_; + + (my $file = $class) =~ s{::|'}{/}g; + $file .= '.pm'; + { local $@; eval { require "$file" } } # or warn $@; + return; +} + +sub with { + my $package = scalar caller(); + require Role::Tiny; + for my $role ( @_ ) { + _load_module($role); + _role_attribute_metadata($package, $role); + } + Role::Tiny->apply_roles_to_package($package, @_); +} + +sub _role_attribute_metadata { + my ($package, $role) = @_; + + my $package_meta = Lmo::Meta->metadata_for($package); + my $role_meta = Lmo::Meta->metadata_for($role); + + %$package_meta = (%$role_meta, %$package_meta); +} + +sub has { + my $names = shift; + my $caller = scalar caller(); + + my $class_metadata = Lmo::Meta->metadata_for($caller); + + for my $attribute ( ref $names ? @$names : $names ) { + my %args = @_; + my $method = ($args{is} || '') eq 'ro' + ? sub { + Carp::confess("Cannot assign a value to a read-only accessor at reader ${caller}::${attribute}") + if $#_; + return $_[0]{$attribute}; + } + : sub { + return $#_ + ? 
$_[0]{$attribute} = $_[1] + : $_[0]{$attribute}; + }; + + $class_metadata->{$attribute} = (); + + if ( my $type_check = $args{isa} ) { + my $check_name = $type_check; + + if ( my ($aggregate_type, $inner_type) = $type_check =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $type_check = Lmo::Types::_nested_constraints($attribute, $aggregate_type, $inner_type); + } + + my $check_sub = sub { + my ($new_val) = @_; + Lmo::Types::check_type_constaints($attribute, $type_check, $check_name, $new_val); + }; + + $class_metadata->{$attribute}{isa} = [$check_name, $check_sub]; + my $orig_method = $method; + $method = sub { + $check_sub->($_[1]) if $#_; + goto &$orig_method; + }; + } + + if ( my $builder = $args{builder} ) { + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? $_[0]{$attribute} = $_[0]->$builder + : goto &$original_method + }; + } + + if ( my $code = $args{default} ) { + Carp::confess("${caller}::${attribute}'s default is $code, but should be a coderef") + unless ref($code) eq 'CODE'; + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? 
$_[0]{$attribute} = $_[0]->$code + : goto &$original_method + }; + } + + if ( my $role = $args{does} ) { + my $original_method = $method; + $method = sub { + if ( $#_ ) { + Carp::confess(qq<Attribute ($attribute) doesn't consume a '$role' role>) + unless Scalar::Util::blessed($_[1]) && eval { $_[1]->does($role) } + } + goto &$original_method + }; + } + + if ( my $coercion = $args{coerce} ) { + $class_metadata->{$attribute}{coerce} = $coercion; + my $original_method = $method; + $method = sub { + if ( $#_ ) { + return $original_method->($_[0], $coercion->($_[1])) + } + goto &$original_method; + } + } + + _install_coderef "${caller}::$attribute" => $method; + + if ( $args{required} ) { + $class_metadata->{$attribute}{required} = 1; + } + + if ($args{clearer}) { + _install_coderef "${caller}::$args{clearer}" + => sub { delete shift->{$attribute} } + } + + if ($args{predicate}) { + _install_coderef "${caller}::$args{predicate}" + => sub { exists shift->{$attribute} } + } + + if ($args{handles}) { + _has_handles($caller, $attribute, \%args); + } + + if (exists $args{init_arg}) { + $class_metadata->{$attribute}{init_arg} = $args{init_arg}; + } + } +} + +sub _has_handles { + my ($caller, $attribute, $args) = @_; + my $handles = $args->{handles}; + + my $ref = ref $handles; + my $kv; + if ( $ref eq ref [] ) { + $kv = { map { $_,$_ } @{$handles} }; + } + elsif ( $ref eq ref {} ) { + $kv = $handles; + } + elsif ( $ref eq ref qr// ) { + Carp::confess("Cannot delegate methods based on a Regexp without a type constraint (isa)") + unless $args->{isa}; + my $target_class = $args->{isa}; + $kv = { + map { $_, $_ } + grep { $_ =~ $handles } + grep { !exists $Lmo::Object::{$_} && $target_class->can($_) } + grep { !$export_for{$target_class}->{$_} } + keys %{ _stash_for $target_class } + }; + } + else { + Carp::confess("handles for $ref not yet implemented"); + } + + while ( my ($method, $target) = each %{$kv} ) { + my $name = _glob_for "${caller}::$method"; + Carp::confess("You cannot overwrite a locally defined method ($method) with a 
delegation") + if defined &$name; + + my ($target, @curried_args) = ref($target) ? @$target : $target; + *$name = sub { + my $self = shift; + my $delegate_to = $self->$attribute(); + my $error = "Cannot delegate $method to $target because the value of $attribute"; + Carp::confess("$error is not defined") unless $delegate_to; + Carp::confess("$error is not an object (got '$delegate_to')") + unless Scalar::Util::blessed($delegate_to) || (!ref($delegate_to) && $delegate_to->can($target)); + return $delegate_to->$target(@curried_args, @_); + } + } +} + +sub _set_package_isa { + my ($package, @new_isa) = @_; + my $package_isa = \*{ _glob_for "${package}::ISA" }; + @{*$package_isa} = @new_isa; +} + +sub _set_inherited_metadata { + my $class = shift; + my $class_metadata = Lmo::Meta->metadata_for($class); + my $linearized_isa = mro::get_linear_isa($class); + my %new_metadata; + + for my $isa_class (reverse @$linearized_isa) { + my $isa_metadata = Lmo::Meta->metadata_for($isa_class); + %new_metadata = ( + %new_metadata, + %$isa_metadata, + ); + } + %$class_metadata = %new_metadata; +} + +sub unimport { + my $caller = scalar caller(); + my $target = caller; + _unimport_coderefs($target, keys %{$export_for{$caller}}); +} + +sub Dumper { + require Data::Dumper; + local $Data::Dumper::Indent = 0; + local $Data::Dumper::Sortkeys = 0; + local $Data::Dumper::Quotekeys = 0; + local $Data::Dumper::Terse = 1; + + Data::Dumper::Dumper(@_) +} + +BEGIN { + if ($] >= 5.010) { + { local $@; require mro; } + } + else { + local $@; + eval { + require MRO::Compat; + } or do { + *mro::get_linear_isa = *mro::get_linear_isa_dfs = sub { + no strict 'refs'; + + my $classname = shift; + + my @lin = ($classname); + my %stored; + foreach my $parent (@{"$classname\::ISA"}) { + my $plin = mro::get_linear_isa_dfs($parent); + foreach (@$plin) { + next if exists $stored{$_}; + push(@lin, $_); + $stored{$_} = 1; + } + } + return \@lin; + }; + } + } +} + +sub override { + my ($methods, $code) = @_; + my 
$caller = scalar caller; + + for my $method ( ref($methods) ? @$methods : $methods ) { + my $full_method = "${caller}::${method}"; + *{_glob_for $full_method} = $code; + } +} + +} +1; +} +# ########################################################################### +# End Lmo package +# ########################################################################### + +# ########################################################################### +# Percona::WebAPI::Representation package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/WebAPI/Representation.pm +# t/lib/Percona/WebAPI/Representation.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Percona::WebAPI::Representation; + +eval { + require JSON; +}; + +require Exporter; +our @ISA = qw(Exporter); +our @EXPORT_OK = qw( + as_hashref + as_json + as_config +); + +sub as_hashref { + my ($resource, %args) = @_; + + my $as_hashref = { %$resource }; + + if ( !defined $args{with_links} || !$args{with_links} ) { + delete $as_hashref->{links}; + } + + return $as_hashref; +} + +sub as_json { + my ($resource, %args) = @_; + + my $json = $args{json} || JSON->new; + $json->allow_blessed([]); + $json->convert_blessed([]); + + my $text = $json->encode( + ref $resource eq 'ARRAY' ? $resource : as_hashref($resource, %args) + ); + if ( $args{json} && $text ) { # for testing + chomp($text); + $text .= "\n"; + } + return $text; +} + +sub as_config { + my $resource = shift; + if ( !$resource->isa('Percona::WebAPI::Resource::Config') ) { + die "Only Config resources can be represented as config.\n"; + } + my $as_hashref = as_hashref($resource); + my $options = $as_hashref->{options}; + my $config = join("\n", + map { defined $options->{$_} ? "$_=$options->{$_}" : "$_" } + sort keys %$options + ) . 
"\n"; + return $config; +} + +1; +} +# ########################################################################### +# End Percona::WebAPI::Representation package +# ########################################################################### + +# ########################################################################### +# Percona::WebAPI::Client package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/WebAPI/Client.pm +# t/lib/Percona/WebAPI/Client.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Percona::WebAPI::Client; + +our $VERSION = '0.01'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +eval { + require LWP; + require JSON; +}; + +use Scalar::Util qw(blessed); + +use Lmo; +use Percona::Toolkit; +use Percona::WebAPI::Representation; +use Percona::WebAPI::Exception::Request; +use Percona::WebAPI::Exception::Resource; + +Percona::WebAPI::Representation->import(qw(as_json)); +Percona::Toolkit->import(qw(_d Dumper have_required_args)); + +has 'api_key' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'entry_link' => ( + is => 'rw', + isa => 'Str', + required => 0, + default => sub { return 'https://cloud-api.percona.com' }, +); + +has 'ua' => ( + is => 'rw', + isa => 'Object', + lazy => 1, + required => 0, + builder => '_build_ua', +); + +has 'response' => ( + is => 'rw', + isa => 'Object', + required => 0, + default => undef, +); + +sub _build_ua { + my $self = shift; + my $ua = LWP::UserAgent->new; + $ua->agent("Percona::WebAPI::Client/$Percona::WebAPI::Client::VERSION"); + $ua->default_header('Content-Type', 'application/json'); + $ua->default_header('X-Percona-API-Key', $self->api_key); + return $ua; +} + +sub get { + my ($self, %args) = @_; + 
+ have_required_args(\%args, qw( + link + )) or die; + my ($link) = $args{link}; + + eval { + $self->_request( + method => 'GET', + link => $link, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if (blessed($e) && $e->isa('Percona::WebAPI::Exception::Request')) { + die $e; + } + else { + die "Unknown error: $e"; + } + } + + my $resource = eval { + JSON::decode_json($self->response->content); + }; + if ( $EVAL_ERROR ) { + warn sprintf "Error decoding resource: %s: %s", + $self->response->content, + $EVAL_ERROR; + return; + } + + my $resource_objects; + if ( my $type = $self->response->headers->{'x-percona-resource-type'} ) { + eval { + $type = "Percona::WebAPI::Resource::$type"; + if ( ref $resource eq 'ARRAY' ) { + PTDEBUG && _d('Got a list of', $type, 'resources'); + $resource_objects = []; + foreach my $attribs ( @$resource ) { + my $obj = $type->new(%$attribs); + push @$resource_objects, $obj; + } + } + else { + PTDEBUG && _d('Got a', $type, 'resource', Dumper($resource)); + $resource_objects = $type->new(%$resource); + } + }; + if ( my $e = $EVAL_ERROR ) { + die Percona::WebAPI::Exception::Resource->new( + type => $type, + link => $link, + data => (ref $resource eq 'ARRAY' ? 
$resource : [ $resource ]), + error => $e, + ); + } + } + elsif ( exists $resource->{links} ) { + $resource_objects = $resource->{links}; + } + else { + warn "Did not get X-Percona-Resource-Type or links from $link\n"; + } + + return $resource_objects; +} + +sub post { + my $self = shift; + $self->_set( + @_, + method => 'POST', + ); + return $self->response->header('Location'); +} + +sub put { + my $self = shift; + $self->_set( + @_, + method => 'PUT', + ); + return $self->response->header('Location'); +} + +sub delete { + my ($self, %args) = @_; + have_required_args(\%args, qw( + link + )) or die; + my ($link) = $args{link}; + + eval { + $self->_request( + method => 'DELETE', + link => $link, + headers => { 'Content-Length' => 0 }, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if (blessed($e) && $e->isa('Percona::WebAPI::Exception::Request')) { + die $e; + } + else { + die "Unknown error: $e"; + } + } + + return; +} + +sub _set { + my ($self, %args) = @_; + have_required_args(\%args, qw( + method + resources + link + )) or die; + my $method = $args{method}; + my $res = $args{resources}; + my $link = $args{link}; + + my $headers = $args{headers}; + + my $content = ''; + if ( ref($res) eq 'ARRAY' ) { + PTDEBUG && _d('List of resources'); + $content = '[' . join(",\n", map { as_json($_) } @$res) . 
']'; + } + elsif ( ref($res) ) { + PTDEBUG && _d('Resource object'); + $content = as_json($res); + } + elsif ( $res !~ m/\n/ && -f $res ) { + PTDEBUG && _d('List of resources in file', $res); + $content = '['; + my $data = do { + local $INPUT_RECORD_SEPARATOR = undef; + open my $fh, '<', $res + or die "Error opening $res: $OS_ERROR"; + <$fh>; + }; + $data =~ s/,?\s*$/]/; + $content .= $data; + } + else { + PTDEBUG && _d('Resource text'); + $content = $res; + } + + eval { + $self->_request( + method => $method, + link => $link, + content => $content, + headers => $headers, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if (blessed($e) && $e->isa('Percona::WebAPI::Exception::Request')) { + die $e; + } + else { + die "Unknown error: $e"; + } + } + + return; +} + +sub _request { + my ($self, %args) = @_; + + have_required_args(\%args, qw( + method + link + )) or die; + my $method = $args{method}; + my $link = $args{link}; + + my $content = $args{content}; + my $headers = $args{headers}; + + my $req = HTTP::Request->new($method => $link); + if ( $content ) { + $req->content($content); + } + if ( $headers ) { + map { $req->header($_ => $headers->{$_}) } keys %$headers; + } + PTDEBUG && _d('Request', $method, $link, Dumper($req)); + + my $response = $self->ua->request($req); + PTDEBUG && _d('Response', Dumper($response)); + + $self->response($response); + + if ( !($response->code >= 200 && $response->code < 400) ) { + die Percona::WebAPI::Exception::Request->new( + method => $method, + url => $link, + content => $content, + status => $response->code, + error => "Failed to $method $link", + ); + } + + return; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Client package +# ########################################################################### + +# ########################################################################### +# Percona::WebAPI::Exception::Request package +# This package is a copy 
without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/WebAPI/Exception/Request.pm +# t/lib/Percona/WebAPI/Exception/Request.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Percona::WebAPI::Exception::Request; + +use Lmo; +use overload '""' => \&as_string; + +has 'method' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'url' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'content' => ( + is => 'ro', + isa => 'Maybe[Str]', + required => 0, +); + +has 'status' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'error' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +sub as_string { + my $self = shift; + chomp(my $error = $self->error); + $error =~ s/\n/ /g; + return sprintf "%s\nRequest: %s %s %s\nStatus: %d\n", + $error, $self->method, $self->url, $self->content || '', $self->status; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Exception::Request package +# ########################################################################### + +# ########################################################################### +# Percona::WebAPI::Exception::Resource package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/WebAPI/Exception/Resource.pm +# t/lib/Percona/WebAPI/Exception/Resource.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Percona::WebAPI::Exception::Resource; + +use Lmo; +use overload '""' => \&as_string; +use Data::Dumper; + +has 'type' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'link' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'data' => ( + is => 'ro', + isa => 'ArrayRef', + required => 1, +); + +has 'error' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +sub as_string { + my $self = shift; + chomp(my $error = $self->error); + local $Data::Dumper::Indent = 1; + local $Data::Dumper::Sortkeys = 1; + local $Data::Dumper::Quotekeys = 0; + return sprintf "Invalid %s resource from %s:\n\n%s\nError: %s\n\n", + $self->type, $self->link, Dumper($self->data), $error; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Exception::Resource package +# ########################################################################### + +# ########################################################################### +# Percona::WebAPI::Resource::Agent package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/WebAPI/Resource/Agent.pm +# t/lib/Percona/WebAPI/Resource/Agent.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Percona::WebAPI::Resource::Agent; + +use Lmo; + +has 'uuid' => ( + is => 'ro', + isa => 'Str', + required => 0, +); + +has 'username' => ( + is => 'rw', + isa => 'Str', + required => 0, + default => sub { return $ENV{USER} || $ENV{LOGNAME} }, +); + +has 'hostname' => ( + is => 'rw', + isa => 'Str', + required => 0, + default => sub { + chomp(my $hostname = `hostname`); + return $hostname; + }, +); + +has 'alias' => ( + is => 'rw', + isa => 'Str', + required => 0, +); + +has 'versions' => ( + is => 'rw', + isa => 'Maybe[HashRef]', + required => 0, +); + +has 'links' => ( + is => 'rw', + isa => 'Maybe[HashRef]', + required => 0, + default => sub { return {} }, +); + +sub name { + my ($self) = @_; + return $self->alias || $self->hostname || $self->uuid || 'Unknown'; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Resource::Agent package +# ########################################################################### + +# ########################################################################### +# Percona::WebAPI::Resource::Config package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/WebAPI/Resource/Config.pm +# t/lib/Percona/WebAPI/Resource/Config.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Percona::WebAPI::Resource::Config; + +use Lmo; + +has 'ts' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'name' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'options' => ( + is => 'ro', + isa => 'HashRef', + required => 1, +); + +has 'links' => ( + is => 'rw', + isa => 'Maybe[HashRef]', + required => 0, + default => sub { return {} }, +); + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Resource::Config package +# ########################################################################### + +# ########################################################################### +# Percona::WebAPI::Resource::Service package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/WebAPI/Resource/Service.pm +# t/lib/Percona/WebAPI/Resource/Service.t +# See https://launchpad.net/percona-toolkit for more information. 
# ###########################################################################
{
package Percona::WebAPI::Resource::Service;

# A Service resource from the Percona Web API: a named, scheduled group
# of Task resources.  NOTE(review): schedule/meta semantics inferred
# from names; confirm against lib/Percona/WebAPI/Resource/Service.pm.

use Lmo;

# Timestamp of the service definition (integer, required).
has 'ts' => (
   is       => 'ro',
   isa      => 'Int',
   required => 1,
);

# Name identifying this service.
has 'name' => (
   is       => 'ro',
   isa      => 'Str',
   required => 1,
);

# The tasks that make up this service, as Task objects.  Plain hashrefs
# passed by the caller are inflated by BUILDARGS below.
has 'tasks' => (
   is       => 'ro',
   isa      => 'ArrayRef[Percona::WebAPI::Resource::Task]',
   required => 1,
);

# Schedule on which the service is run (string; format not shown here).
has 'run_schedule' => (
   is       => 'ro',
   isa      => 'Str',
   required => 0,
);

# Schedule on which the service's output is spooled.
has 'spool_schedule' => (
   is       => 'ro',
   isa      => 'Str',
   required => 0,
);

# Boolean flag; defaults to false.
has 'meta' => (
   is       => 'ro',
   isa      => 'Bool',
   required => 0,
   default  => sub { return 0 },
);

# True if the service should be run only once; defaults to false.
has 'run_once' => (
   is       => 'ro',
   isa      => 'Bool',
   required => 0,
   default  => sub { return 0 },
);

# Hypermedia links for this resource; empty hash by default.
has 'links' => (
   is       => 'rw',
   isa      => 'Maybe[HashRef]',
   required => 0,
   default  => sub { return {} },
);

# Constructor argument munging: when tasks arrive as an arrayref of
# plain hashrefs (e.g. decoded JSON), inflate each one into a
# Percona::WebAPI::Resource::Task object so the 'tasks' type
# constraint is satisfied.
sub BUILDARGS {
   my ($class, %args) = @_;
   if ( ref $args{tasks} eq 'ARRAY' ) {
      $args{tasks} = [
         map { Percona::WebAPI::Resource::Task->new(%$_) } @{ $args{tasks} }
      ];
   }
   return $class->SUPER::BUILDARGS(%args);
}

no Lmo;
1;
}
# ###########################################################################
# End Percona::WebAPI::Resource::Service package
# ###########################################################################

# ###########################################################################
# Percona::WebAPI::Resource::Task package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the Bazaar repository at,
#   lib/Percona/WebAPI/Resource/Task.pm
#   t/lib/Percona/WebAPI/Resource/Task.t
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Percona::WebAPI::Resource::Task;

# A single Task within a Service: either an external program to run or
# a query to execute, with an optional output destination.
# NOTE(review): program/query/output semantics inferred from names;
# confirm against lib/Percona/WebAPI/Resource/Task.pm.

use Lmo;

# Name identifying this task.
has 'name' => (
   is       => 'ro',
   isa      => 'Str',
   required => 1,
);

# Ordinal position of the task (integer, required).
has 'number' => (
   is       => 'ro',
   isa      => 'Int',
   required => 1,
);

# External program to run, if any.
has 'program' => (
   is       => 'ro',
   isa      => 'Maybe[Str]',
   required => 0,
);

# Query to execute, if any.
has 'query' => (
   is       => 'ro',
   isa      => 'Maybe[Str]',
   required => 0,
);

# Where the task's output goes, if anywhere.
has 'output' => (
   is       => 'ro',
   isa      => 'Maybe[Str]',
   required => 0,
);

# JSON serialization hook: return a shallow, unblessed copy of the
# object's underlying hash so JSON encoders can emit it.
sub TO_JSON {
   my ($self) = @_;
   return { %$self };
}

no Lmo;
1;
}
# ###########################################################################
# End Percona::WebAPI::Resource::Task package
# ###########################################################################

# ###########################################################################
# Percona::WebAPI::Resource::LogEntry package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the Bazaar repository at,
#   lib/Percona/WebAPI/Resource/LogEntry.pm
#   t/lib/Percona/WebAPI/Resource/LogEntry.t
# See https://launchpad.net/percona-toolkit for more information.
# ###########################################################################
{
package Percona::WebAPI::Resource::LogEntry;

# A LogEntry resource: one log line produced by an agent process,
# optionally tied to a service.  NOTE(review): timestamp semantics
# inferred from names; confirm against
# lib/Percona/WebAPI/Resource/LogEntry.pm.

use Lmo;

# PID of the process that produced the entry.
has 'pid' => (
   is       => 'ro',
   isa      => 'Int',
   required => 1,
);

# Service the entry relates to, if any.
has 'service' => (
   is       => 'ro',
   isa      => 'Str',
   required => 0,
);

# Timestamp of the data the entry refers to (integer; optional).
has 'data_ts' => (
   is       => 'ro',
   isa      => 'Int',
   required => 0,
);

# Timestamp of when the entry was logged.  NOTE(review): declared Str
# (unlike data_ts which is Int) — presumably a formatted date string;
# confirm with the producer of these entries.
has 'entry_ts' => (
   is       => 'ro',
   isa      => 'Str',
   required => 1,
);

# Numeric severity of the entry.
has 'log_level' => (
   is       => 'ro',
   isa      => 'Int',
   required => 1,
);

# The log message text itself.
has 'message' => (
   is       => 'ro',
   isa      => 'Str',
   required => 1,
);

no Lmo;
1;
}
# ###########################################################################
# End Percona::WebAPI::Resource::LogEntry package
# ###########################################################################

# ###########################################################################
# VersionCheck package
# This package is a copy without comments from the original. The original
# with comments and its test file can be found in the Bazaar repository at,
#   lib/VersionCheck.pm
#   t/lib/VersionCheck.t
# See https://launchpad.net/percona-toolkit for more information.
+# ########################################################################### +{ +package VersionCheck; + + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +local $Data::Dumper::Indent = 1; +local $Data::Dumper::Sortkeys = 1; +local $Data::Dumper::Quotekeys = 0; + +use Digest::MD5 qw(md5_hex); +use Sys::Hostname qw(hostname); +use File::Basename qw(); +use File::Spec; +use FindBin qw(); + +eval { + require Percona::Toolkit; + require HTTPMicro; +}; + +{ + my $file = 'percona-version-check'; + my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; + my @vc_dirs = ( + '/etc/percona', + '/etc/percona-toolkit', + '/tmp', + "$home", + ); + + sub version_check_file { + foreach my $dir ( @vc_dirs ) { + if ( -d $dir && -w $dir ) { + PTDEBUG && _d('Version check file', $file, 'in', $dir); + return $dir . '/' . $file; + } + } + PTDEBUG && _d('Version check file', $file, 'in', $ENV{PWD}); + return $file; # in the CWD + } +} + +sub version_check_time_limit { + return 60 * 60 * 24; # one day +} + + +sub version_check { + my (%args) = @_; + + my $instances = $args{instances} || []; + my $instances_to_check; + + PTDEBUG && _d('FindBin::Bin:', $FindBin::Bin); + if ( !$args{force} ) { + if ( $FindBin::Bin + && (-d "$FindBin::Bin/../.bzr" || -d "$FindBin::Bin/../../.bzr") ) { + PTDEBUG && _d("$FindBin::Bin/../.bzr disables --version-check"); + return; + } + } + + eval { + foreach my $instance ( @$instances ) { + my ($name, $id) = get_instance_id($instance); + $instance->{name} = $name; + $instance->{id} = $id; + } + + push @$instances, { name => 'system', id => 0 }; + + $instances_to_check = get_instances_to_check( + instances => $instances, + vc_file => $args{vc_file}, # testing + now => $args{now}, # testing + ); + PTDEBUG && _d(scalar @$instances_to_check, 'instances to check'); + return unless @$instances_to_check; + + my $protocol = 'https'; # optimistic, but... 
+ eval { require IO::Socket::SSL; }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $protocol = 'http'; + } + PTDEBUG && _d('Using', $protocol); + + my $advice = pingback( + instances => $instances_to_check, + protocol => $protocol, + url => $args{url} # testing + || $ENV{PERCONA_VERSION_CHECK_URL} # testing + || "$protocol://v.percona.com", + ); + if ( $advice ) { + PTDEBUG && _d('Advice:', Dumper($advice)); + if ( scalar @$advice > 1) { + print "\n# " . scalar @$advice . " software updates are " + . "available:\n"; + } + else { + print "\n# A software update is available:\n"; + } + print join("\n", map { "# * $_" } @$advice), "\n\n"; + } + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Version check failed:', $EVAL_ERROR); + } + + if ( @$instances_to_check ) { + eval { + update_check_times( + instances => $instances_to_check, + vc_file => $args{vc_file}, # testing + now => $args{now}, # testing + ); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Error updating version check file:', $EVAL_ERROR); + } + } + + if ( $ENV{PTDEBUG_VERSION_CHECK} ) { + warn "Exiting because the PTDEBUG_VERSION_CHECK " + . 
"environment variable is defined.\n"; + exit 255; + } + + return; +} + +sub get_instances_to_check { + my (%args) = @_; + + my $instances = $args{instances}; + my $now = $args{now} || int(time); + my $vc_file = $args{vc_file} || version_check_file(); + + if ( !-f $vc_file ) { + PTDEBUG && _d('Version check file', $vc_file, 'does not exist;', + 'version checking all instances'); + return $instances; + } + + open my $fh, '<', $vc_file or die "Cannot open $vc_file: $OS_ERROR"; + chomp(my $file_contents = do { local $/ = undef; <$fh> }); + PTDEBUG && _d('Version check file', $vc_file, 'contents:', $file_contents); + close $fh; + my %last_check_time_for = $file_contents =~ /^([^,]+),(.+)$/mg; + + my $check_time_limit = version_check_time_limit(); + my @instances_to_check; + foreach my $instance ( @$instances ) { + my $last_check_time = $last_check_time_for{ $instance->{id} }; + PTDEBUG && _d('Intsance', $instance->{id}, 'last checked', + $last_check_time, 'now', $now, 'diff', $now - ($last_check_time || 0), + 'hours until next check', + sprintf '%.2f', + ($check_time_limit - ($now - ($last_check_time || 0))) / 3600); + if ( !defined $last_check_time + || ($now - $last_check_time) >= $check_time_limit ) { + PTDEBUG && _d('Time to check', Dumper($instance)); + push @instances_to_check, $instance; + } + } + + return \@instances_to_check; +} + +sub update_check_times { + my (%args) = @_; + + my $instances = $args{instances}; + my $now = $args{now} || int(time); + my $vc_file = $args{vc_file} || version_check_file(); + PTDEBUG && _d('Updating last check time:', $now); + + open my $fh, '>', $vc_file or die "Cannot write to $vc_file: $OS_ERROR"; + foreach my $instance ( sort { $a->{id} cmp $b->{id} } @$instances ) { + PTDEBUG && _d('Updated:', Dumper($instance)); + print { $fh } $instance->{id} . ',' . $now . 
"\n"; + } + close $fh; + + return; +} + +sub get_instance_id { + my ($instance) = @_; + + my $dbh = $instance->{dbh}; + my $dsn = $instance->{dsn}; + + my $sql = q{SELECT CONCAT(@@hostname, @@port)}; + PTDEBUG && _d($sql); + my ($name) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $sql = q{SELECT @@hostname}; + PTDEBUG && _d($sql); + ($name) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $name = ($dsn->{h} || 'localhost') . ($dsn->{P} || 3306); + } + else { + $sql = q{SHOW VARIABLES LIKE 'port'}; + PTDEBUG && _d($sql); + my (undef, $port) = eval { $dbh->selectrow_array($sql) }; + PTDEBUG && _d('port:', $port); + $name .= $port || ''; + } + } + my $id = md5_hex($name); + + PTDEBUG && _d('MySQL instance:', $id, $name, $dsn); + + return $name, $id; +} + + +sub pingback { + my (%args) = @_; + my @required_args = qw(url instances); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my $url = $args{url}; + my $instances = $args{instances}; + + my $ua = $args{ua} || HTTPMicro->new( timeout => 3 ); + + my $response = $ua->request('GET', $url); + PTDEBUG && _d('Server response:', Dumper($response)); + die "No response from GET $url" + if !$response; + die("GET on $url returned HTTP status $response->{status}; expected 200\n", + ($response->{content} || '')) if $response->{status} != 200; + die("GET on $url did not return any programs to check") + if !$response->{content}; + + my $items = parse_server_response( + response => $response->{content} + ); + die "Failed to parse server requested programs: $response->{content}" + if !scalar keys %$items; + + my $versions = get_versions( + items => $items, + instances => $instances, + ); + die "Failed to get any program versions; should have at least gotten Perl" + if !scalar keys %$versions; + + my $client_content = encode_client_response( + items => $items, + versions => $versions, + 
general_id => md5_hex( hostname() ), + ); + + my $client_response = { + headers => { "X-Percona-Toolkit-Tool" => File::Basename::basename($0) }, + content => $client_content, + }; + PTDEBUG && _d('Client response:', Dumper($client_response)); + + $response = $ua->request('POST', $url, $client_response); + PTDEBUG && _d('Server suggestions:', Dumper($response)); + die "No response from POST $url $client_response" + if !$response; + die "POST $url returned HTTP status $response->{status}; expected 200" + if $response->{status} != 200; + + return unless $response->{content}; + + $items = parse_server_response( + response => $response->{content}, + split_vars => 0, + ); + die "Failed to parse server suggestions: $response->{content}" + if !scalar keys %$items; + my @suggestions = map { $_->{vars} } + sort { $a->{item} cmp $b->{item} } + values %$items; + + return \@suggestions; +} + +sub encode_client_response { + my (%args) = @_; + my @required_args = qw(items versions general_id); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($items, $versions, $general_id) = @args{@required_args}; + + my @lines; + foreach my $item ( sort keys %$items ) { + next unless exists $versions->{$item}; + if ( ref($versions->{$item}) eq 'HASH' ) { + my $mysql_versions = $versions->{$item}; + for my $id ( sort keys %$mysql_versions ) { + push @lines, join(';', $id, $item, $mysql_versions->{$id}); + } + } + else { + push @lines, join(';', $general_id, $item, $versions->{$item}); + } + } + + my $client_response = join("\n", @lines) . 
"\n"; + return $client_response; +} + +sub parse_server_response { + my (%args) = @_; + my @required_args = qw(response); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($response) = @args{@required_args}; + + my %items = map { + my ($item, $type, $vars) = split(";", $_); + if ( !defined $args{split_vars} || $args{split_vars} ) { + $vars = [ split(",", ($vars || '')) ]; + } + $item => { + item => $item, + type => $type, + vars => $vars, + }; + } split("\n", $response); + + PTDEBUG && _d('Items:', Dumper(\%items)); + + return \%items; +} + +my %sub_for_type = ( + os_version => \&get_os_version, + perl_version => \&get_perl_version, + perl_module_version => \&get_perl_module_version, + mysql_variable => \&get_mysql_variable, + bin_version => \&get_bin_version, +); + +sub valid_item { + my ($item) = @_; + return unless $item; + if ( !exists $sub_for_type{ $item->{type} } ) { + PTDEBUG && _d('Invalid type:', $item->{type}); + return 0; + } + return 1; +} + +sub get_versions { + my (%args) = @_; + my @required_args = qw(items); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($items) = @args{@required_args}; + + my %versions; + foreach my $item ( values %$items ) { + next unless valid_item($item); + eval { + my $version = $sub_for_type{ $item->{type} }->( + item => $item, + instances => $args{instances}, + ); + if ( $version ) { + chomp $version unless ref($version); + $versions{$item->{item}} = $version; + } + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Error getting version for', Dumper($item), $EVAL_ERROR); + } + } + + return \%versions; +} + + +sub get_os_version { + if ( $OSNAME eq 'MSWin32' ) { + require Win32; + return Win32::GetOSDisplayName(); + } + + chomp(my $platform = `uname -s`); + PTDEBUG && _d('platform:', $platform); + return $OSNAME unless $platform; + + chomp(my $lsb_release + = `which lsb_release 2>/dev/null | awk '{print \$1}'` || ''); + PTDEBUG && 
_d('lsb_release:', $lsb_release); + + my $release = ""; + + if ( $platform eq 'Linux' ) { + if ( -f "/etc/fedora-release" ) { + $release = `cat /etc/fedora-release`; + } + elsif ( -f "/etc/redhat-release" ) { + $release = `cat /etc/redhat-release`; + } + elsif ( -f "/etc/system-release" ) { + $release = `cat /etc/system-release`; + } + elsif ( $lsb_release ) { + $release = `$lsb_release -ds`; + } + elsif ( -f "/etc/lsb-release" ) { + $release = `grep DISTRIB_DESCRIPTION /etc/lsb-release`; + $release =~ s/^\w+="([^"]+)".+/$1/; + } + elsif ( -f "/etc/debian_version" ) { + chomp(my $rel = `cat /etc/debian_version`); + $release = "Debian $rel"; + if ( -f "/etc/apt/sources.list" ) { + chomp(my $code_name = `awk '/^deb/ {print \$3}' /etc/apt/sources.list | awk -F/ '{print \$1}'| awk 'BEGIN {FS="|"} {print \$1}' | sort | uniq -c | sort -rn | head -n1 | awk '{print \$2}'`); + $release .= " ($code_name)" if $code_name; + } + } + elsif ( -f "/etc/os-release" ) { # openSUSE + chomp($release = `grep PRETTY_NAME /etc/os-release`); + $release =~ s/^PRETTY_NAME="(.+)"$/$1/; + } + elsif ( `ls /etc/*release 2>/dev/null` ) { + if ( `grep DISTRIB_DESCRIPTION /etc/*release 2>/dev/null` ) { + $release = `grep DISTRIB_DESCRIPTION /etc/*release | head -n1`; + } + else { + $release = `cat /etc/*release | head -n1`; + } + } + } + elsif ( $platform =~ m/(?:BSD|^Darwin)$/ ) { + my $rel = `uname -r`; + $release = "$platform $rel"; + } + elsif ( $platform eq "SunOS" ) { + my $rel = `head -n1 /etc/release` || `uname -r`; + $release = "$platform $rel"; + } + + if ( !$release ) { + PTDEBUG && _d('Failed to get the release, using platform'); + $release = $platform; + } + chomp($release); + + $release =~ s/^"|"$//g; + + PTDEBUG && _d('OS version =', $release); + return $release; +} + +sub get_perl_version { + my (%args) = @_; + my $item = $args{item}; + return unless $item; + + my $version = sprintf '%vd', $PERL_VERSION; + PTDEBUG && _d('Perl version', $version); + return $version; +} + +sub 
get_perl_module_version { + my (%args) = @_; + my $item = $args{item}; + return unless $item; + + my $var = '$' . $item->{item} . '::VERSION'; + my $version = eval "use $item->{item}; $var;"; + PTDEBUG && _d('Perl version for', $var, '=', $version); + return $version; +} + +sub get_mysql_variable { + return get_from_mysql( + show => 'VARIABLES', + @_, + ); +} + +sub get_from_mysql { + my (%args) = @_; + my $show = $args{show}; + my $item = $args{item}; + my $instances = $args{instances}; + return unless $show && $item; + + if ( !$instances || !@$instances ) { + PTDEBUG && _d('Cannot check', $item, + 'because there are no MySQL instances'); + return; + } + + my @versions; + my %version_for; + foreach my $instance ( @$instances ) { + next unless $instance->{id}; # special system instance has id=0 + my $dbh = $instance->{dbh}; + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + my $sql = qq/SHOW $show/; + PTDEBUG && _d($sql); + my $rows = $dbh->selectall_hashref($sql, 'variable_name'); + + my @versions; + foreach my $var ( @{$item->{vars}} ) { + $var = lc($var); + my $version = $rows->{$var}->{value}; + PTDEBUG && _d('MySQL version for', $item->{item}, '=', $version, + 'on', $instance->{name}); + push @versions, $version; + } + $version_for{ $instance->{id} } = join(' ', @versions); + } + + return \%version_for; +} + +sub get_bin_version { + my (%args) = @_; + my $item = $args{item}; + my $cmd = $item->{item}; + return unless $cmd; + + my $sanitized_command = File::Basename::basename($cmd); + PTDEBUG && _d('cmd:', $cmd, 'sanitized:', $sanitized_command); + return if $sanitized_command !~ /\A[a-zA-Z0-9_-]+\z/; + + my $output = `$sanitized_command --version 2>&1`; + PTDEBUG && _d('output:', $output); + + my ($version) = $output =~ /v?([0-9]+\.[0-9]+(?:\.[\w-]+)?)/; + + PTDEBUG && _d('Version for', $sanitized_command, '=', $version); + return $version; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { 
defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End VersionCheck package +# ########################################################################### + +# ########################################################################### +# DSNParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/DSNParser.pm +# t/lib/DSNParser.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package DSNParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 0; +$Data::Dumper::Quotekeys = 0; + +my $dsn_sep = qr/(? {} # h, P, u, etc. Should come from DSN OPTIONS section in POD. + }; + foreach my $opt ( @{$args{opts}} ) { + if ( !$opt->{key} || !$opt->{desc} ) { + die "Invalid DSN option: ", Dumper($opt); + } + PTDEBUG && _d('DSN option:', + join(', ', + map { "$_=" . (defined $opt->{$_} ? 
($opt->{$_} || '') : 'undef') } + keys %$opt + ) + ); + $self->{opts}->{$opt->{key}} = { + dsn => $opt->{dsn}, + desc => $opt->{desc}, + copy => $opt->{copy} || 0, + }; + } + return bless $self, $class; +} + +sub prop { + my ( $self, $prop, $value ) = @_; + if ( @_ > 2 ) { + PTDEBUG && _d('Setting', $prop, 'property'); + $self->{$prop} = $value; + } + return $self->{$prop}; +} + +sub parse { + my ( $self, $dsn, $prev, $defaults ) = @_; + if ( !$dsn ) { + PTDEBUG && _d('No DSN to parse'); + return; + } + PTDEBUG && _d('Parsing', $dsn); + $prev ||= {}; + $defaults ||= {}; + my %given_props; + my %final_props; + my $opts = $self->{opts}; + + foreach my $dsn_part ( split($dsn_sep, $dsn) ) { + $dsn_part =~ s/\\,/,/g; + if ( my ($prop_key, $prop_val) = $dsn_part =~ m/^(.)=(.*)$/ ) { + $given_props{$prop_key} = $prop_val; + } + else { + PTDEBUG && _d('Interpreting', $dsn_part, 'as h=', $dsn_part); + $given_props{h} = $dsn_part; + } + } + + foreach my $key ( keys %$opts ) { + PTDEBUG && _d('Finding value for', $key); + $final_props{$key} = $given_props{$key}; + if ( !defined $final_props{$key} + && defined $prev->{$key} && $opts->{$key}->{copy} ) + { + $final_props{$key} = $prev->{$key}; + PTDEBUG && _d('Copying value for', $key, 'from previous DSN'); + } + if ( !defined $final_props{$key} ) { + $final_props{$key} = $defaults->{$key}; + PTDEBUG && _d('Copying value for', $key, 'from defaults'); + } + } + + foreach my $key ( keys %given_props ) { + die "Unknown DSN option '$key' in '$dsn'. For more details, " + . "please use the --help option, or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation." + unless exists $opts->{$key}; + } + if ( (my $required = $self->prop('required')) ) { + foreach my $key ( keys %$required ) { + die "Missing required DSN option '$key' in '$dsn'. For more details, " + . "please use the --help option, or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation." 
+ unless $final_props{$key}; + } + } + + return \%final_props; +} + +sub parse_options { + my ( $self, $o ) = @_; + die 'I need an OptionParser object' unless ref $o eq 'OptionParser'; + my $dsn_string + = join(',', + map { "$_=".$o->get($_); } + grep { $o->has($_) && $o->get($_) } + keys %{$self->{opts}} + ); + PTDEBUG && _d('DSN string made from options:', $dsn_string); + return $self->parse($dsn_string); +} + +sub as_string { + my ( $self, $dsn, $props ) = @_; + return $dsn unless ref $dsn; + my @keys = $props ? @$props : sort keys %$dsn; + return join(',', + map { "$_=" . ($_ eq 'p' ? '...' : $dsn->{$_}) } + grep { + exists $self->{opts}->{$_} + && exists $dsn->{$_} + && defined $dsn->{$_} + } @keys); +} + +sub usage { + my ( $self ) = @_; + my $usage + = "DSN syntax is key=value[,key=value...] Allowable DSN keys:\n\n" + . " KEY COPY MEANING\n" + . " === ==== =============================================\n"; + my %opts = %{$self->{opts}}; + foreach my $key ( sort keys %opts ) { + $usage .= " $key " + . ($opts{$key}->{copy} ? 'yes ' : 'no ') + . ($opts{$key}->{desc} || '[No description]') + . "\n"; + } + $usage .= "\n If the DSN is a bareword, the word is treated as the 'h' key.\n"; + return $usage; +} + +sub get_cxn_params { + my ( $self, $info ) = @_; + my $dsn; + my %opts = %{$self->{opts}}; + my $driver = $self->prop('dbidriver') || ''; + if ( $driver eq 'Pg' ) { + $dsn = 'DBI:Pg:dbname=' . ( $info->{D} || '' ) . ';' + . join(';', map { "$opts{$_}->{dsn}=$info->{$_}" } + grep { defined $info->{$_} } + qw(h P)); + } + else { + $dsn = 'DBI:mysql:' . ( $info->{D} || '' ) . ';' + . join(';', map { "$opts{$_}->{dsn}=$info->{$_}" } + grep { defined $info->{$_} } + qw(F h P S A)) + . ';mysql_read_default_group=client' + . ($info->{L} ? 
';mysql_local_infile=1' : ''); + } + PTDEBUG && _d($dsn); + return ($dsn, $info->{u}, $info->{p}); +} + +sub fill_in_dsn { + my ( $self, $dbh, $dsn ) = @_; + my $vars = $dbh->selectall_hashref('SHOW VARIABLES', 'Variable_name'); + my ($user, $db) = $dbh->selectrow_array('SELECT USER(), DATABASE()'); + $user =~ s/@.*//; + $dsn->{h} ||= $vars->{hostname}->{Value}; + $dsn->{S} ||= $vars->{'socket'}->{Value}; + $dsn->{P} ||= $vars->{port}->{Value}; + $dsn->{u} ||= $user; + $dsn->{D} ||= $db; +} + +sub get_dbh { + my ( $self, $cxn_string, $user, $pass, $opts ) = @_; + $opts ||= {}; + my $defaults = { + AutoCommit => 0, + RaiseError => 1, + PrintError => 0, + ShowErrorStatement => 1, + mysql_enable_utf8 => ($cxn_string =~ m/charset=utf8/i ? 1 : 0), + }; + @{$defaults}{ keys %$opts } = values %$opts; + if (delete $defaults->{L}) { # L for LOAD DATA LOCAL INFILE, our own extension + $defaults->{mysql_local_infile} = 1; + } + + if ( $opts->{mysql_use_result} ) { + $defaults->{mysql_use_result} = 1; + } + + if ( !$have_dbi ) { + die "Cannot connect to MySQL because the Perl DBI module is not " + . "installed or not found. Run 'perl -MDBI' to see the directories " + . "that Perl searches for DBI. If DBI is not installed, try:\n" + . " Debian/Ubuntu apt-get install libdbi-perl\n" + . " RHEL/CentOS yum install perl-DBI\n" + . " OpenSolaris pkg install pkg:/SUNWpmdbi\n"; + + } + + my $dbh; + my $tries = 2; + while ( !$dbh && $tries-- ) { + PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass, + join(', ', map { "$_=>$defaults->{$_}" } keys %$defaults )); + + $dbh = eval { DBI->connect($cxn_string, $user, $pass, $defaults) }; + + if ( !$dbh && $EVAL_ERROR ) { + if ( $EVAL_ERROR =~ m/locate DBD\/mysql/i ) { + die "Cannot connect to MySQL because the Perl DBD::mysql module is " + . "not installed or not found. Run 'perl -MDBD::mysql' to see " + . "the directories that Perl searches for DBD::mysql. If " + . "DBD::mysql is not installed, try:\n" + . 
" Debian/Ubuntu apt-get install libdbd-mysql-perl\n" + . " RHEL/CentOS yum install perl-DBD-MySQL\n" + . " OpenSolaris pgk install pkg:/SUNWapu13dbd-mysql\n"; + } + elsif ( $EVAL_ERROR =~ m/not a compiled character set|character set utf8/ ) { + PTDEBUG && _d('Going to try again without utf8 support'); + delete $defaults->{mysql_enable_utf8}; + } + if ( !$tries ) { + die $EVAL_ERROR; + } + } + } + + if ( $cxn_string =~ m/mysql/i ) { + my $sql; + + $sql = 'SELECT @@SQL_MODE'; + PTDEBUG && _d($dbh, $sql); + my ($sql_mode) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + die "Error getting the current SQL_MODE: $EVAL_ERROR"; + } + + if ( my ($charset) = $cxn_string =~ m/charset=([\w]+)/ ) { + $sql = qq{/*!40101 SET NAMES "$charset"*/}; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( $EVAL_ERROR ) { + die "Error setting NAMES to $charset: $EVAL_ERROR"; + } + PTDEBUG && _d('Enabling charset for STDOUT'); + if ( $charset eq 'utf8' ) { + binmode(STDOUT, ':utf8') + or die "Can't binmode(STDOUT, ':utf8'): $OS_ERROR"; + } + else { + binmode(STDOUT) or die "Can't binmode(STDOUT): $OS_ERROR"; + } + } + + if ( my $vars = $self->prop('set-vars') ) { + $self->set_vars($dbh, $vars); + } + + $sql = 'SET @@SQL_QUOTE_SHOW_CREATE = 1' + . '/*!40101, @@SQL_MODE=\'NO_AUTO_VALUE_ON_ZERO' + . ($sql_mode ? ",$sql_mode" : '') + . '\'*/'; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( $EVAL_ERROR ) { + die "Error setting SQL_QUOTE_SHOW_CREATE, SQL_MODE" + . ($sql_mode ? " and $sql_mode" : '') + . 
": $EVAL_ERROR"; + } + } + + PTDEBUG && _d('DBH info: ', + $dbh, + Dumper($dbh->selectrow_hashref( + 'SELECT DATABASE(), CONNECTION_ID(), VERSION()/*!50038 , @@hostname*/')), + 'Connection info:', $dbh->{mysql_hostinfo}, + 'Character set info:', Dumper($dbh->selectall_arrayref( + "SHOW VARIABLES LIKE 'character_set%'", { Slice => {}})), + '$DBD::mysql::VERSION:', $DBD::mysql::VERSION, + '$DBI::VERSION:', $DBI::VERSION, + ); + + return $dbh; +} + +sub get_hostname { + my ( $self, $dbh ) = @_; + if ( my ($host) = ($dbh->{mysql_hostinfo} || '') =~ m/^(\w+) via/ ) { + return $host; + } + my ( $hostname, $one ) = $dbh->selectrow_array( + 'SELECT /*!50038 @@hostname, */ 1'); + return $hostname; +} + +sub disconnect { + my ( $self, $dbh ) = @_; + PTDEBUG && $self->print_active_handles($dbh); + $dbh->disconnect; +} + +sub print_active_handles { + my ( $self, $thing, $level ) = @_; + $level ||= 0; + printf("# Active %sh: %s %s %s\n", ($thing->{Type} || 'undef'), "\t" x $level, + $thing, (($thing->{Type} || '') eq 'st' ? $thing->{Statement} || '' : '')) + or die "Cannot print: $OS_ERROR"; + foreach my $handle ( grep {defined} @{ $thing->{ChildHandles} } ) { + $self->print_active_handles( $handle, $level + 1 ); + } +} + +sub copy { + my ( $self, $dsn_1, $dsn_2, %args ) = @_; + die 'I need a dsn_1 argument' unless $dsn_1; + die 'I need a dsn_2 argument' unless $dsn_2; + my %new_dsn = map { + my $key = $_; + my $val; + if ( $args{overwrite} ) { + $val = defined $dsn_1->{$key} ? $dsn_1->{$key} : $dsn_2->{$key}; + } + else { + $val = defined $dsn_2->{$key} ? 
$dsn_2->{$key} : $dsn_1->{$key}; + } + $key => $val; + } keys %{$self->{opts}}; + return \%new_dsn; +} + +sub set_vars { + my ($self, $dbh, $vars) = @_; + + return unless $vars; + + foreach my $var ( sort keys %$vars ) { + my $val = $vars->{$var}->{val}; + + (my $quoted_var = $var) =~ s/_/\\_/; + my ($var_exists, $current_val); + eval { + ($var_exists, $current_val) = $dbh->selectrow_array( + "SHOW VARIABLES LIKE '$quoted_var'"); + }; + my $e = $EVAL_ERROR; + if ( $e ) { + PTDEBUG && _d($e); + } + + if ( $vars->{$var}->{default} && !$var_exists ) { + PTDEBUG && _d('Not setting default var', $var, + 'because it does not exist'); + next; + } + + if ( $current_val && $current_val eq $val ) { + PTDEBUG && _d('Not setting var', $var, 'because its value', + 'is already', $val); + next; + } + + my $sql = "SET SESSION $var=$val"; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( my $set_error = $EVAL_ERROR ) { + chomp($set_error); + $set_error =~ s/ at \S+ line \d+//; + my $msg = "Error setting $var: $set_error"; + if ( $current_val ) { + $msg .= " The current value for $var is $current_val. " + . "If the variable is read only (not dynamic), specify " + . "--set-vars $var=$current_val to avoid this warning, " + . "else manually set the variable and restart MySQL."; + } + warn $msg . "\n\n"; + } + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End DSNParser package +# ########################################################################### + +# ########################################################################### +# OptionParser package +# This package is a copy without comments from the original. 
The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/OptionParser.pm +# t/lib/OptionParser.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package OptionParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use List::Util qw(max); +use Getopt::Long; +use Data::Dumper; + +my $POD_link_re = '[LC]<"?([^">]+)"?>'; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my ($program_name) = $PROGRAM_NAME =~ m/([.A-Za-z-]+)$/; + $program_name ||= $PROGRAM_NAME; + my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; + + my %attributes = ( + 'type' => 1, + 'short form' => 1, + 'group' => 1, + 'default' => 1, + 'cumulative' => 1, + 'negatable' => 1, + ); + + my $self = { + head1 => 'OPTIONS', # These args are used internally + skip_rules => 0, # to instantiate another Option- + item => '--(.*)', # Parser obj that parses the + attributes => \%attributes, # DSN OPTIONS section. Tools + parse_attributes => \&_parse_attribs, # don't tinker with these args. 
+ + %args, + + strict => 1, # disabled by a special rule + program_name => $program_name, + opts => {}, + got_opts => 0, + short_opts => {}, + defaults => {}, + groups => {}, + allowed_groups => {}, + errors => [], + rules => [], # desc of rules for --help + mutex => [], # rule: opts are mutually exclusive + atleast1 => [], # rule: at least one opt is required + disables => {}, # rule: opt disables other opts + defaults_to => {}, # rule: opt defaults to value of other opt + DSNParser => undef, + default_files => [ + "/etc/percona-toolkit/percona-toolkit.conf", + "/etc/percona-toolkit/$program_name.conf", + "$home/.percona-toolkit.conf", + "$home/.$program_name.conf", + ], + types => { + string => 's', # standard Getopt type + int => 'i', # standard Getopt type + float => 'f', # standard Getopt type + Hash => 'H', # hash, formed from a comma-separated list + hash => 'h', # hash as above, but only if a value is given + Array => 'A', # array, similar to Hash + array => 'a', # array, similar to hash + DSN => 'd', # DSN + size => 'z', # size with kMG suffix (powers of 2^10) + time => 'm', # time, with an optional suffix of s/h/m/d + }, + }; + + return bless $self, $class; +} + +sub get_specs { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + my @specs = $self->_pod_to_specs($file); + $self->_parse_specs(@specs); + + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + if ( $contents =~ m/^=head1 DSN OPTIONS/m ) { + PTDEBUG && _d('Parsing DSN OPTIONS'); + my $dsn_attribs = { + dsn => 1, + copy => 1, + }; + my $parse_dsn_attribs = sub { + my ( $self, $option, $attribs ) = @_; + map { + my $val = $attribs->{$_}; + if ( $val ) { + $val = $val eq 'yes' ? 1 + : $val eq 'no' ? 
0 + : $val; + $attribs->{$_} = $val; + } + } keys %$attribs; + return { + key => $option, + %$attribs, + }; + }; + my $dsn_o = new OptionParser( + description => 'DSN OPTIONS', + head1 => 'DSN OPTIONS', + dsn => 0, # XXX don't infinitely recurse! + item => '\* (.)', # key opts are a single character + skip_rules => 1, # no rules before opts + attributes => $dsn_attribs, + parse_attributes => $parse_dsn_attribs, + ); + my @dsn_opts = map { + my $opts = { + key => $_->{spec}->{key}, + dsn => $_->{spec}->{dsn}, + copy => $_->{spec}->{copy}, + desc => $_->{desc}, + }; + $opts; + } $dsn_o->_pod_to_specs($file); + $self->{DSNParser} = DSNParser->new(opts => \@dsn_opts); + } + + if ( $contents =~ m/^=head1 VERSION\n\n^(.+)$/m ) { + $self->{version} = $1; + PTDEBUG && _d($self->{version}); + } + + return; +} + +sub DSNParser { + my ( $self ) = @_; + return $self->{DSNParser}; +}; + +sub get_defaults_files { + my ( $self ) = @_; + return @{$self->{default_files}}; +} + +sub _pod_to_specs { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR"; + + my @specs = (); + my @rules = (); + my $para; + + local $INPUT_RECORD_SEPARATOR = ''; + while ( $para = <$fh> ) { + next unless $para =~ m/^=head1 $self->{head1}/; + last; + } + + while ( $para = <$fh> ) { + last if $para =~ m/^=over/; + next if $self->{skip_rules}; + chomp $para; + $para =~ s/\s+/ /g; + $para =~ s/$POD_link_re/$1/go; + PTDEBUG && _d('Option rule:', $para); + push @rules, $para; + } + + die "POD has no $self->{head1} section" unless $para; + + do { + if ( my ($option) = $para =~ m/^=item $self->{item}/ ) { + chomp $para; + PTDEBUG && _d($para); + my %attribs; + + $para = <$fh>; # read next paragraph, possibly attributes + + if ( $para =~ m/: / ) { # attributes + $para =~ s/\s+\Z//g; + %attribs = map { + my ( $attrib, $val) = split(/: /, $_); + die "Unrecognized attribute for --$option: $attrib" + unless $self->{attributes}->{$attrib}; + 
($attrib, $val); + } split(/; /, $para); + if ( $attribs{'short form'} ) { + $attribs{'short form'} =~ s/-//; + } + $para = <$fh>; # read next paragraph, probably short help desc + } + else { + PTDEBUG && _d('Option has no attributes'); + } + + $para =~ s/\s+\Z//g; + $para =~ s/\s+/ /g; + $para =~ s/$POD_link_re/$1/go; + + $para =~ s/\.(?:\n.*| [A-Z].*|\Z)//s; + PTDEBUG && _d('Short help:', $para); + + die "No description after option spec $option" if $para =~ m/^=item/; + + if ( my ($base_option) = $option =~ m/^\[no\](.*)/ ) { + $option = $base_option; + $attribs{'negatable'} = 1; + } + + push @specs, { + spec => $self->{parse_attributes}->($self, $option, \%attribs), + desc => $para + . (defined $attribs{default} ? " (default $attribs{default})" : ''), + group => ($attribs{'group'} ? $attribs{'group'} : 'default'), + }; + } + while ( $para = <$fh> ) { + last unless $para; + if ( $para =~ m/^=head1/ ) { + $para = undef; # Can't 'last' out of a do {} block. + last; + } + last if $para =~ m/^=item /; + } + } while ( $para ); + + die "No valid specs in $self->{head1}" unless @specs; + + close $fh; + return @specs, @rules; +} + +sub _parse_specs { + my ( $self, @specs ) = @_; + my %disables; # special rule that requires deferred checking + + foreach my $opt ( @specs ) { + if ( ref $opt ) { # It's an option spec, not a rule. 
+ PTDEBUG && _d('Parsing opt spec:', + map { ($_, '=>', $opt->{$_}) } keys %$opt); + + my ( $long, $short ) = $opt->{spec} =~ m/^([\w-]+)(?:\|([^!+=]*))?/; + if ( !$long ) { + die "Cannot parse long option from spec $opt->{spec}"; + } + $opt->{long} = $long; + + die "Duplicate long option --$long" if exists $self->{opts}->{$long}; + $self->{opts}->{$long} = $opt; + + if ( length $long == 1 ) { + PTDEBUG && _d('Long opt', $long, 'looks like short opt'); + $self->{short_opts}->{$long} = $long; + } + + if ( $short ) { + die "Duplicate short option -$short" + if exists $self->{short_opts}->{$short}; + $self->{short_opts}->{$short} = $long; + $opt->{short} = $short; + } + else { + $opt->{short} = undef; + } + + $opt->{is_negatable} = $opt->{spec} =~ m/!/ ? 1 : 0; + $opt->{is_cumulative} = $opt->{spec} =~ m/\+/ ? 1 : 0; + $opt->{is_required} = $opt->{desc} =~ m/required/ ? 1 : 0; + + $opt->{group} ||= 'default'; + $self->{groups}->{ $opt->{group} }->{$long} = 1; + + $opt->{value} = undef; + $opt->{got} = 0; + + my ( $type ) = $opt->{spec} =~ m/=(.)/; + $opt->{type} = $type; + PTDEBUG && _d($long, 'type:', $type); + + + $opt->{spec} =~ s/=./=s/ if ( $type && $type =~ m/[HhAadzm]/ ); + + if ( (my ($def) = $opt->{desc} =~ m/default\b(?: ([^)]+))?/) ) { + $self->{defaults}->{$long} = defined $def ? $def : 1; + PTDEBUG && _d($long, 'default:', $def); + } + + if ( $long eq 'config' ) { + $self->{defaults}->{$long} = join(',', $self->get_defaults_files()); + } + + if ( (my ($dis) = $opt->{desc} =~ m/(disables .*)/) ) { + $disables{$long} = $dis; + PTDEBUG && _d('Deferring check of disables rule for', $opt, $dis); + } + + $self->{opts}->{$long} = $opt; + } + else { # It's an option rule, not a spec. 
+         PTDEBUG && _d('Parsing rule:', $opt);
+         push @{$self->{rules}}, $opt;
+         my @participants = $self->_get_participants($opt);
+         my $rule_ok = 0;
+
+         # A rule paragraph may trigger several constraints; at least one
+         # phrase below must match or the rule text is rejected.
+         if ( $opt =~ m/mutually exclusive|one and only one/ ) {
+            $rule_ok = 1;
+            push @{$self->{mutex}}, \@participants;
+            PTDEBUG && _d(@participants, 'are mutually exclusive');
+         }
+         if ( $opt =~ m/at least one|one and only one/ ) {
+            $rule_ok = 1;
+            push @{$self->{atleast1}}, \@participants;
+            PTDEBUG && _d(@participants, 'require at least one');
+         }
+         if ( $opt =~ m/default to/ ) {
+            $rule_ok = 1;
+            # First participant's value defaults to the second's.
+            $self->{defaults_to}->{$participants[0]} = $participants[1];
+            PTDEBUG && _d($participants[0], 'defaults to', $participants[1]);
+         }
+         if ( $opt =~ m/restricted to option groups/ ) {
+            $rule_ok = 1;
+            my ($groups) = $opt =~ m/groups ([\w\s\,]+)/;
+            my @groups = split(',', $groups);
+            %{$self->{allowed_groups}->{$participants[0]}} = map {
+               s/\s+//;
+               $_ => 1;
+            } @groups;
+         }
+         if( $opt =~ m/accepts additional command-line arguments/ ) {
+            $rule_ok = 1;
+            $self->{strict} = 0;   # leftover @ARGV will not be an error
+            PTDEBUG && _d("Strict mode disabled by rule");
+         }
+
+         die "Unrecognized option rule: $opt" unless $rule_ok;
+      }
+   }
+
+   # "disables" rules were deferred above until every option is defined.
+   foreach my $long ( keys %disables ) {
+      my @participants = $self->_get_participants($disables{$long});
+      $self->{disables}->{$long} = \@participants;
+      PTDEBUG && _d('Option', $long, 'disables', @participants);
+   }
+
+   return;
+}
+
+# Extract the --option names mentioned in a rule string; dies if a rule
+# references an option that was never defined.
+sub _get_participants {
+   my ( $self, $str ) = @_;
+   my @participants;
+   foreach my $long ( $str =~ m/--(?:\[no\])?([\w-]+)/g ) {
+      die "Option --$long does not exist while processing rule $str"
+         unless exists $self->{opts}->{$long};
+      push @participants, $long;
+   }
+   PTDEBUG && _d('Participants for', $str, ':', @participants);
+   return @participants;
+}
+
+# Return a shallow copy of the option specs (long name => spec hashref).
+sub opts {
+   my ( $self ) = @_;
+   my %opts = %{$self->{opts}};
+   return %opts;
+}
+
+# Return a shallow copy of the short-to-long option name map.
+sub short_opts {
+   my ( $self ) = @_;
+   my %short_opts = %{$self->{short_opts}};
+   return %short_opts;
+}
+
+# Replace all option defaults with %defaults; dies on unknown options.
+sub set_defaults {
+   my ( $self, %defaults ) = @_;
+
$self->{defaults} = {};
+   foreach my $long ( keys %defaults ) {
+      die "Cannot set default for nonexistent option $long"
+         unless exists $self->{opts}->{$long};
+      $self->{defaults}->{$long} = $defaults{$long};
+      PTDEBUG && _d('Default val for', $long, ':', $defaults{$long});
+   }
+   return;
+}
+
+# Return the hashref of option defaults (long name => default value).
+sub get_defaults {
+   my ( $self ) = @_;
+   return $self->{defaults};
+}
+
+# Return the hashref of option groups (group name => { long name => 1 }).
+sub get_groups {
+   my ( $self ) = @_;
+   return $self->{groups};
+}
+
+# Getopt::Long callback: record one parsed option value.  $opt may be a
+# short or long name; cumulative options count occurrences instead of
+# storing the value.  Marks the option as explicitly given ("got").
+sub _set_option {
+   my ( $self, $opt, $val ) = @_;
+   my $long = exists $self->{opts}->{$opt} ? $opt
+      : exists $self->{short_opts}->{$opt} ? $self->{short_opts}->{$opt}
+      : die "Getopt::Long gave a nonexistent option: $opt";
+
+   $opt = $self->{opts}->{$long};
+   if ( $opt->{is_cumulative} ) {
+      $opt->{value}++;
+   }
+   else {
+      $opt->{value} = $val;
+   }
+   $opt->{got} = 1;
+   PTDEBUG && _d('Got option', $long, '=', $val);
+}
+
+# Parse @ARGV: reset all option state, splice in config-file arguments,
+# run Getopt::Long, then (below) enforce rules and validate types.
+sub get_opts {
+   my ( $self ) = @_;
+
+   # Reset state: every option starts not-given, with its default
+   # (0 for cumulative options without a default, else undef).
+   foreach my $long ( keys %{$self->{opts}} ) {
+      $self->{opts}->{$long}->{got} = 0;
+      $self->{opts}->{$long}->{value}
+         = exists $self->{defaults}->{$long} ? $self->{defaults}->{$long}
+         : $self->{opts}->{$long}->{is_cumulative} ? 0
+         : undef;
+   }
+   $self->{got_opts} = 0;
+
+   $self->{errors} = [];
+
+   # --config must be first on the command line; handle it before
+   # Getopt::Long so config files can supply the remaining options.
+   if ( @ARGV && $ARGV[0] eq "--config" ) {
+      shift @ARGV;
+      $self->_set_option('config', shift @ARGV);
+   }
+   if ( $self->has('config') ) {
+      my @extra_args;
+      foreach my $filename ( split(',', $self->get('config')) ) {
+         eval {
+            push @extra_args, $self->_read_config_file($filename);
+         };
+         if ( $EVAL_ERROR ) {
+            # Only die for files the user named explicitly; the default
+            # config files are optional and may not exist.
+            if ( $self->got('config') ) {
+               die $EVAL_ERROR;
+            }
+            elsif ( PTDEBUG ) {
+               _d($EVAL_ERROR);
+            }
+         }
+      }
+      # Config-file args go first so real command-line args override them.
+      unshift @ARGV, @extra_args;
+   }
+
+   Getopt::Long::Configure('no_ignore_case', 'bundling');
+   GetOptions(
+      map  { $_->{spec} => sub { $self->_set_option(@_); } }
+      grep { $_->{long} ne 'config' } # --config is handled specially above.
+ values %{$self->{opts}} + ) or $self->save_error('Error parsing options'); + + if ( exists $self->{opts}->{version} && $self->{opts}->{version}->{got} ) { + if ( $self->{version} ) { + print $self->{version}, "\n"; + } + else { + print "Error parsing version. See the VERSION section of the tool's documentation.\n"; + } + exit 1; + } + + if ( @ARGV && $self->{strict} ) { + $self->save_error("Unrecognized command-line options @ARGV"); + } + + foreach my $mutex ( @{$self->{mutex}} ) { + my @set = grep { $self->{opts}->{$_}->{got} } @$mutex; + if ( @set > 1 ) { + my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" } + @{$mutex}[ 0 .. scalar(@$mutex) - 2] ) + . ' and --'.$self->{opts}->{$mutex->[-1]}->{long} + . ' are mutually exclusive.'; + $self->save_error($err); + } + } + + foreach my $required ( @{$self->{atleast1}} ) { + my @set = grep { $self->{opts}->{$_}->{got} } @$required; + if ( @set == 0 ) { + my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" } + @{$required}[ 0 .. scalar(@$required) - 2] ) + .' 
or --'.$self->{opts}->{$required->[-1]}->{long}; + $self->save_error("Specify at least one of $err"); + } + } + + $self->_check_opts( keys %{$self->{opts}} ); + $self->{got_opts} = 1; + return; +} + +sub _check_opts { + my ( $self, @long ) = @_; + my $long_last = scalar @long; + while ( @long ) { + foreach my $i ( 0..$#long ) { + my $long = $long[$i]; + next unless $long; + my $opt = $self->{opts}->{$long}; + if ( $opt->{got} ) { + if ( exists $self->{disables}->{$long} ) { + my @disable_opts = @{$self->{disables}->{$long}}; + map { $self->{opts}->{$_}->{value} = undef; } @disable_opts; + PTDEBUG && _d('Unset options', @disable_opts, + 'because', $long,'disables them'); + } + + if ( exists $self->{allowed_groups}->{$long} ) { + + my @restricted_groups = grep { + !exists $self->{allowed_groups}->{$long}->{$_} + } keys %{$self->{groups}}; + + my @restricted_opts; + foreach my $restricted_group ( @restricted_groups ) { + RESTRICTED_OPT: + foreach my $restricted_opt ( + keys %{$self->{groups}->{$restricted_group}} ) + { + next RESTRICTED_OPT if $restricted_opt eq $long; + push @restricted_opts, $restricted_opt + if $self->{opts}->{$restricted_opt}->{got}; + } + } + + if ( @restricted_opts ) { + my $err; + if ( @restricted_opts == 1 ) { + $err = "--$restricted_opts[0]"; + } + else { + $err = join(', ', + map { "--$self->{opts}->{$_}->{long}" } + grep { $_ } + @restricted_opts[0..scalar(@restricted_opts) - 2] + ) + . 
' or --'.$self->{opts}->{$restricted_opts[-1]}->{long}; + } + $self->save_error("--$long is not allowed with $err"); + } + } + + } + elsif ( $opt->{is_required} ) { + $self->save_error("Required option --$long must be specified"); + } + + $self->_validate_type($opt); + if ( $opt->{parsed} ) { + delete $long[$i]; + } + else { + PTDEBUG && _d('Temporarily failed to parse', $long); + } + } + + die "Failed to parse options, possibly due to circular dependencies" + if @long == $long_last; + $long_last = @long; + } + + return; +} + +sub _validate_type { + my ( $self, $opt ) = @_; + return unless $opt; + + if ( !$opt->{type} ) { + $opt->{parsed} = 1; + return; + } + + my $val = $opt->{value}; + + if ( $val && $opt->{type} eq 'm' ) { # type time + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a time value'); + my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/; + if ( !$suffix ) { + my ( $s ) = $opt->{desc} =~ m/\(suffix (.)\)/; + $suffix = $s || 's'; + PTDEBUG && _d('No suffix given; using', $suffix, 'for', + $opt->{long}, '(value:', $val, ')'); + } + if ( $suffix =~ m/[smhd]/ ) { + $val = $suffix eq 's' ? $num # Seconds + : $suffix eq 'm' ? $num * 60 # Minutes + : $suffix eq 'h' ? $num * 3600 # Hours + : $num * 86400; # Days + $opt->{value} = ($prefix || '') . 
$val; + PTDEBUG && _d('Setting option', $opt->{long}, 'to', $val); + } + else { + $self->save_error("Invalid time suffix for --$opt->{long}"); + } + } + elsif ( $val && $opt->{type} eq 'd' ) { # type DSN + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a DSN'); + my $prev = {}; + my $from_key = $self->{defaults_to}->{ $opt->{long} }; + if ( $from_key ) { + PTDEBUG && _d($opt->{long}, 'DSN copies from', $from_key, 'DSN'); + if ( $self->{opts}->{$from_key}->{parsed} ) { + $prev = $self->{opts}->{$from_key}->{value}; + } + else { + PTDEBUG && _d('Cannot parse', $opt->{long}, 'until', + $from_key, 'parsed'); + return; + } + } + my $defaults = $self->{DSNParser}->parse_options($self); + $opt->{value} = $self->{DSNParser}->parse($val, $prev, $defaults); + } + elsif ( $val && $opt->{type} eq 'z' ) { # type size + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a size value'); + $self->_parse_size($opt, $val); + } + elsif ( $opt->{type} eq 'H' || (defined $val && $opt->{type} eq 'h') ) { + $opt->{value} = { map { $_ => 1 } split(/(?{type} eq 'A' || (defined $val && $opt->{type} eq 'a') ) { + $opt->{value} = [ split(/(?{long}, 'type', $opt->{type}, 'value', $val); + } + + $opt->{parsed} = 1; + return; +} + +sub get { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + return $self->{opts}->{$long}->{value}; +} + +sub got { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + return $self->{opts}->{$long}->{got}; +} + +sub has { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + return defined $long ? exists $self->{opts}->{$long} : 0; +} + +sub set { + my ( $self, $opt, $val ) = @_; + my $long = (length $opt == 1 ? 
$self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + $self->{opts}->{$long}->{value} = $val; + return; +} + +sub save_error { + my ( $self, $error ) = @_; + push @{$self->{errors}}, $error; + return; +} + +sub errors { + my ( $self ) = @_; + return $self->{errors}; +} + +sub usage { + my ( $self ) = @_; + warn "No usage string is set" unless $self->{usage}; # XXX + return "Usage: " . ($self->{usage} || '') . "\n"; +} + +sub descr { + my ( $self ) = @_; + warn "No description string is set" unless $self->{description}; # XXX + my $descr = ($self->{description} || $self->{program_name} || '') + . " For more details, please use the --help option, " + . "or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation."; + $descr = join("\n", $descr =~ m/(.{0,80})(?:\s+|$)/g) + unless $ENV{DONT_BREAK_LINES}; + $descr =~ s/ +$//mg; + return $descr; +} + +sub usage_or_errors { + my ( $self, $file, $return ) = @_; + $file ||= $self->{file} || __FILE__; + + if ( !$self->{description} || !$self->{usage} ) { + PTDEBUG && _d("Getting description and usage from SYNOPSIS in", $file); + my %synop = $self->_parse_synopsis($file); + $self->{description} ||= $synop{description}; + $self->{usage} ||= $synop{usage}; + PTDEBUG && _d("Description:", $self->{description}, + "\nUsage:", $self->{usage}); + } + + if ( $self->{opts}->{help}->{got} ) { + print $self->print_usage() or die "Cannot print usage: $OS_ERROR"; + exit 0 unless $return; + } + elsif ( scalar @{$self->{errors}} ) { + print $self->print_errors() or die "Cannot print errors: $OS_ERROR"; + exit 1 unless $return; + } + + return; +} + +sub print_errors { + my ( $self ) = @_; + my $usage = $self->usage() . "\n"; + if ( (my @errors = @{$self->{errors}}) ) { + $usage .= join("\n * ", 'Errors in command-line arguments:', @errors) + . "\n"; + } + return $usage . "\n" . 
$self->descr(); +} + +sub print_usage { + my ( $self ) = @_; + die "Run get_opts() before print_usage()" unless $self->{got_opts}; + my @opts = values %{$self->{opts}}; + + my $maxl = max( + map { + length($_->{long}) # option long name + + ($_->{is_negatable} ? 4 : 0) # "[no]" if opt is negatable + + ($_->{type} ? 2 : 0) # "=x" where x is the opt type + } + @opts); + + my $maxs = max(0, + map { + length($_) + + ($self->{opts}->{$_}->{is_negatable} ? 4 : 0) + + ($self->{opts}->{$_}->{type} ? 2 : 0) + } + values %{$self->{short_opts}}); + + my $lcol = max($maxl, ($maxs + 3)); + my $rcol = 80 - $lcol - 6; + my $rpad = ' ' x ( 80 - $rcol ); + + $maxs = max($lcol - 3, $maxs); + + my $usage = $self->descr() . "\n" . $self->usage(); + + my @groups = reverse sort grep { $_ ne 'default'; } keys %{$self->{groups}}; + push @groups, 'default'; + + foreach my $group ( reverse @groups ) { + $usage .= "\n".($group eq 'default' ? 'Options' : $group).":\n\n"; + foreach my $opt ( + sort { $a->{long} cmp $b->{long} } + grep { $_->{group} eq $group } + @opts ) + { + my $long = $opt->{is_negatable} ? "[no]$opt->{long}" : $opt->{long}; + my $short = $opt->{short}; + my $desc = $opt->{desc}; + + $long .= $opt->{type} ? "=$opt->{type}" : ""; + + if ( $opt->{type} && $opt->{type} eq 'm' ) { + my ($s) = $desc =~ m/\(suffix (.)\)/; + $s ||= 's'; + $desc =~ s/\s+\(suffix .\)//; + $desc .= ". Optional suffix s=seconds, m=minutes, h=hours, " + . "d=days; if no suffix, $s is used."; + } + $desc = join("\n$rpad", grep { $_ } $desc =~ m/(.{0,$rcol}(?!\W))(?:\s+|(?<=\W)|$)/g); + $desc =~ s/ +$//mg; + if ( $short ) { + $usage .= sprintf(" --%-${maxs}s -%s %s\n", $long, $short, $desc); + } + else { + $usage .= sprintf(" --%-${lcol}s %s\n", $long, $desc); + } + } + } + + $usage .= "\nOption types: s=string, i=integer, f=float, h/H/a/A=comma-separated list, d=DSN, z=size, m=time\n"; + + if ( (my @rules = @{$self->{rules}}) ) { + $usage .= "\nRules:\n\n"; + $usage .= join("\n", map { " $_" } @rules) . 
"\n"; + } + if ( $self->{DSNParser} ) { + $usage .= "\n" . $self->{DSNParser}->usage(); + } + $usage .= "\nOptions and values after processing arguments:\n\n"; + foreach my $opt ( sort { $a->{long} cmp $b->{long} } @opts ) { + my $val = $opt->{value}; + my $type = $opt->{type} || ''; + my $bool = $opt->{spec} =~ m/^[\w-]+(?:\|[\w-])?!?$/; + $val = $bool ? ( $val ? 'TRUE' : 'FALSE' ) + : !defined $val ? '(No value)' + : $type eq 'd' ? $self->{DSNParser}->as_string($val) + : $type =~ m/H|h/ ? join(',', sort keys %$val) + : $type =~ m/A|a/ ? join(',', @$val) + : $val; + $usage .= sprintf(" --%-${lcol}s %s\n", $opt->{long}, $val); + } + return $usage; +} + +sub prompt_noecho { + shift @_ if ref $_[0] eq __PACKAGE__; + my ( $prompt ) = @_; + local $OUTPUT_AUTOFLUSH = 1; + print $prompt + or die "Cannot print: $OS_ERROR"; + my $response; + eval { + require Term::ReadKey; + Term::ReadKey::ReadMode('noecho'); + chomp($response = ); + Term::ReadKey::ReadMode('normal'); + print "\n" + or die "Cannot print: $OS_ERROR"; + }; + if ( $EVAL_ERROR ) { + die "Cannot read response; is Term::ReadKey installed? 
$EVAL_ERROR"; + } + return $response; +} + +sub _read_config_file { + my ( $self, $filename ) = @_; + open my $fh, "<", $filename or die "Cannot open $filename: $OS_ERROR\n"; + my @args; + my $prefix = '--'; + my $parse = 1; + + LINE: + while ( my $line = <$fh> ) { + chomp $line; + next LINE if $line =~ m/^\s*(?:\#|\;|$)/; + $line =~ s/\s+#.*$//g; + $line =~ s/^\s+|\s+$//g; + if ( $line eq '--' ) { + $prefix = ''; + $parse = 0; + next LINE; + } + if ( $parse + && (my($opt, $arg) = $line =~ m/^\s*([^=\s]+?)(?:\s*=\s*(.*?)\s*)?$/) + ) { + push @args, grep { defined $_ } ("$prefix$opt", $arg); + } + elsif ( $line =~ m/./ ) { + push @args, $line; + } + else { + die "Syntax error in file $filename at line $INPUT_LINE_NUMBER"; + } + } + close $fh; + return @args; +} + +sub read_para_after { + my ( $self, $file, $regex ) = @_; + open my $fh, "<", $file or die "Can't open $file: $OS_ERROR"; + local $INPUT_RECORD_SEPARATOR = ''; + my $para; + while ( $para = <$fh> ) { + next unless $para =~ m/^=pod$/m; + last; + } + while ( $para = <$fh> ) { + next unless $para =~ m/$regex/; + last; + } + $para = <$fh>; + chomp($para); + close $fh or die "Can't close $file: $OS_ERROR"; + return $para; +} + +sub clone { + my ( $self ) = @_; + + my %clone = map { + my $hashref = $self->{$_}; + my $val_copy = {}; + foreach my $key ( keys %$hashref ) { + my $ref = ref $hashref->{$key}; + $val_copy->{$key} = !$ref ? $hashref->{$key} + : $ref eq 'HASH' ? { %{$hashref->{$key}} } + : $ref eq 'ARRAY' ? 
[ @{$hashref->{$key}} ] + : $hashref->{$key}; + } + $_ => $val_copy; + } qw(opts short_opts defaults); + + foreach my $scalar ( qw(got_opts) ) { + $clone{$scalar} = $self->{$scalar}; + } + + return bless \%clone; +} + +sub _parse_size { + my ( $self, $opt, $val ) = @_; + + if ( lc($val || '') eq 'null' ) { + PTDEBUG && _d('NULL size for', $opt->{long}); + $opt->{value} = 'null'; + return; + } + + my %factor_for = (k => 1_024, M => 1_048_576, G => 1_073_741_824); + my ($pre, $num, $factor) = $val =~ m/^([+-])?(\d+)([kMG])?$/; + if ( defined $num ) { + if ( $factor ) { + $num *= $factor_for{$factor}; + PTDEBUG && _d('Setting option', $opt->{y}, + 'to num', $num, '* factor', $factor); + } + $opt->{value} = ($pre || '') . $num; + } + else { + $self->save_error("Invalid size for --$opt->{long}: $val"); + } + return; +} + +sub _parse_attribs { + my ( $self, $option, $attribs ) = @_; + my $types = $self->{types}; + return $option + . ($attribs->{'short form'} ? '|' . $attribs->{'short form'} : '' ) + . ($attribs->{'negatable'} ? '!' : '' ) + . ($attribs->{'cumulative'} ? '+' : '' ) + . ($attribs->{'type'} ? '=' . $types->{$attribs->{type}} : '' ); +} + +sub _parse_synopsis { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + PTDEBUG && _d("Parsing SYNOPSIS in", $file); + + local $INPUT_RECORD_SEPARATOR = ''; # read paragraphs + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $para; + 1 while defined($para = <$fh>) && $para !~ m/^=head1 SYNOPSIS/; + die "$file does not contain a SYNOPSIS section" unless $para; + my @synop; + for ( 1..2 ) { # 1 for the usage, 2 for the description + my $para = <$fh>; + push @synop, $para; + } + close $fh; + PTDEBUG && _d("Raw SYNOPSIS text:", @synop); + my ($usage, $desc) = @synop; + die "The SYNOPSIS section in $file is not formatted properly" + unless $usage && $desc; + + $usage =~ s/^\s*Usage:\s+(.+)/$1/; + chomp $usage; + + $desc =~ s/\n/ /g; + $desc =~ s/\s{2,}/ /g; + $desc =~ s/\. ([A-Z][a-z])/. 
$1/g; + $desc =~ s/\s+$//; + + return ( + description => $desc, + usage => $usage, + ); +}; + +sub set_vars { + my ($self, $file) = @_; + $file ||= $self->{file} || __FILE__; + + my %user_vars; + my $user_vars = $self->has('set-vars') ? $self->get('set-vars') : undef; + if ( $user_vars ) { + foreach my $var_val ( @$user_vars ) { + my ($var, $val) = $var_val =~ m/([^\s=]+)=(\S+)/; + die "Invalid --set-vars value: $var_val\n" unless $var && $val; + $user_vars{$var} = { + val => $val, + default => 0, + }; + } + } + + my %default_vars; + my $default_vars = $self->read_para_after($file, qr/MAGIC_set_vars/); + if ( $default_vars ) { + %default_vars = map { + my $var_val = $_; + my ($var, $val) = $var_val =~ m/([^\s=]+)=(\S+)/; + die "Invalid --set-vars value: $var_val\n" unless $var && $val; + $var => { + val => $val, + default => 1, + }; + } split("\n", $default_vars); + } + + my %vars = ( + %default_vars, # first the tool's defaults + %user_vars, # then the user's which overwrite the defaults + ); + PTDEBUG && _d('--set-vars:', Dumper(\%vars)); + return \%vars; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +if ( PTDEBUG ) { + print '# ', $^X, ' ', $], "\n"; + if ( my $uname = `uname -a` ) { + $uname =~ s/\s+/ /g; + print "# $uname\n"; + } + print '# Arguments: ', + join(' ', map { my $a = "_[$_]_"; $a =~ s/\n/\n# /g; $a; } @ARGV), "\n"; +} + +1; +} +# ########################################################################### +# End OptionParser package +# ########################################################################### + +# ########################################################################### +# Cxn package +# This package is a copy without comments from the original. 
The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Cxn.pm +# t/lib/Cxn.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Cxn; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Scalar::Util qw(blessed); +use constant { + PTDEBUG => $ENV{PTDEBUG} || 0, + PERCONA_TOOLKIT_TEST_USE_DSN_NAMES => $ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} || 0, +}; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(DSNParser OptionParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my ($dp, $o) = @args{@required_args}; + + my $dsn_defaults = $dp->parse_options($o); + my $prev_dsn = $args{prev_dsn}; + my $dsn = $args{dsn}; + if ( !$dsn ) { + $args{dsn_string} ||= 'h=' . ($dsn_defaults->{h} || 'localhost'); + + $dsn = $dp->parse( + $args{dsn_string}, $prev_dsn, $dsn_defaults); + } + elsif ( $prev_dsn ) { + $dsn = $dp->copy($prev_dsn, $dsn); + } + + my $dsn_name = $dp->as_string($dsn, [qw(h P S)]) + || $dp->as_string($dsn, [qw(F)]) + || ''; + + my $self = { + dsn => $dsn, + dbh => $args{dbh}, + dsn_name => $dsn_name, + hostname => '', + set => $args{set}, + NAME_lc => defined($args{NAME_lc}) ? 
$args{NAME_lc} : 1,
+      dbh_set         => 0,
+      ask_pass        => $args{ask_pass},
+      DSNParser       => $dp,
+      is_cluster_node => undef,
+      parent          => $args{parent},
+   };
+
+   return bless $self, $class;
+}
+
+# Connect (or reconnect) to MySQL.  Reuses the cached dbh if it still
+# pings; prompts once for the password when ask_pass is set.  %opts are
+# merged into the DBI connect attributes (AutoCommit defaults to 1).
+# Returns the dbh after set_dbh() post-connection setup.
+sub connect {
+   my ( $self, %opts ) = @_;
+   my $dsn = $self->{dsn};
+   my $dp  = $self->{DSNParser};
+
+   my $dbh = $self->{dbh};
+   if ( !$dbh || !$dbh->ping() ) {
+      if ( $self->{ask_pass} && !$self->{asked_for_pass} ) {
+         $dsn->{p} = OptionParser::prompt_noecho("Enter MySQL password: ");
+         $self->{asked_for_pass} = 1;   # only prompt once per Cxn
+      }
+      $dbh = $dp->get_dbh(
+         $dp->get_cxn_params($dsn),
+         {
+            AutoCommit => 1,
+            %opts,
+         },
+      );
+   }
+
+   $dbh = $self->set_dbh($dbh);
+   PTDEBUG && _d($dbh, 'Connected dbh to', $self->{hostname},$self->{dsn_name});
+   return $dbh;
+}
+
+# Per-connection setup: lowercase result hash keys, cache @@hostname,
+# protect the dbh from a forked child's DESTROY when this Cxn has a
+# parent, and run the caller-supplied "set" callback.  Idempotent for
+# the same dbh.
+sub set_dbh {
+   my ($self, $dbh) = @_;
+
+   if ( $self->{dbh} && $self->{dbh} == $dbh && $self->{dbh_set} ) {
+      PTDEBUG && _d($dbh, 'Already set dbh');
+      return $dbh;
+   }
+
+   PTDEBUG && _d($dbh, 'Setting dbh');
+
+   $dbh->{FetchHashKeyName} = 'NAME_lc' if $self->{NAME_lc};
+
+   my $sql = 'SELECT @@hostname, @@server_id';
+   PTDEBUG && _d($dbh, $sql);
+   my ($hostname, $server_id) = $dbh->selectrow_array($sql);
+   PTDEBUG && _d($dbh, 'hostname:', $hostname, $server_id);
+   if ( $hostname ) {
+      $self->{hostname} = $hostname;
+   }
+
+   if ( $self->{parent} ) {
+      # The parent process owns this connection; keep a forked child's
+      # implicit DESTROY from closing it.
+      PTDEBUG && _d($dbh, 'Setting InactiveDestroy=1 in parent');
+      $dbh->{InactiveDestroy} = 1;
+   }
+
+   if ( my $set = $self->{set}) {
+      $set->($dbh);
+   }
+
+   $self->{dbh}     = $dbh;
+   $self->{dbh_set} = 1;
+   return $dbh;
+}
+
+# True if $e looks like a lost-server error message.
+sub lost_connection {
+   my ($self, $e) = @_;
+   return 0 unless $e;
+   return $e =~ m/MySQL server has gone away/
+       || $e =~ m/Lost connection to MySQL server/;
+}
+
+# Accessor: the current DBI handle (may be undef before connect()).
+sub dbh {
+   my ($self) = @_;
+   return $self->{dbh};
+}
+
+# Accessor: the parsed DSN hashref.
+sub dsn {
+   my ($self) = @_;
+   return $self->{dsn};
+}
+
+# Human-readable name for this connection: hostname when known, else
+# the DSN string (always the DSN string under the test env var).
+sub name {
+   my ($self) = @_;
+   return $self->{dsn_name} if PERCONA_TOOLKIT_TEST_USE_DSN_NAMES;
+   return $self->{hostname} || $self->{dsn_name} || 'unknown host';
+}
+
+sub DESTROY {
+   my ($self) = @_;
+ + PTDEBUG && _d('Destroying cxn'); + + if ( $self->{parent} ) { + PTDEBUG && _d($self->{dbh}, 'Not disconnecting dbh in parent'); + } + elsif ( $self->{dbh} + && blessed($self->{dbh}) + && $self->{dbh}->can("disconnect") ) + { + PTDEBUG && _d($self->{dbh}, 'Disconnecting dbh on', $self->{hostname}, + $self->{dsn_name}); + $self->{dbh}->disconnect(); + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Cxn package +# ########################################################################### + +# ########################################################################### +# Quoter package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Quoter.pm +# t/lib/Quoter.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Quoter; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + return bless {}, $class; +} + +sub quote { + my ( $self, @vals ) = @_; + foreach my $val ( @vals ) { + $val =~ s/`/``/g; + } + return join('.', map { '`' . $_ . 
'`' } @vals); +} + +sub quote_val { + my ( $self, $val, %args ) = @_; + + return 'NULL' unless defined $val; # undef = NULL + return "''" if $val eq ''; # blank string = '' + return $val if $val =~ m/^0x[0-9a-fA-F]+$/ # quote hex data + && !$args{is_char}; # unless is_char is true + + $val =~ s/(['\\])/\\$1/g; + return "'$val'"; +} + +sub split_unquote { + my ( $self, $db_tbl, $default_db ) = @_; + my ( $db, $tbl ) = split(/[.]/, $db_tbl); + if ( !$tbl ) { + $tbl = $db; + $db = $default_db; + } + for ($db, $tbl) { + next unless $_; + s/\A`//; + s/`\z//; + s/``/`/g; + } + + return ($db, $tbl); +} + +sub literal_like { + my ( $self, $like ) = @_; + return unless $like; + $like =~ s/([%_])/\\$1/g; + return "'$like'"; +} + +sub join_quote { + my ( $self, $default_db, $db_tbl ) = @_; + return unless $db_tbl; + my ($db, $tbl) = split(/[.]/, $db_tbl); + if ( !$tbl ) { + $tbl = $db; + $db = $default_db; + } + $db = "`$db`" if $db && $db !~ m/^`/; + $tbl = "`$tbl`" if $tbl && $tbl !~ m/^`/; + return $db ? "$db.$tbl" : $tbl; +} + +sub serialize_list { + my ( $self, @args ) = @_; + PTDEBUG && _d('Serializing', Dumper(\@args)); + return unless @args; + + my @parts; + foreach my $arg ( @args ) { + if ( defined $arg ) { + $arg =~ s/,/\\,/g; # escape commas + $arg =~ s/\\N/\\\\N/g; # escape literal \N + push @parts, $arg; + } + else { + push @parts, '\N'; + } + } + + my $string = join(',', @parts); + PTDEBUG && _d('Serialized: <', $string, '>'); + return $string; +} + +sub deserialize_list { + my ( $self, $string ) = @_; + PTDEBUG && _d('Deserializing <', $string, '>'); + die "Cannot deserialize an undefined string" unless defined $string; + + my @parts; + foreach my $arg ( split(/(? 
$ENV{PTDEBUG} || 0; + +use overload ( + '""' => "version", + '<=>' => "cmp", + 'cmp' => "cmp", + fallback => 1, +); + +use Carp (); + +has major => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has [qw( minor revision )] => ( + is => 'ro', + isa => 'Num', +); + +has flavor => ( + is => 'ro', + isa => 'Str', + default => sub { 'Unknown' }, +); + +has innodb_version => ( + is => 'ro', + isa => 'Str', + default => sub { 'NO' }, +); + +sub series { + my $self = shift; + return $self->_join_version($self->major, $self->minor); +} + +sub version { + my $self = shift; + return $self->_join_version($self->major, $self->minor, $self->revision); +} + +sub is_in { + my ($self, $target) = @_; + + return $self eq $target; +} + +sub _join_version { + my ($self, @parts) = @_; + + return join ".", map { my $c = $_; $c =~ s/^0\./0/; $c } grep defined, @parts; +} +sub _split_version { + my ($self, $str) = @_; + my @version_parts = map { s/^0(?=\d)/0./; $_ } $str =~ m/(\d+)/g; + return @version_parts[0..2]; +} + +sub normalized_version { + my ( $self ) = @_; + my $result = sprintf('%d%02d%02d', map { $_ || 0 } $self->major, + $self->minor, + $self->revision); + PTDEBUG && _d($self->version, 'normalizes to', $result); + return $result; +} + +sub comment { + my ( $self, $cmd ) = @_; + my $v = $self->normalized_version(); + + return "/*!$v $cmd */" +} + +my @methods = qw(major minor revision); +sub cmp { + my ($left, $right) = @_; + my $right_obj = (blessed($right) && $right->isa(ref($left))) + ? 
$right + : ref($left)->new($right); + + my $retval = 0; + for my $m ( @methods ) { + last unless defined($left->$m) && defined($right_obj->$m); + $retval = $left->$m <=> $right_obj->$m; + last if $retval; + } + return $retval; +} + +sub BUILDARGS { + my $self = shift; + + if ( @_ == 1 ) { + my %args; + if ( blessed($_[0]) && $_[0]->can("selectrow_hashref") ) { + PTDEBUG && _d("VersionParser got a dbh, trying to get the version"); + my $dbh = $_[0]; + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + my $query = eval { + $dbh->selectall_arrayref(q/SHOW VARIABLES LIKE 'version%'/, { Slice => {} }) + }; + if ( $query ) { + $query = { map { $_->{variable_name} => $_->{value} } @$query }; + @args{@methods} = $self->_split_version($query->{version}); + $args{flavor} = delete $query->{version_comment} + if $query->{version_comment}; + } + elsif ( eval { ($query) = $dbh->selectrow_array(q/SELECT VERSION()/) } ) { + @args{@methods} = $self->_split_version($query); + } + else { + Carp::confess("Couldn't get the version from the dbh while " + . "creating a VersionParser object: $@"); + } + $args{innodb_version} = eval { $self->_innodb_version($dbh) }; + } + elsif ( !ref($_[0]) ) { + @args{@methods} = $self->_split_version($_[0]); + } + + for my $method (@methods) { + delete $args{$method} unless defined $args{$method}; + } + @_ = %args if %args; + } + + return $self->SUPER::BUILDARGS(@_); +} + +sub _innodb_version { + my ( $self, $dbh ) = @_; + return unless $dbh; + my $innodb_version = "NO"; + + my ($innodb) = + grep { $_->{engine} =~ m/InnoDB/i } + map { + my %hash; + @hash{ map { lc $_ } keys %$_ } = values %$_; + \%hash; + } + @{ $dbh->selectall_arrayref("SHOW ENGINES", {Slice=>{}}) }; + if ( $innodb ) { + PTDEBUG && _d("InnoDB support:", $innodb->{support}); + if ( $innodb->{support} =~ m/YES|DEFAULT/i ) { + my $vars = $dbh->selectrow_hashref( + "SHOW VARIABLES LIKE 'innodb_version'"); + $innodb_version = !$vars ? 
"BUILTIN" + : ($vars->{Value} || $vars->{value}); + } + else { + $innodb_version = $innodb->{support}; # probably DISABLED or NO + } + } + + PTDEBUG && _d("InnoDB version:", $innodb_version); + return $innodb_version; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +no Lmo; +1; +} +# ########################################################################### +# End VersionParser package +# ########################################################################### + +# ########################################################################### +# Daemon package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Daemon.pm +# t/lib/Daemon.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Daemon; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use POSIX qw(setsid); +use Fcntl qw(:DEFAULT); + +sub new { + my ($class, %args) = @_; + my $self = { + log_file => $args{log_file}, + pid_file => $args{pid_file}, + daemonize => $args{daemonize}, + force_log_file => $args{force_log_file}, + parent_exit => $args{parent_exit}, + pid_file_owner => 0, + }; + return bless $self, $class; +} + +sub run { + my ($self) = @_; + + my $daemonize = $self->{daemonize}; + my $pid_file = $self->{pid_file}; + my $log_file = $self->{log_file}; + my $force_log_file = $self->{force_log_file}; + my $parent_exit = $self->{parent_exit}; + + PTDEBUG && _d('Starting daemon'); + + if ( $pid_file ) { + eval { + $self->_make_pid_file( + pid => $PID, # parent's pid + pid_file => $pid_file, + ); + }; + die "$EVAL_ERROR\n" if $EVAL_ERROR; + if ( 
!$daemonize ) { + $self->{pid_file_owner} = $PID; # parent's pid + } + } + + if ( $daemonize ) { + defined (my $child_pid = fork()) or die "Cannot fork: $OS_ERROR"; + if ( $child_pid ) { + PTDEBUG && _d('Forked child', $child_pid); + $parent_exit->($child_pid) if $parent_exit; + exit 0; + } + + POSIX::setsid() or die "Cannot start a new session: $OS_ERROR"; + chdir '/' or die "Cannot chdir to /: $OS_ERROR"; + + if ( $pid_file ) { + $self->_update_pid_file( + pid => $PID, # child's pid + pid_file => $pid_file, + ); + $self->{pid_file_owner} = $PID; + } + } + + if ( $daemonize || $force_log_file ) { + PTDEBUG && _d('Redirecting STDIN to /dev/null'); + close STDIN; + open STDIN, '/dev/null' + or die "Cannot reopen STDIN to /dev/null: $OS_ERROR"; + if ( $log_file ) { + PTDEBUG && _d('Redirecting STDOUT and STDERR to', $log_file); + close STDOUT; + open STDOUT, '>>', $log_file + or die "Cannot open log file $log_file: $OS_ERROR"; + + close STDERR; + open STDERR, ">&STDOUT" + or die "Cannot dupe STDERR to STDOUT: $OS_ERROR"; + } + else { + if ( -t STDOUT ) { + PTDEBUG && _d('No log file and STDOUT is a terminal;', + 'redirecting to /dev/null'); + close STDOUT; + open STDOUT, '>', '/dev/null' + or die "Cannot reopen STDOUT to /dev/null: $OS_ERROR"; + } + if ( -t STDERR ) { + PTDEBUG && _d('No log file and STDERR is a terminal;', + 'redirecting to /dev/null'); + close STDERR; + open STDERR, '>', '/dev/null' + or die "Cannot reopen STDERR to /dev/null: $OS_ERROR"; + } + } + + $OUTPUT_AUTOFLUSH = 1; + } + + PTDEBUG && _d('Daemon running'); + return; +} + +sub _make_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid pid_file); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid = $args{pid}; + my $pid_file = $args{pid_file}; + + eval { + sysopen(PID_FH, $pid_file, O_RDWR|O_CREAT|O_EXCL) or die $OS_ERROR; + print PID_FH $PID, "\n"; + close PID_FH; + }; + if ( my $e = $EVAL_ERROR ) { + if ( $e =~ m/file 
exists/i ) { + my $old_pid = $self->_check_pid_file( + pid_file => $pid_file, + pid => $PID, + ); + if ( $old_pid ) { + warn "Overwriting PID file $pid_file because PID $old_pid " + . "is not running.\n"; + } + $self->_update_pid_file( + pid => $PID, + pid_file => $pid_file + ); + } + else { + die "Error creating PID file $pid_file: $e\n"; + } + } + + return; +} + +sub _check_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid_file pid); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid_file = $args{pid_file}; + my $pid = $args{pid}; + + PTDEBUG && _d('Checking if PID in', $pid_file, 'is running'); + + if ( ! -f $pid_file ) { + PTDEBUG && _d('PID file', $pid_file, 'does not exist'); + return; + } + + open my $fh, '<', $pid_file + or die "Error opening $pid_file: $OS_ERROR"; + my $existing_pid = do { local $/; <$fh> }; + chomp($existing_pid) if $existing_pid; + close $fh + or die "Error closing $pid_file: $OS_ERROR"; + + if ( $existing_pid ) { + if ( $existing_pid == $pid ) { + warn "The current PID $pid already holds the PID file $pid_file\n"; + return; + } + else { + PTDEBUG && _d('Checking if PID', $existing_pid, 'is running'); + my $pid_is_alive = kill 0, $existing_pid; + if ( $pid_is_alive ) { + die "PID file $pid_file exists and PID $existing_pid is running\n"; + } + } + } + else { + die "PID file $pid_file exists but it is empty. Remove the file " + . 
"if the process is no longer running.\n";
+   }
+
+   return $existing_pid;
+}
+
+sub _update_pid_file {
+   my ($self, %args) = @_;
+   my @required_args = qw(pid pid_file);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   };
+   my $pid      = $args{pid};
+   my $pid_file = $args{pid_file};
+
+   open my $fh, '>', $pid_file
+      or die "Cannot open $pid_file: $OS_ERROR";
+   print { $fh } $pid, "\n"
+      or die "Cannot print to $pid_file: $OS_ERROR";
+   close $fh
+      or warn "Cannot close $pid_file: $OS_ERROR";
+
+   return;
+}
+
+sub remove_pid_file {
+   my ($self, $pid_file) = @_;
+   $pid_file ||= $self->{pid_file};
+   if ( $pid_file && -f $pid_file ) {
+      unlink $pid_file
+         or warn "Cannot remove PID file $pid_file: $OS_ERROR";
+      PTDEBUG && _d('Removed PID file');
+   }
+   else {
+      PTDEBUG && _d('No PID to remove');
+   }
+   return;
+}
+
+sub DESTROY {
+   my ($self) = @_;
+
+   if ( $self->{pid_file_owner} == $PID ) {
+      $self->remove_pid_file();
+   }
+
+   return;
+}
+
+sub _d {
+   my ($package, undef, $line) = caller 0;
+   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
+        map { defined $_ ? $_ : 'undef' }
+        @_;
+   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
+}
+
+1;
+}
+# ###########################################################################
+# End Daemon package
+# ###########################################################################
+
+# ###########################################################################
+# Transformers package
+# This package is a copy without comments from the original.  The original
+# with comments and its test file can be found in the Bazaar repository at,
+#   lib/Transformers.pm
+#   t/lib/Transformers.t
+# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Transformers; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Time::Local qw(timegm timelocal); +use Digest::MD5 qw(md5_hex); +use B qw(); + +BEGIN { + require Exporter; + our @ISA = qw(Exporter); + our %EXPORT_TAGS = (); + our @EXPORT = (); + our @EXPORT_OK = qw( + micro_t + percentage_of + secs_to_time + time_to_secs + shorten + ts + parse_timestamp + unix_timestamp + any_unix_timestamp + make_checksum + crc32 + encode_json + ); +} + +our $mysql_ts = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/; +our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/; +our $n_ts = qr/(\d{1,5})([shmd]?)/; # Limit \d{1,5} because \d{6} looks + +sub micro_t { + my ( $t, %args ) = @_; + my $p_ms = defined $args{p_ms} ? $args{p_ms} : 0; # precision for ms vals + my $p_s = defined $args{p_s} ? $args{p_s} : 0; # precision for s vals + my $f; + + $t = 0 if $t < 0; + + $t = sprintf('%.17f', $t) if $t =~ /e/; + + $t =~ s/\.(\d{1,6})\d*/\.$1/; + + if ($t > 0 && $t <= 0.000999) { + $f = ($t * 1000000) . 'us'; + } + elsif ($t >= 0.001000 && $t <= 0.999999) { + $f = sprintf("%.${p_ms}f", $t * 1000); + $f = ($f * 1) . 'ms'; # * 1 to remove insignificant zeros + } + elsif ($t >= 1) { + $f = sprintf("%.${p_s}f", $t); + $f = ($f * 1) . 's'; # * 1 to remove insignificant zeros + } + else { + $f = 0; # $t should = 0 at this point + } + + return $f; +} + +sub percentage_of { + my ( $is, $of, %args ) = @_; + my $p = $args{p} || 0; # float precision + my $fmt = $p ? "%.${p}f" : "%d"; + return sprintf $fmt, ($is * 100) / ($of ||= 1); +} + +sub secs_to_time { + my ( $secs, $fmt ) = @_; + $secs ||= 0; + return '00:00' unless $secs; + + $fmt ||= $secs >= 86_400 ? 'd' + : $secs >= 3_600 ? 'h' + : 'm'; + + return + $fmt eq 'd' ? 
sprintf( + "%d+%02d:%02d:%02d", + int($secs / 86_400), + int(($secs % 86_400) / 3_600), + int(($secs % 3_600) / 60), + $secs % 60) + : $fmt eq 'h' ? sprintf( + "%02d:%02d:%02d", + int(($secs % 86_400) / 3_600), + int(($secs % 3_600) / 60), + $secs % 60) + : sprintf( + "%02d:%02d", + int(($secs % 3_600) / 60), + $secs % 60); +} + +sub time_to_secs { + my ( $val, $default_suffix ) = @_; + die "I need a val argument" unless defined $val; + my $t = 0; + my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/; + $suffix = $suffix || $default_suffix || 's'; + if ( $suffix =~ m/[smhd]/ ) { + $t = $suffix eq 's' ? $num * 1 # Seconds + : $suffix eq 'm' ? $num * 60 # Minutes + : $suffix eq 'h' ? $num * 3600 # Hours + : $num * 86400; # Days + + $t *= -1 if $prefix && $prefix eq '-'; + } + else { + die "Invalid suffix for $val: $suffix"; + } + return $t; +} + +sub shorten { + my ( $num, %args ) = @_; + my $p = defined $args{p} ? $args{p} : 2; # float precision + my $d = defined $args{d} ? $args{d} : 1_024; # divisor + my $n = 0; + my @units = ('', qw(k M G T P E Z Y)); + while ( $num >= $d && $n < @units - 1 ) { + $num /= $d; + ++$n; + } + return sprintf( + $num =~ m/\./ || $n + ? "%.${p}f%s" + : '%d', + $num, $units[$n]); +} + +sub ts { + my ( $time, $gmt ) = @_; + my ( $sec, $min, $hour, $mday, $mon, $year ) + = $gmt ? gmtime($time) : localtime($time); + $mon += 1; + $year += 1900; + my $val = sprintf("%d-%02d-%02dT%02d:%02d:%02d", + $year, $mon, $mday, $hour, $min, $sec); + if ( my ($us) = $time =~ m/(\.\d+)$/ ) { + $us = sprintf("%.6f", $us); + $us =~ s/^0\././; + $val .= $us; + } + return $val; +} + +sub parse_timestamp { + my ( $val ) = @_; + if ( my($y, $m, $d, $h, $i, $s, $f) + = $val =~ m/^$mysql_ts$/ ) + { + return sprintf "%d-%02d-%02d %02d:%02d:" + . (defined $f ? '%09.6f' : '%02d'), + $y + 2000, $m, $d, $h, $i, (defined $f ? 
$s + $f : $s); + } + elsif ( $val =~ m/^$proper_ts$/ ) { + return $val; + } + return $val; +} + +sub unix_timestamp { + my ( $val, $gmt ) = @_; + if ( my($y, $m, $d, $h, $i, $s, $us) = $val =~ m/^$proper_ts$/ ) { + $val = $gmt + ? timegm($s, $i, $h, $d, $m - 1, $y) + : timelocal($s, $i, $h, $d, $m - 1, $y); + if ( defined $us ) { + $us = sprintf('%.6f', $us); + $us =~ s/^0\././; + $val .= $us; + } + } + return $val; +} + +sub any_unix_timestamp { + my ( $val, $callback ) = @_; + + if ( my ($n, $suffix) = $val =~ m/^$n_ts$/ ) { + $n = $suffix eq 's' ? $n # Seconds + : $suffix eq 'm' ? $n * 60 # Minutes + : $suffix eq 'h' ? $n * 3600 # Hours + : $suffix eq 'd' ? $n * 86400 # Days + : $n; # default: Seconds + PTDEBUG && _d('ts is now - N[shmd]:', $n); + return time - $n; + } + elsif ( $val =~ m/^\d{9,}/ ) { + PTDEBUG && _d('ts is already a unix timestamp'); + return $val; + } + elsif ( my ($ymd, $hms) = $val =~ m/^(\d{6})(?:\s+(\d+:\d+:\d+))?/ ) { + PTDEBUG && _d('ts is MySQL slow log timestamp'); + $val .= ' 00:00:00' unless $hms; + return unix_timestamp(parse_timestamp($val)); + } + elsif ( ($ymd, $hms) = $val =~ m/^(\d{4}-\d\d-\d\d)(?:[T ](\d+:\d+:\d+))?/) { + PTDEBUG && _d('ts is properly formatted timestamp'); + $val .= ' 00:00:00' unless $hms; + return unix_timestamp($val); + } + else { + PTDEBUG && _d('ts is MySQL expression'); + return $callback->($val) if $callback && ref $callback eq 'CODE'; + } + + PTDEBUG && _d('Unknown ts type:', $val); + return; +} + +sub make_checksum { + my ( $val ) = @_; + my $checksum = uc substr(md5_hex($val), -16); + PTDEBUG && _d($checksum, 'checksum for', $val); + return $checksum; +} + +sub crc32 { + my ( $string ) = @_; + return unless $string; + my $poly = 0xEDB88320; + my $crc = 0xFFFFFFFF; + foreach my $char ( split(//, $string) ) { + my $comp = ($crc ^ ord($char)) & 0xFF; + for ( 1 .. 8 ) { + $comp = $comp & 1 ? 
$poly ^ ($comp >> 1) : $comp >> 1; + } + $crc = (($crc >> 8) & 0x00FFFFFF) ^ $comp; + } + return $crc ^ 0xFFFFFFFF; +} + +my $got_json = eval { require JSON }; +sub encode_json { + return JSON::encode_json(@_) if $got_json; + my ( $data ) = @_; + return (object_to_json($data) || ''); +} + + +sub object_to_json { + my ($obj) = @_; + my $type = ref($obj); + + if($type eq 'HASH'){ + return hash_to_json($obj); + } + elsif($type eq 'ARRAY'){ + return array_to_json($obj); + } + else { + return value_to_json($obj); + } +} + +sub hash_to_json { + my ($obj) = @_; + my @res; + for my $k ( sort { $a cmp $b } keys %$obj ) { + push @res, string_to_json( $k ) + . ":" + . ( object_to_json( $obj->{$k} ) || value_to_json( $obj->{$k} ) ); + } + return '{' . ( @res ? join( ",", @res ) : '' ) . '}'; +} + +sub array_to_json { + my ($obj) = @_; + my @res; + + for my $v (@$obj) { + push @res, object_to_json($v) || value_to_json($v); + } + + return '[' . ( @res ? join( ",", @res ) : '' ) . ']'; +} + +sub value_to_json { + my ($value) = @_; + + return 'null' if(!defined $value); + + my $b_obj = B::svref_2object(\$value); # for round trip problem + my $flags = $b_obj->FLAGS; + return $value # as is + if $flags & ( B::SVp_IOK | B::SVp_NOK ) and !( $flags & B::SVp_POK ); # SvTYPE is IV or NV? + + my $type = ref($value); + + if( !$type ) { + return string_to_json($value); + } + else { + return 'null'; + } + +} + +my %esc = ( + "\n" => '\n', + "\r" => '\r', + "\t" => '\t', + "\f" => '\f', + "\b" => '\b', + "\"" => '\"', + "\\" => '\\\\', + "\'" => '\\\'', +); + +sub string_to_json { + my ($arg) = @_; + + $arg =~ s/([\x22\x5c\n\r\t\f\b])/$esc{$1}/g; + $arg =~ s/\//\\\//g; + $arg =~ s/([\x00-\x08\x0b\x0e-\x1f])/'\\u00' . unpack('H2', $1)/eg; + + utf8::upgrade($arg); + utf8::encode($arg); + + return '"' . $arg . '"'; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Transformers package +# ########################################################################### + +# ########################################################################### +# Safeguards package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Safeguards.pm +# t/lib/Safeguards.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Safeguards; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ($class, %args) = @_; + my $self = { + disk_bytes_free => $args{disk_bytes_free} || 104857600, # 100 MiB + disk_pct_free => $args{disk_pct_free} || 5, + }; + return bless $self, $class; +} + +sub get_disk_space { + my ($self, %args) = @_; + my $filesystem = $args{filesystem} || $ENV{PWD}; + + my $disk_space = `df -P -k "$filesystem"`; + chop($disk_space) if $disk_space; + PTDEBUG && _d('Disk space on', $filesystem, $disk_space); + + return $disk_space; +} + +sub check_disk_space() { + my ($self, %args) = @_; + my $disk_space = $args{disk_space}; + PTDEBUG && _d("Checking disk space:\n", $disk_space); + + my ($partition) = $disk_space =~ m/^\s*(\/.+)/m; + PTDEBUG && _d('Partition:', $partition); + die "Failed to parse partition from disk space:\n$disk_space" + unless $partition; + + my (undef, undef, $bytes_used, $bytes_free, $pct_used, undef) + = $partition =~ m/(\S+)/g; + PTDEBUG && _d('Bytes used:', $bytes_used, 'free:', $bytes_free, + 'Percentage used:', $pct_used); + + $bytes_used = ($bytes_used || 0) * 1024; + $bytes_free = ($bytes_free || 0) * 1024; + + $pct_used =~ s/%//; + my 
$pct_free = 100 - ($pct_used || 0); + + return $bytes_free >= $self->{disk_bytes_free} + && $pct_free >= $self->{disk_pct_free}; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Safeguards package +# ########################################################################### + +# ########################################################################### +# Percona::Agent::Logger package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/Agent/Logger.pm +# t/lib/Percona/Agent/Logger.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Percona::Agent::Logger; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use POSIX qw(SIGALRM); + +use Lmo; +use Transformers; +use Percona::WebAPI::Resource::LogEntry; + +Transformers->import(qw(ts)); + +has 'exit_status' => ( + is => 'rw', + isa => 'ScalarRef', + required => 1, +); + +has 'pid' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'service' => ( + is => 'rw', + isa => 'Maybe[Str]', + required => 0, + default => sub { return; }, +); + +has 'data_ts' => ( + is => 'rw', + isa => 'Maybe[Int]', + required => 0, + default => sub { return; }, +); + +has 'online_logging' => ( + is => 'ro', + isa => 'Bool', + required => 0, + default => sub { return 1 }, +); + +has 'online_logging_enabled' => ( + is => 'rw', + isa => 'Bool', + required => 0, + default => sub { return 0 }, +); + +has 'quiet' => ( + is => 'rw', + isa => 'Int', + required => 0, + default => sub { 
return 0 },
+);
+
+has '_buffer' => (
+   is       => 'rw',
+   isa      => 'ArrayRef',
+   required => 0,
+   default  => sub { return []; },
+);
+
+has '_pipe_write' => (
+   is       => 'rw',
+   isa      => 'Maybe[FileHandle]',
+   required => 0,
+);
+
+sub read_stdin {
+   my ( $t ) = @_;
+
+   POSIX::sigaction(
+      SIGALRM,
+      POSIX::SigAction->new(sub { die 'read timeout'; }),
+   ) or die "Error setting SIGALRM handler: $OS_ERROR";
+
+   my $timeout = 0;
+   my @lines;
+   eval {
+      alarm $t;
+      while(defined(my $line = <STDIN>)) {
+         push @lines, $line;
+      }
+      alarm 0;
+   };
+   if ( $EVAL_ERROR ) {
+      PTDEBUG && _d('Read error:', $EVAL_ERROR);
+      die $EVAL_ERROR unless $EVAL_ERROR =~ m/read timeout/;
+      $timeout = 1;
+   }
+   return unless scalar @lines || $timeout;
+   return \@lines;
+}
+
+sub start_online_logging {
+   my ($self, %args) = @_;
+   my $client       = $args{client};
+   my $log_link     = $args{log_link};
+   my $read_timeout = $args{read_timeout} || 3;
+
+   return unless $self->online_logging;
+
+   $self->info("Starting online logging. No more log entries will be printed here. "
+      . "Agent logs are accessible through the web interface.");
+
+   my $pid = open(my $pipe_write, "|-");
+
+   if ($pid) {
+      select $pipe_write;
+      $OUTPUT_AUTOFLUSH = 1;
+      $self->_pipe_write($pipe_write);
+      $self->online_logging_enabled(1);
+   }
+   else {
+      my @log_entries;
+      my $n_errors = 0;
+      my $oktorun  = 1;
+      QUEUE:
+      while ($oktorun) {
+         my $lines = read_stdin($read_timeout);
+         last QUEUE unless $lines;
+         LINE:
+         while ( defined(my $line = shift @$lines) ) {
+            my ($ts, $level, $n_lines, $msg) = $line =~ m/^([^,]+),([^,]+),([^,]+),(.+)/s;
+            if ( !$ts || !$level || !$n_lines || !$msg ) {
+               warn "$line\n";
+               next LINE;
+            }
+            if ( $n_lines > 1 ) {
+               $n_lines--;  # first line
+               for ( 1..$n_lines ) {
+                  $msg .= shift @$lines;
+               }
+            }
+
+            push @log_entries, Percona::WebAPI::Resource::LogEntry->new(
+               pid       => $self->pid,
+               entry_ts  => $ts,
+               log_level => $level,
+               message   => $msg,
+               ($self->service ? (service => $self->service) : ()),
+               ($self->data_ts ? 
(data_ts => $self->data_ts) : ()), + ); + } # LINE + + if ( scalar @log_entries ) { + eval { + $client->post( + link => $log_link, + resources => \@log_entries, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if ( ++$n_errors <= 10 ) { + warn "Error sending log entry to API: $e"; + if ( $n_errors == 10 ) { + my $ts = ts(time, 1); # 1=UTC + warn "$ts WARNING $n_errors consecutive errors, no more " + . "error messages will be printed until log entries " + . "are sent successfully again.\n"; + } + } + } + else { + @log_entries = (); + $n_errors = 0; + } + } # have log entries + + my $n_log_entries = scalar @log_entries; + if ( $n_log_entries > 1_000 ) { + warn "$n_log_entries log entries in send buffer, " + . "removing first 100 to avoid excessive usage.\n"; + @log_entries = @log_entries[100..($n_log_entries-1)]; + } + } # QUEUE + + if ( scalar @log_entries ) { + my $ts = ts(time, 1); # 1=UTC + warn "$ts WARNING Failed to send these log entries " + . "(timestamps are UTC):\n"; + foreach my $log ( @log_entries ) { + warn sprintf("%s %s %s\n", + $log->entry_ts, + level_name($log->log_level), + $log->message, + ); + } + } + + exit 0; + } # child + + return; +} + +sub level_number { + my $name = shift; + die "No log level name given" unless $name; + my $number = $name eq 'DEBUG' ? 1 + : $name eq 'INFO' ? 2 + : $name eq 'WARNING' ? 3 + : $name eq 'ERROR' ? 4 + : $name eq 'FATAL' ? 5 + : die "Invalid log level name: $name"; +} + +sub level_name { + my $number = shift; + die "No log level name given" unless $number; + my $name = $number == 1 ? 'DEBUG' + : $number == 2 ? 'INFO' + : $number == 3 ? 'WARNING' + : $number == 4 ? 'ERROR' + : $number == 5 ? 
'FATAL' + : die "Invalid log level number: $number"; +} + +sub debug { + my $self = shift; + return $self->_log('DEBUG', @_); +} + +sub info { + my $self = shift; + return $self->_log('INFO', @_); +} + +sub warning { + my $self = shift; + $self->_set_exit_status(); + return $self->_log('WARNING', @_); +} + +sub error { + my $self = shift; + $self->_set_exit_status(); + return $self->_log('ERROR', @_); +} + +sub fatal { + my $self = shift; + $self->_set_exit_status(); + $self->_log('FATAL', @_); + exit $self->exit_status; +} + +sub _set_exit_status { + my $self = shift; + my $exit_status = $self->exit_status; # get ref + $$exit_status |= 1; # deref to set + $self->exit_status($exit_status); # save back ref + return; +} + +sub _log { + my ($self, $level, $msg) = @_; + + my $ts = ts(time, 1); # 1=UTC + my $level_number = level_number($level); + + return if $self->quiet && $level_number < $self->quiet; + + chomp($msg); + my $n_lines = 1; + $n_lines++ while $msg =~ m/\n/g; + + if ( $self->online_logging_enabled ) { + while ( defined(my $log_entry = shift @{$self->_buffer}) ) { + $self->_queue_log_entry(@$log_entry); + } + $self->_queue_log_entry($ts, $level_number, $n_lines, $msg); + } + else { + if ( $self->online_logging ) { + push @{$self->_buffer}, [$ts, $level_number, $n_lines, $msg]; + } + + if ( $level_number >= 3 ) { # warning + print STDERR "$ts $level $msg\n"; + } + else { + print STDOUT "$ts $level $msg\n"; + } + } + + return; +} + +sub _queue_log_entry { + my ($self, $ts, $log_level, $n_lines, $msg) = @_; + print "$ts,$log_level,$n_lines,$msg\n"; + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::Agent::Logger package +# ########################################################################### + +# ########################################################################### +# Percona::Agent::Exception::* +# ########################################################################### + +{ + package Percona::Agent::Exception::PIDNotFound; + + use Lmo; + use overload '""' => \&as_string; + + has 'pid_file' => ( + is => 'ro', + isa => 'Maybe[Str]', + required => 1, + ); + + sub as_string { + my $self = shift; + return sprintf "PID file %s does not exist and no matching " + . "process was found in ps", $self->pid_file; + } + + no Lmo; + 1; +} + +{ + package Percona::Agent::Exception::NoPID; + + use Lmo; + use overload '""' => \&as_string; + + has 'pid_file' => ( + is => 'ro', + isa => 'Maybe[Str]', + required => 1, + ); + + has 'pid_file_is_empty' => ( + is => 'ro', + isa => 'Bool', + required => 1, + ); + + sub as_string { + my $self = shift; + if ( $self->pid_file_is_empty ) { + return sprintf "PID file %s is empty", $self->pid_file; + } + else { + return sprintf "PID file %s does not exist and parsing ps output " + . 
"failed", $self->pid_file;
+      }
+   }
+
+   no Lmo;
+   1;
+}
+
+{
+   package Percona::Agent::Exception::PIDNotRunning;
+
+   use Lmo;
+   use overload '""' => \&as_string;
+
+   has 'pid' => (
+      is       => 'ro',
+      isa      => 'Str',
+      required => 1,
+   );
+
+   sub as_string {
+      my $self = shift;
+      return sprintf "PID %s is not running", $self->pid;
+   }
+
+   no Lmo;
+   1;
+}
+
+BEGIN {
+   $INC{'Percona/Agent/Exception/PIDNotFound.pm'}   = __FILE__;
+   $INC{'Percona/Agent/Exception/NoPID.pm'}         = __FILE__;
+   $INC{'Percona/Agent/Exception/PIDNotRunning.pm'} = __FILE__;
+}
+
+# ###########################################################################
+# This is a combination of modules and programs in one -- a runnable module.
+# http://www.perl.com/pub/a/2006/07/13/lightning-articles.html?page=last
+# Or, look it up in the Camel book on pages 642 and 643 in the 3rd edition.
+#
+# Check at the end of this package for the call to main() which actually runs
+# the program.
+# ###########################################################################
+package pt_agent;
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+use Scalar::Util qw(blessed);
+use POSIX qw(signal_h);
+use Time::HiRes qw(sleep time);
+use File::Temp qw(tempfile);
+use File::Path;
+use File::Basename;
+use FindBin;
+
+use Percona::Toolkit;
+use Percona::WebAPI::Client;
+use Percona::WebAPI::Exception::Request;
+use Percona::WebAPI::Exception::Resource;
+use Percona::WebAPI::Resource::Agent;
+use Percona::WebAPI::Resource::Config;
+use Percona::WebAPI::Resource::Service;
+use Percona::WebAPI::Representation;
+use Percona::Agent::Exception::PIDNotFound;
+use Percona::Agent::Exception::NoPID;
+use Percona::Agent::Exception::PIDNotRunning;
+
+Percona::Toolkit->import(qw(_d Dumper have_required_args));
+Percona::WebAPI::Representation->import(qw(as_json as_config));
+Transformers->import(qw(ts));
+
+use sigtrap 'handler', \&sig_int, 'normal-signals';
+use sigtrap 
   'handler', \&reload_signal, 'USR1';

# Global run state shared by main(), the subs below, and the signal handlers.
my $oktorun         = 1;   # cleared to stop the main loop
my $exit_status     = 0;   # final process exit code
my $state           = {};  # cross-sub scratch flags (first_config, restarts, etc.)
my $exit_on_signals = 0;   # when true, signals exit immediately instead of draining
my $logger;                # Percona::Agent::Logger, created in main()

use constant MAX_DATA_FILE_SIZE => 15_728_640;  # 15M

# Required Perl modules mapped to candidate names for installing them
# (presumably CPAN dist, Debian package, RedHat package -- TODO confirm
# against missing_perl_module_deps()).
my %deps = (
   'DBI'
      => [qw(DBI libdbi-perl perl-DBI)],
   'DBD::mysql'
      => [qw(DBD::mysql libdbd-mysql-perl perl-DBD-MySQL)],
   'JSON'
      => [qw(JSON libjson-perl perl-JSON)],
   'LWP'
      => [qw(LWP libwww-perl perl-libwww-perl)],
   'IO::Socket::SSL'
      => [qw(IO::Socket::SSL libio-socket-ssl-perl perl-IO-Socket-SSL)],
);

# Will check this later.  Load JSON now if available; failure is tolerated
# here and reported by missing_perl_module_deps() in main().
eval {
   require JSON;
};

sub main {
   local @ARGV = @_;

   # Reset global vars else tests will fail in strange ways.
   $oktorun         = 1;
   $exit_status     = 0;
   $state           = {};
   $exit_on_signals = 0;

   # ########################################################################
   # Get configuration information.
   # ########################################################################
   my $o = new OptionParser();
   $o->get_specs();
   $o->get_opts();

   my $dp = $o->DSNParser();
   $dp->prop('set-vars', $o->set_vars());

   # We're _not_ running as root, so unless --pid and --log have
   # already been configured, the defaults won't work.  In this
   # case, use tmp values until a new config is received.
   if ( $EUID != 0 ) {
      $o->set('pid', '/tmp/pt-agent.pid') unless $o->got('pid');
      $o->set('log', '/tmp/pt-agent.log') unless $o->got('log');
      $o->set('lib', '/tmp/pt-agent'    ) unless $o->got('lib');
   }

   # NOTE(review): intentionally empty -- no extra option validation is
   # done before usage_or_errors(); kept as a placeholder.
   if ( !$o->get('help') ) {
   }

   $o->usage_or_errors();

   if ( $o->get('interactive') || $o->get('install') ) {
      $OUTPUT_AUTOFLUSH = 1
   }

   # ########################################################################
   # Fail-safe: if the agent somehow runs away, i.e. starts to fork-bomb,
   # stop everything.
   # ########################################################################
   my $lib_dir = $o->get('lib');
   if ( too_many_agents(lib_dir => $lib_dir) ) {
      schedule_services(
         services => [],
         lib_dir  => $lib_dir,
      );
      die "Too many agents are running. Remove the PID files in "
         . "$lib_dir/pids/ if the agents are no longer running. Else, "
         . "check the log files in $lib_dir/logs/ and online to see "
         . "if the agent is stuck in a loop. Please contact Percona "
         . "if you need urgent help.\n";
   }

   # ########################################################################
   # Connect to MySQL later, maybe.
   # ########################################################################
   my $cxn = Cxn->new(
      dsn_string   => '',
      OptionParser => $o,
      DSNParser    => $dp,
   );

   # ########################################################################
   # Make a logger, not online yet.
   # ########################################################################
   $logger = Percona::Agent::Logger->new(
      exit_status    => \$exit_status,
      pid            => $PID,
      online_logging => $o->get('log-api') ? 1 : 0,
   );

   # ########################################################################
   # --install and exit.
   # ########################################################################
   if ( $o->get('install') ) {
      $exit_on_signals = 1;
      install(
         OptionParser => $o,
         Cxn          => $cxn,
         interactive  => $o->get('interactive'),
         flags        => $o->get('install-options'),
      );
      return $exit_status;
   }

   # ########################################################################
   # Nothing works without required Perl modules.
   # ########################################################################
   if ( missing_perl_module_deps() ) {
      $logger->fatal("Missing required Perl modules");
   }

   # ########################################################################
   # Nothing works without an API key.
+ # ######################################################################## + my $api_key = $o->get('api-key'); + if ( !$api_key ) { + $logger->fatal("No API key was found or specified. pt-agent requires a " + . "Percona Cloud Tools API key. Put your API key " + . "in a --config file or specify it with --api-key."); + } + + # ######################################################################## + # --status, --stop, and --reset + # ######################################################################## + if ( $o->get('status') ) { + agent_status( + api_key => $o->get('api-key'), + pid_file => $o->get('pid'), + lib_dir => $o->get('lib'), + ); + return $exit_status; + } + elsif ( $o->get('stop') ) { + stop_agent( + pid_file => $o->get('pid'), + lib_dir => $o->get('lib'), + ); + $logger->info("Done stopping pt-agent, exit $exit_status"); + return $exit_status; + } + elsif ( my $n = $o->get('reset') ) { + $exit_on_signals = 1; + + my $api_key = $o->get('api-key'); + if ( !$api_key && $n < 2 ) { + my $config_file = get_config_file(); + if ( -f $config_file ) { + die "Cannot reset pt-agent because an API key is not set in " + . "$config_file and --api-key was not specified. Specify " + . "--api-key to force the reset. Else specify --reset " + . "twice to do a hard reset, after which you will need to " + . "re-install pt-agent.\n"; + } + else { + die "Cannot reset pt-agent because an API key is not set in " + . "$config_file. Add 'api-key=' to $config_file " + . "or specify it with --api-key. Else specify --reset " + . "twice to do a hard reset, after which you will need to " + . "re-install pt-agent.\n"; + } + } + reset_agent( + pid_file => $o->get('pid'), # for stop_agent() + lib_dir => $o->get('lib'), + spool_dir => $o->get('spool'), + log_file => $o->get('log'), + api_key => $api_key, # optional + ); + if ( $exit_status != 0 ) { + $logger->error("Failed to completely reset pt-agent. " + . 
"Check the warnings and errors and above and try again."); + } + else { + $logger->info("pt-agent has been completely reset."); + } + return $exit_status; + } + elsif ( $o->get('reload') ) { + reload_agent( + pid_file => $o->get('pid'), + ); + return $exit_status; + } + + # ######################################################################## + # --run-service and exit. + # ######################################################################## + if ( my $service = $o->get('run-service') ) { + eval { + run_service( + api_key => $api_key, + service => $service, + lib_dir => $o->get('lib'), + spool_dir => $o->get('spool'), + Cxn => $cxn, + ); + $logger->info("Done running $service, exit $exit_status"); + }; + if ( $EVAL_ERROR ) { + $logger->fatal("Error running service $service: $EVAL_ERROR"); + } + return $exit_status; + } + + # ######################################################################## + # --send-data and exit. + # ######################################################################## + if ( my $service = $o->get('send-data') ) { + eval { + send_data( + api_key => $api_key, + service => $service, + lib_dir => $o->get('lib'), + spool_dir => $o->get('spool'), + interactive => $o->get('interactive'), + ); + $logger->info("Done sending data for $service, exit $exit_status"); + }; + if ( $EVAL_ERROR ) { + $logger->fatal("Error running sending $service data: $EVAL_ERROR"); + } + return $exit_status; + } + + # ######################################################################## + # This is the main pt-agent daemon, a long-running and resilient + # process. Only internal errors should cause it to stop. Else, + # external errors, like Percona web API not responding, should be + # retried forever. + # ######################################################################## + + # Check the config file. This should probably never fail because + # the config file is $HOME/.pt-agent.conf, so the user should + # be able to write to their home dir. 
--run-service and --send-data + # don't need to do this because if there's no valid config, they should + # fail; they'll probably die due to --lib missing, which they verify + # but don't create. + my $config_file = get_config_file(); + if ( -f $config_file && !-w $config_file ) { + $logger->fatal("$config_file exists but is not writable") + } + + # Start, i.e. init/create/update, the agent. This forks and daemonizes, + # so we're the child/daemon process when it returns. To remember how + # this differs from run_agent(): first you start a car, then you put it + # in drive to "run" (drive) it. + my $running = start_agent( + api_key => $api_key, + Cxn => $cxn, + lib_dir => $o->get('lib'), + daemonize => $o->get('daemonize'), + pid_file => $o->get('pid'), + log_file => $o->get('log'), + ); + + # Wait time between checking for new config and services. + # Use the tool's built-in default until a config is gotten, + # then config->{check-interval} will be pass in. + my $check_interval = $o->get('check-interval'); + my $interval = sub { + my ($t, $quiet) = @_; + return unless $oktorun; + $t ||= $check_interval; + $logger->info("Sleeping $t seconds") unless $quiet; + sleep $t; + }; + + my $safeguards = Safeguards->new( + disk_bytes_free => $o->get('disk-bytes-free'), + disk_pct_free => $o->get('disk-pct-free'), + ); + + # Run the agent's main loop which doesn't return until the service + # is stopped, killed, or has an internal bug. 
   eval {
      run_agent(
         agent      => $running->{agent},
         client     => $running->{client},
         daemon     => $running->{daemon},
         interval   => $interval,
         safeguards => $safeguards,
         Cxn        => $cxn,
         lib_dir    => $o->get('lib'),
      );
   };
   if ( $EVAL_ERROR ) {
      $logger->fatal("Error running agent: $EVAL_ERROR");
   }

   $logger->info("pt-agent exit $exit_status, oktorun $oktorun");

   return $exit_status;
}

# ############################################################################
# Subroutines
# ############################################################################

# ################################################## #
# Percona Web API subs for agent and spool processes #
# ################################################## #

# Create and connect a Percona Web API client.
# Required args: api_key, interval (sub called between retries).
# Optional args: tries (undef = retry forever), oktorun, entry_link, quiet.
# Returns ($client, $entry_links, $logger_client); $entry_links and
# $logger_client are undef if a connection was never established.
sub get_api_client {
   my (%args) = @_;

   have_required_args(\%args, qw(
      api_key
      interval
   )) or die;
   my $api_key  = $args{api_key};
   my $interval = $args{interval};

   # Optional args
   my $tries      = $args{tries};
   my $_oktorun   = $args{oktorun}    || sub { return $oktorun };
   my $entry_link = $args{entry_link} || $ENV{PCT_ENTRY_LINK};
   my $quiet      = $args{quiet};

   my $client = Percona::WebAPI::Client->new(
      api_key => $api_key,
      ($entry_link ? (entry_link => $entry_link) : ()),
   );

   # GET the entry links until we receive a HASH that contains an
   # "agents" link, or until tries/oktorun says stop.
   my $entry_links;
   while ( $_oktorun->() && (!defined $tries || $tries--) ) {
      if ( !$state->{connecting_to_api}++ ) {
         $logger->info("Connecting to Percona Web API")  # once
      }

      eval {
         $entry_links = $client->get(link => $client->entry_link);
      };
      if ( $EVAL_ERROR ) {
         $logger->warning($EVAL_ERROR);
      }
      elsif (
         !$entry_links
         || (ref($entry_links) || '') ne 'HASH'
         || !scalar keys %$entry_links
      ) {
         $logger->info('Connected, but did not receive valid entry links: '
            . Dumper($entry_links));
      }
      elsif ( !$entry_links->{agents} ) {
         $logger->info('Connected, but did not receive agents link: '
            . Dumper($entry_links));
      }
      else {
         $logger->info("Connected");
         delete $state->{connecting_to_api};
         last;  # success
      }
      $interval->();  # failure, try again
   }

   # Create another client for Percona::Agent::Logger.  If the primary
   # client was created, then the API key and entry link worked, so
   # just duplicate them for the new logger client.  We don't need to
   # connect the logger client because clients are stateless so knowing
   # the primary client connected ensures that the logger client can/will
   # connect to with the same API and entry link.
   my $logger_client;
   if ( $client && $entry_links ) {
      $logger_client = Percona::WebAPI::Client->new(
         api_key => $api_key,
         ($entry_link ? (entry_link => $entry_link) : ()),
      );
   }

   return $client, $entry_links, $logger_client;
}

# Load the Agent resource previously saved as JSON in --lib/agent.
# Returns the Agent, or undef if no saved agent exists.  Dies (via
# $logger->fatal) if a saved agent has no UUID.
sub load_local_agent {
   my (%args) = @_;

   have_required_args(\%args, qw(
      lib_dir
   )) or die;
   my $lib_dir = $args{lib_dir};

   # Optional args
   my $agent_uuid = $args{agent_uuid};
   my $quiet      = $args{quiet};

   my $agent;
   my $agent_file = $lib_dir . "/agent";
   if ( -f $agent_file ) {
      $logger->info("Reading saved Agent from $agent_file") unless $quiet;
      my $agent_hashref = JSON::decode_json(slurp($agent_file));
      $agent = Percona::WebAPI::Resource::Agent->new(%$agent_hashref);
      if ( !$agent->uuid ) {
         $logger->fatal("No UUID for Agent in $agent_file.");
      }
   }
   else {
      $logger->info("No local agent") unless $quiet;
   }

   return $agent;
}

# Initialize the agent, i.e. create and return an Agent resource.
# If there's an agent_id, then it's updated (PUT), else a new agent
# is created (POST).  Doesn't return until successful.
# Create (POST) or update (PUT) the Agent resource, then GET it back so
# the returned Agent carries its server-side links (config, log, self).
# Required args: agent, action ('put'|'post'), link, client, interval.
# Optional args: oktorun, tries (undef = retry forever), actions.
# Returns ($agent, $success).  NOTE(review): the final "is ready" log line
# is emitted even when the loop exhausted its tries without success --
# callers must check $success, not the log.
sub init_agent {
   my (%args) = @_;

   have_required_args(\%args, qw(
      agent
      action
      link
      client
      interval
   )) or die;
   my $agent    = $args{agent};
   my $action   = $args{action};
   my $link     = $args{link};
   my $client   = $args{client};
   my $interval = $args{interval};

   # Optional args
   my $_oktorun = $args{oktorun} || sub { return $oktorun };
   my $tries    = $args{tries};
   my $actions  = $args{actions};

   # Update these attribs every time the agent is initialized.
   # Other optional attribs, like versions, are left to the caller.
   chomp(my $hostname = `hostname`);
   $agent->hostname($hostname);
   $agent->username($ENV{USER} || $ENV{LOGNAME});

   # Try to create/update the Agent.
   my $success = 0;
   while ( $_oktorun->() && (!defined $tries || $tries--) ) {
      $logger->info($action eq 'put' ? "Updating agent " . $agent->name
                                     : "Creating new agent");
      my $agent_uri = eval {
         $client->$action(
            link      => $link,
            resources => $agent,
         );
      };
      if ( $EVAL_ERROR ) {
         $logger->warning($EVAL_ERROR);
      }
      elsif ( !$agent_uri ) {
         $logger->info("No URI for Agent " . $agent->name);
      }
      else {
         # The Agent URI will have been returned in the Location header
         # of the POST or PUT response.  GET the Agent (even after PUT)
         # to get a link to the agent's config.
         eval {
            $agent = $client->get(
               link => $agent_uri,
            );
         };
         if ( $EVAL_ERROR ) {
            $logger->warning($EVAL_ERROR);
         }
         else {
            $success = 1;
            last;  # success
         }
      }
      if ( !defined $tries || $tries ) {
         $interval->();  # failure, try again
      }
   }

   $logger->info("Agent " . $agent->name . " (" . $agent->uuid . ") is ready");
   return $agent, $success;
}

# Check and init the --lib dir.  This dir is used to save the Agent resource
# (/agent), Service resources (/services/), and crontab for services (/crontab,
# /crontab.err).
# Create (or, with verify => 1, only check) the --lib directory and its
# required subdirectories.  Dies if a dir is missing in verify mode, or
# exists but is not writable.
sub init_lib_dir {
   my (%args) = @_;
   have_required_args(\%args, qw(
      lib_dir
   )) or die;
   my $lib_dir = $args{lib_dir};

   # Optional args
   my $verify = $args{verify};
   my $quiet  = $args{quiet};

   $logger->info(($verify ? 'Verify' : 'Initializing') . " --lib $lib_dir")
      unless $quiet;

   if ( ! -d $lib_dir ) {
      if ( $verify ) {
         die "$lib_dir does not exist\n";
      }
      else {
         $logger->info("$lib_dir does not exist, creating")
            unless $quiet;
         _safe_mkdir($lib_dir);
      }
   }
   elsif ( ! -w $lib_dir ) {
      die "--lib $lib_dir is not writable.\n";
   }

   # NOTE(review): the inner "my $dir" deliberately shadows the loop var,
   # turning the bare subdir name into a full path.
   foreach my $dir ( qw(services logs pids meta) ) {
      my $dir = "$lib_dir/$dir";
      if ( ! -d $dir ) {
         if ( $verify ) {
            die "$dir does not exist\n";
         }
         else {
            $logger->info("$dir does not exist, creating")
               unless $quiet;
            _safe_mkdir($dir);
         }
      }
      elsif ( ! -w $dir ) {
         die "$dir is not writable.\n";
      }
   }

   return;
}

# ################################ #
# Agent (main daemon) process subs #
# ################################ #

# Bring the agent process up: optionally daemonize, init --lib, connect
# to the API, version-check, then create or update the Agent resource.
# Returns a hashref { agent, client, daemon } for run_agent(), or nothing
# if oktorun was cleared while starting.
sub start_agent {
   my (%args) = @_;

   have_required_args(\%args, qw(
      api_key
      lib_dir
      Cxn
   )) or die;
   my $api_key = $args{api_key};
   my $lib_dir = $args{lib_dir};
   my $cxn     = $args{Cxn};

   # Optional args
   my $agent_uuid    = $args{agent_uuid};
   my $daemonize     = $args{daemonize};
   my $pid_file      = $args{pid_file};
   my $log_file      = $args{log_file};
   my $_oktorun      = $args{oktorun} || sub { return $oktorun };
   my $tries         = $args{tries};
   my $interval      = $args{interval} || sub { sleep 60; };
   my $versions      = $args{versions};       # for testing
   my $client        = $args{client};         # for testing
   my $entry_links   = $args{entry_links};    # for testing
   my $logger_client = $args{logger_client};  # for testing

   $logger->info('Starting agent');

   # Daemonize first so all output goes to the --log.
   my $daemon;
   if ( $daemonize ) {
      $daemon = Daemon->new(
         daemonize => $daemonize,
         pid_file  => $pid_file,
         log_file  => $log_file,
         # Printed by the parent just before it exits, so the user sees
         # where the daemonized child is running and with which files.
         parent_exit => sub {
            my $child_pid = shift;
            print "pt-agent has daemonized and is running as PID $child_pid:

   --lib " . ($lib_dir || '') . "
   --log " . ($log_file || '') . "
   --pid " . ($pid_file || '') . "

These values can change if a different configuration is received.
",
         }
      );
      $daemon->run();

      # If we daemonized, the parent has already exited and we're the child.
      # We shared a copy of every Cxn with the parent, and the parent's copies
      # were destroyed but the dbhs were not disconnected because the parent
      # attrib was true.  Now, as the child, set it false so the dbhs will be
      # disconnected when our Cxn copies are destroyed.  If we didn't daemonize,
      # then we're not really a parent (since we have no children), so set it
      # false to auto-disconnect the dbhs when our Cxns are destroyed.
      $cxn->{parent} = 0;
   }

   # Make --lib and its subdirectories.
   eval {
      init_lib_dir(
         lib_dir => $lib_dir,
      );
   };
   if ( $EVAL_ERROR ) {
      chomp($EVAL_ERROR);
      $logger->info("Error initializing --lib $lib_dir: $EVAL_ERROR. "
         . "Configure the agent to use a writeable --lib directory.");
   }

   # Connect to the API and get entry links.  Since we're in start_agent(),
   # try forever because the agent needs an API connection to start.
   if ( !$client || !$entry_links ) {
      ($client, $entry_links, $logger_client) = get_api_client(
         api_key  => $api_key,
         tries    => undef,  # forever
         interval => sub { sleep 60 },
      );
   }
   return unless $_oktorun->();

   # Do a version-check every time the agent starts.  If versions
   # have changed, this can affect how services are implemented.
   # Since this is the only thing we use the Cxn for, get_versions()
   # connects and disconnect it, if possible.  If not possible, the
   # MySQL version isn't sent in hopes that it becomes possible to get
   # it later.
   if ( !$versions || !$versions->{MySQL} ) {
      $versions = get_versions(
         Cxn => $cxn,
      );
   }
   return unless $_oktorun->();

   # Load and update the local (i.e. existing) agent, or create a new one.
   my $agent;
   my $action;
   my $link;
   if ( $agent_uuid ) {
      # --agent-uuid given: force re-creation of that specific agent.
      $logger->info("Re-creating Agent with UUID $agent_uuid");
      $agent = Percona::WebAPI::Resource::Agent->new(
         uuid     => $agent_uuid,
         versions => $versions,
      );
      $action = 'put';  # update
      $link   = $entry_links->{agents} . '/' . $agent->uuid;
   }
   else {
      # First try to load the local agent.
      $agent = load_local_agent(
         lib_dir => $lib_dir,
      );
      if ( $agent ) {
         # Loaded local agent.
         $action = 'put';  # update
         $link   = $entry_links->{agents} . '/' . $agent->uuid;
         $agent->{versions} = $versions;
      }
      else {
         # No local agent and --agent-uuid wasn't given.
         $agent = Percona::WebAPI::Resource::Agent->new(
            versions => $versions,
         );
         $action = 'post';  # create
         $link   = $entry_links->{agents};
      }
   }

   ($agent) = init_agent(
      agent    => $agent,
      action   => $action,           # put or post
      link     => $link,
      client   => $client,
      interval => sub { sleep 60 },
      tries    => $tries,            # optional
      oktorun  => $_oktorun,         # optional
   );

   # Give the logger its client so that it will also POST every log entry
   # to /agent/{uuid}/log.  This is done asynchronously by a thread so a
   # simple info("Hello world!") to STDOUT won't block if the API isn't
   # responding. -- Both client and log_link are required to enable this.
   if ( $agent->links->{log} && $logger_client ) {
      $logger->start_online_logging(
         client   => $logger_client,
         log_link => $agent->links->{log},
      );
   }

   save_agent(
      agent   => $agent,
      lib_dir => $lib_dir,
   );

   return {
      agent  => $agent,
      client => $client,
      daemon => $daemon,
   };
}

# Run the agent, i.e. exec the main loop to check/update the config
# and services.  Doesn't return until the service is stopped or killed.
# Main daemon loop: poll the API for config and services, re-check MySQL
# version after restarts, enforce disk-space safeguards, and sleep
# check-interval seconds between iterations.  Returns only when oktorun
# is cleared (signal) or an internal error escapes.
sub run_agent {
   my (%args) = @_;

   have_required_args(\%args, qw(
      agent
      client
      daemon
      interval
      lib_dir
      safeguards
      Cxn
   )) or die;
   my $agent      = $args{agent};
   my $client     = $args{client};
   my $daemon     = $args{daemon};
   my $interval   = $args{interval};
   my $lib_dir    = $args{lib_dir};
   my $safeguards = $args{safeguards};
   my $cxn        = $args{Cxn};

   # Optional args
   my $_oktorun = $args{oktorun} || sub { return $oktorun };

   $logger->info('Running agent ' . $agent->name);

   # #######################################################################
   # Main agent loop
   # #######################################################################
   $state->{first_config} = 1;
   my $first_config_interval = 60;
   $logger->info("Checking silently every $first_config_interval seconds"
      . " for the first config");

   my $success;
   my $new_daemon;
   my $config;
   my $services = {};
   while ( $_oktorun->() ) {
      check_if_mysql_restarted(
         Cxn => $cxn,
      );

      # If a previous iteration couldn't get the MySQL version (e.g. MySQL
      # was down), keep trying and PUT the updated versions once obtained.
      if ( $state->{need_mysql_version} ) {
         my $versions = get_versions(
            Cxn => $cxn,
         );
         if ( $versions->{MySQL} ) {
            $agent->versions($versions);
            my $updated_agent;
            ($agent, $updated_agent) = init_agent(
               agent    => $agent,
               action   => 'put',
               link     => $agent->links->{self},
               client   => $client,
               interval => sub { return; },
               tries    => 1,  # optional
            );
            if ( $updated_agent ) {
               $logger->info("Got MySQL versions");
               save_agent(
                  agent   => $agent,
                  lib_dir => $lib_dir,
               );
            }
            else {
               $state->{need_mysql_version} = 1;
            }
         }
      }

      ($config, $lib_dir, $new_daemon, $success) = get_config(
         link    => $agent->links->{config},
         agent   => $agent,
         client  => $client,
         daemon  => $daemon,
         lib_dir => $lib_dir,
         config  => $config,
         quiet   => $state->{first_config},
      );

      # Get services only if we successfully got the config because the services
      # may depend on the current config, specifically the --spool dir.
+ if ( $success && $config && $config->links->{services} ) { + if ( $state->{first_config} ) { + delete $state->{first_config}; + $logger->info('Agent has been successfully configured'); + } + if ( $new_daemon ) { + # NOTE: Daemon objects use DESTROY to auto-remove their pid file + # when they lose scope (i.e. ref count goes to zero). This + # assignment destroys (old) $daemon, so it auto-removes the old + # pid file. $new_daemon maintains scope and the new pid file + # by becoming $daemon which was defined in the outer scope so + # it won't destroy again when we leave this block. Fancy! + # About sharing_pid_file: see the comment in apply_config(). + if ( $new_daemon->{sharing_pid_file} ) { + $daemon->{pid_file_owner} = 0; + delete $new_daemon->{sharing_pid_file}; + } + $daemon = $new_daemon; + } + + # Check the safeguards. + my ($disk_space, $disk_space_ok); + eval { + $disk_space = $safeguards->get_disk_space( + filesystem => $config->options->{spool}, + ); + $disk_space_ok = $safeguards->check_disk_space( + disk_space => $disk_space, + ); + }; + if ( $EVAL_ERROR ) { + $logger->warning("Error checking disk space: $EVAL_ERROR"); + $disk_space_ok = 1; + } + if ( !$disk_space_ok ) { + $logger->warning("Disk bytes free/percentage threshold: " + . $safeguards->{disk_bytes_free} + . '/' + . $safeguards->{disk_pct_free}); + $logger->warning("Disk space is low, stopping all services:\n" + . $disk_space); + if ( !$state->{all_services_are_stopped} ) { + stop_all_services( + lib_dir => $lib_dir, + ); + } + $logger->warning('Services will restart when disk space " + . "threshold checks pass'); + } + else { + # Have config, safeguards are ok, now get/update the services. 
            ($services, $success) = get_services(
               link     => $config->links->{services},
               agent    => $agent,
               client   => $client,
               lib_dir  => $lib_dir,
               services => $services,
               json     => $args{json},     # optional, for testing
               bin_dir  => $args{bin_dir},  # optional, for testing
            );
         }
      }

      # If configured, wait the given interval.  Else, retry more
      # quickly so we're ready to go soon after we're configured.
      $interval->(
         $config ? ($config->options->{'check-interval'}, 0)
                 : ($first_config_interval              , 1)  # 1=quiet
      );
   }

   stop_all_services(
      lib_dir => $lib_dir,
   );

   # This shouldn't happen until the service is stopped/killed.
   $logger->info('Agent ' . $agent->name . ' has stopped');
   return;
}

# GET the agent's Config resource and apply it if it is newer than the
# currently running config.  A 404 simply means the agent has not been
# configured yet.  Returns ($config, $lib_dir, $new_daemon, $success);
# $lib_dir and $new_daemon may change if the new config moved --lib,
# --pid, or --log (see apply_config()).
sub get_config {
   my (%args) = @_;
   have_required_args(\%args, qw(
      link
      agent
      client
      daemon
      lib_dir
   )) or die;
   my $link    = $args{link};
   my $agent   = $args{agent};
   my $client  = $args{client};
   my $daemon  = $args{daemon};
   my $lib_dir = $args{lib_dir};

   # Optional args
   my $config = $args{config};  # may not be defined yet
   my $quiet  = $args{quiet};

   my $success = 0;
   my $new_daemon;

   $logger->info('Getting config') unless $quiet;
   my $new_config = eval {
      $client->get(
         link => $link,
      );
   };
   if ( my $e = $EVAL_ERROR ) {
      if (blessed($e)) {
         if ($e->isa('Percona::WebAPI::Exception::Request')) {
            if ( $e->status == 404 ) {
               # Not an error: agent exists but has no config yet.
               $logger->info('Agent ' . $agent->name. ' is not configured.')
                  unless $quiet;
            }
            else {
               $logger->info("$e");  # API error?
            }
         }
         elsif ($e->isa('Percona::WebAPI::Exception::Resource')) {
            $logger->warning("$e");
         }
      }
      else {
         $logger->error($e);  # internal error
      }
   }
   else {
      eval {
         if ( !$quiet ) {
            $logger->info("Running config: " . ($config ? $config->ts : ''));
            $logger->info("Current config: " .
$new_config->ts);
         }
         # Apply only if newer than the running config (ts is a timestamp).
         if ( !$config || $new_config->ts > $config->ts ) {
            ($lib_dir, $new_daemon) = apply_config(
               agent      => $agent,
               old_config => $config,
               new_config => $new_config,
               lib_dir    => $lib_dir,
               daemon     => $daemon,
            );
            $config  = $new_config;
            $success = 1;
            $logger->info('Config ' . $config->ts . ' applied successfully');
         }
         else {
            $success = 1;
            $logger->info('Config has not changed') unless $quiet;
         }
      };
      if ( $EVAL_ERROR ) {
         chomp $EVAL_ERROR;
         $logger->warning("Failed to apply config " . $new_config->ts
            . ": $EVAL_ERROR Will try again.");
      }
   }

   return ($config, $lib_dir, $new_daemon, $success);
}

# Apply a new Config: re-init --lib if it moved, re-save the agent there,
# and "re-daemonize" (swap pid/log files without restarting) if --pid or
# --log changed.  Returns (lib_dir, new_daemon); new_daemon is undef when
# the pid/log files did not change.  Dies on failure so get_config() can
# retry.
sub apply_config {
   my (%args) = @_;

   have_required_args(\%args, qw(
      agent
      new_config
      lib_dir
      daemon
   )) or die;
   my $agent      = $args{agent};
   my $new_config = $args{new_config};
   my $lib_dir    = $args{lib_dir};
   my $daemon     = $args{daemon};

   # Optional args
   my $old_config = $args{old_config};

   $logger->info('Applying config ' . $new_config->ts);

   # If the --lib dir has changed, init the new one and re-write
   # the Agent resource in it.
   my $new_lib_dir = $new_config->options->{lib};
   if ( ($new_lib_dir ne $lib_dir) || $state->{first_config} ) {
      # BUG FIX: log message typo "direcotry" corrected to "directory".
      $logger->info($state->{first_config} ? "Applying first config"
                                           : "New --lib directory: $new_lib_dir");
      init_lib_dir(
         lib_dir => $new_lib_dir,
      );

      # TODO: copy old-lib/services/* to new-lib/services/ ?

      # Save agent as --lib/agent so next time the tool starts it
      # loads the agent from the latest --lib dir.
      save_agent(
         agent   => $agent,
         lib_dir => $new_lib_dir,
      );
   }

   # If --pid or --log has changed, we need to "re-daemonize",
   # i.e. change these files while running, but the program
   # does _not_ actually restart.
   my $new_daemon;
   my $make_new_daemon = 0;
   my $old_pid = $daemon->{pid_file} || '';
   my $old_log = $daemon->{log_file} || '';
   my $new_pid = $new_config->options->{pid} || '';
   my $new_log = $new_config->options->{log} || '';
   if ( $old_pid ne $new_pid ) {
      $logger->info('NOTICE: Changing --pid file from ' . ($old_pid || '(none)')
         . ' to ' . ($new_pid || '(none)'));
      $make_new_daemon = 1;
   }
   if ( $daemon->{daemonize} ) {
      # --log only matters if we're daemonized
      if ( $old_log ne $new_log ) {
         $logger->info('NOTICE: Changing --log file from '
            . ($old_log || '(none)') . ' to ' . ($new_log || '(none)'));
         $make_new_daemon = 1;
      }
   }
   if ( $make_new_daemon ) {
      # We're either already daemonized or we didn't daemonize in the first
      # place, so daemonize => 0 here.  Also, if log hasn't changed, the
      # effect is simply closing and re-opening the same log.
      # TODO: If log changes but pid doesn't? will probably block itself.
      $new_daemon = Daemon->new(
         daemonize      => 0,
         pid_file       => $new_pid,
         log_file       => $new_log,
         force_log_file => $daemon->{daemonize},
      );
      eval {
         $new_daemon->run();

         if ( $daemon->{daemonize} && $old_log ne $new_log ) {
            $logger->info('New log file, previous was '
               . ($old_log || 'unset'));
         }
         if ( $old_pid eq $new_pid ) {
            # If the PID file has not changed, then the old/original daemon and
            # the new daemon are sharing the same pid file.  The old one
            # created it, but the new one will continue to hold it when
            # the old one goes away.  Set sharing_pid_file to signal to
            # the caller that they need to set old daemon pid_file_owner=0
            # so it does not auto-remove the shared pid file when it goes
            # away.
            $new_daemon->{sharing_pid_file} = 1;
         }
      };
      if ( $EVAL_ERROR ) {
         die "Error changing --pid and/or --log: $EVAL_ERROR\n";
      }
   }

   # Save config in $HOME/.pt-agent.conf if successful.
   write_config(
      config => $new_config,
   );

   return ($new_lib_dir || $lib_dir), $new_daemon;
}

# Write a Config resource to a Percona Toolkit config file,
# usually $HOME/pt-agent.conf.  Preserves any existing api-key= and
# no-log-api lines, which are local-only and not part of the Config
# resource; everything else in the file is rewritten.
sub write_config {
   my (%args) = @_;

   have_required_args(\%args, qw(
      config
   )) or die;
   my $config = $args{config};

   my $file = get_config_file();
   $logger->info("Writing config to $file");

   # Get the api-key line if any; we don't want to/can't clobber this.
   my $api_key;
   my $no_log_api;
   if ( -f $file ) {
      open my $fh, "<", $file
         or die "Error opening $file: $OS_ERROR";
      my $contents = do { local $/ = undef; <$fh> };
      close $fh;
      ($api_key)    = $contents =~ m/^(api-key=\S+)$/m;
      ($no_log_api) = $contents =~ m/^(no-log-api)$/m;
   }

   # Re-write the api-key, if any, then write the config.
   open my $fh, '>', $file
      or die "Error opening $file: $OS_ERROR";
   if ( $api_key ) {
      print { $fh } $api_key, "\n"
         or die "Error writing to $file: $OS_ERROR";
   }
   if ( $no_log_api ) {
      print { $fh } $no_log_api, "\n"
         or die "Error writing to $file: $OS_ERROR";
   }
   print { $fh } as_config($config)
      or die "Error writing to $file: $OS_ERROR";
   close $fh
      or die "Error closing $file: $OS_ERROR";

   return;
}

# GET the agent's current service list, diff it against the previously
# known services, then stop/write/start/restart/schedule services as
# needed.  Returns ($services, $success) where $services is the new
# name-keyed hashref of current services.
sub get_services {
   my (%args) = @_;
   have_required_args(\%args, qw(
      link
      agent
      client
      lib_dir
      services
   )) or die;
   my $link          = $args{link};
   my $agent         = $args{agent};
   my $client        = $args{client};
   my $lib_dir       = $args{lib_dir};
   my $prev_services = $args{services};

   my $success = 0;

   eval {
      $logger->info('Getting services');
      my $curr_services = $client->get(
         link => $link,
      );

      if ( !$curr_services ) {
         $logger->error("GET $link did not return anything, "
            . "expected a list of services");
      }
      elsif ( !scalar @$curr_services && !scalar keys %$prev_services ) {
         $logger->warning("No services are enabled for this agent");

         # Remove state that no longer matters if there are no services.
         if ( $state->{mysql_restarted} ) {
            $state->{last_uptime}       = 0;
            $state->{last_uptime_check} = 0;
            delete $state->{mysql_restarted};
         }
         if ( $state->{all_services_are_stopped} ) {
            delete $state->{all_services_are_stopped};
         }
      }
      else {
         if ( $state->{all_services_are_stopped} ) {
            $logger->info('Restarting services after safeguard shutdown');
            # If prev_services is empty, then it's like agent startup:
            # get all the latest services and start them, and remove
            # any old services.  We could just start-* the services we
            # already have, but since they were shut down due to a safeguard,
            # maybe (probably) they've changed.
            $prev_services = {};
            delete $state->{all_services_are_stopped};
         }
         elsif ( my $ts = $state->{mysql_restarted} ) {
            $logger->info("Restarting services after MySQL restart at $ts");
            $prev_services = {};
            $state->{last_uptime}       = 0;
            $state->{last_uptime_check} = 0;
            delete $state->{mysql_restarted};
         }

         # Determine which services are new (added), changed/updated,
         # and removed.
         my $sorted_services = sort_services(
            prev_services => $prev_services,
            curr_services => $curr_services,
         );

         # First, stop and remove services.  Do this before write_services()
         # because this call looks for --lib/services/stop-service which
         # write_services() removes.  I.e. use the service's stop- meta
         # counterpart (if any) before we remove the service.
         my $removed_ok = apply_services(
            action   => 'stop',
            services => $sorted_services->{removed},
            lib_dir  => $lib_dir,
            bin_dir  => $args{bin_dir},   # optional, for testing
            exec_cmd => $args{exec_cmd},  # optional, for testing
         );

         # Second, save each service in --lib/services/.  Do this before
         # the next calls to apply_services() because those calls look for
         # --lib/services/start-service which won't exist for new services
         # until written by this call.
         write_services(
            sorted_services => $sorted_services,
            lib_dir         => $lib_dir,
            json            => $args{json},  # optional, for testing
         );

         # Start new services and restart existing updated services.
         # Do this before calling schedule_services() so that, for example,
         # start-query-history is ran before query-history is scheduled
         # and starts running.

         # Run services with the run_once flag.  Unlike apply_services(),
         # this call runs the service directly, whether it's meta or not,
         # then it removes it from the services hashref so there's no
         # chance of running it again unless it's received again.
         apply_services_once(
            services => $sorted_services->{services},
            lib_dir  => $lib_dir,
            bin_dir  => $args{bin_dir},   # optional, for testing
            exec_cmd => $args{exec_cmd},  # optional, for testing
         );

         # Start new services.
         my $started_ok = apply_services(
            action   => 'start',
            services => $sorted_services->{added},
            lib_dir  => $lib_dir,
            bin_dir  => $args{bin_dir},   # optional, for testing
            exec_cmd => $args{exec_cmd},  # optional, for testing
         );

         # Restart existing updated services.
         my $restarted_ok = apply_services(
            action   => 'restart',
            services => $sorted_services->{updated},
            lib_dir  => $lib_dir,
            bin_dir  => $args{bin_dir},   # optional, for testing
            exec_cmd => $args{exec_cmd},  # optional, for testing
         );

         # Schedule any services with a run_schedule or spool_schedule.
         # This must be called last, after write_services() and
         # apply_services() because, for example, a service schedule
         # to run at */5 may run effectively immediate if we write
         # the new crontab at 00:04:59, so everything has to be
         # ready to go at this point.
# Partition the current services (arrayref from the API) against the
# previously known services (hashref keyed on name) into added, updated,
# removed, and unchanged buckets.  Returns a hashref with those four
# lists plus the current services re-keyed by name.
sub sort_services {
   my (%args) = @_;

   have_required_args(\%args, qw(
      prev_services
      curr_services
   )) or die;
   my $prev_services = $args{prev_services};   # hashref keyed on name
   my $curr_services = $args{curr_services};   # arrayref of Service objects

   # Index the current services by name and bucket each one relative to
   # the previous set: brand new, newer timestamp, or unchanged.
   #
   # apply_services() only needs real services, from which it can infer
   # certain meta-services like "start-foo" for real service "foo", but
   # write_services() needs meta-services too so it can know to remove
   # their files from --lib/services/.
   my %services;   # current services keyed on service name
   my (@added, @updated, @unchanged);
   foreach my $svc ( @$curr_services ) {
      my $name = $svc->name;
      $services{$name} = $svc;
      if ( !exists $prev_services->{$name} ) {
         push @added, $svc;
      }
      elsif ( $svc->ts > $prev_services->{$name}->ts ) {
         push @updated, $svc;
      }
      else {
         push @unchanged, $svc;
      }
   }

   # Anything we had before that is not in the current set was removed.
   my @removed;
   if ( scalar keys %$prev_services ) {
      @removed = grep { !exists $services{$_->name} } values %$prev_services;
   }

   $logger->info("Added services: "
      . join(', ', map { $_->name } @added))   if @added;
   $logger->info("Services updated: "
      . join(', ', map { $_->name } @updated)) if @updated;
   $logger->info("Services removed: "
      . join(', ', map { $_->name } @removed)) if @removed;

   return {
      services  => \%services,
      added     => \@added,
      updated   => \@updated,
      removed   => \@removed,
      unchanged => \@unchanged,
   };
}

# Write each service to its own file in --lib/services/.  Remove files for
# services that are no longer implemented (i.e. not in the services array).
sub write_services {
   my (%args) = @_;

   have_required_args(\%args, qw(
      sorted_services
      lib_dir
   )) or die;
   my $sorted_services = $args{sorted_services};
   my $lib_dir         = $args{lib_dir};

   # Optional args
   my $json = $args{json};   # for testing

   $lib_dir .= '/services';
   $logger->info("Writing services to $lib_dir");

   # Save current, active services: one JSON file per service.
   foreach my $service ( @{$sorted_services->{added}},
                         @{$sorted_services->{updated}} ) {
      my $file   = $lib_dir . '/' . $service->name;
      my $action = -f $file ? 'Updated' : 'Added';   # check before open clobbers it
      open my $fh, '>', $file
         or die "Error opening $file: $OS_ERROR";
      print { $fh } as_json($service, with_links => 1, json => $json)
         or die "Error writing to $file: $OS_ERROR";
      close $fh
         or die "Error closing $file: $OS_ERROR";
      $logger->info("$action $file");
   }

   # Remove old services.
   foreach my $service ( @{$sorted_services->{removed}} ) {
      my $file = $lib_dir . '/' . $service->name;
      next unless -f $file;
      unlink $file
         or die "Error removing $file: $OS_ERROR";
      $logger->info("Removed $file");
   }

   return;
}
+sub schedule_services { + my (%args) = @_; + + have_required_args(\%args, qw( + services + lib_dir + )) or die; + my $services = $args{services}; + my $lib_dir = $args{lib_dir}; + + # Optional args + my $quiet = $args{quiet}; + my $exec_cmd = $args{exec_cmd} || sub { return system(@_) }; + + $logger->info("Scheduling services") unless $quiet; + + # Only schedule "periodic" services, i.e. ones that run periodically, + # not just once. + my @periodic_services = grep { $_->run_schedule || $_->spool_schedule } + @$services; + my $new_crontab = make_new_crontab( + %args, + services => \@periodic_services, + ); + $logger->info("New crontab:\n" . $new_crontab || '') unless $quiet; + + my $crontab_file = "$lib_dir/crontab"; + open my $fh, '>', $crontab_file + or die "Error opening $crontab_file: $OS_ERROR"; + print { $fh } $new_crontab + or die "Error writing to $crontab_file: $OS_ERROR"; + close $fh + or die "Error closing $crontab_file: $OS_ERROR"; + + my $err_file = "$lib_dir/crontab.err"; + my $retval = $exec_cmd->("crontab $crontab_file > $err_file 2>&1"); + if ( $retval ) { + my $error = -f $err_file ? `cat $err_file` : ''; + die "Error setting new crontab: $error"; + } + + return; +} + +# Combine Service->run_schedule and (optionally) Service->spool_schedule +# lines with non-pt-agent lines, i.e. don't clobber the user's other +# crontab lines. +sub make_new_crontab { + my (%args) = @_; + + have_required_args(\%args, qw( + services + )) or die; + my $services = $args{services}; + + # Optional args + my $crontab_list = defined $args{crontab_list} ? $args{crontab_list} + : `crontab -l 2>/dev/null`; + my $bin_dir = defined $args{bin_dir} ? 
$args{bin_dir} + : "$FindBin::Bin/"; + + my @other_lines + = grep { $_ !~ m/pt-agent (?:--run-service|--send-data)/ } + split("\n", $crontab_list); + PTDEBUG && _d('Other crontab lines:', Dumper(\@other_lines)); + + my $env_vars = env_vars(); + + my @pt_agent_lines; + foreach my $service ( @$services ) { + if ( $service->run_schedule ) { + push @pt_agent_lines, + $service->run_schedule + . ($env_vars ? " $env_vars" : '') + . " ${bin_dir}pt-agent --run-service " + . $service->name; + } + if ( $service->spool_schedule ) { + push @pt_agent_lines, + $service->spool_schedule + . ($env_vars ? " $env_vars" : '') + . " ${bin_dir}pt-agent --send-data " + . $service->name; + } + } + PTDEBUG && _d('pt-agent crontab lines:', Dumper(\@pt_agent_lines)); + + my $new_crontab = join("\n", @other_lines, @pt_agent_lines) . "\n"; + + return $new_crontab; +} + +# Start real services, i.e. non-meta services. A real service is like +# "query-history", which probably has meta-services like "start-query-history" +# and "stop-query-history". We infer these start/stop meta-services +# from the real service's name. A service doesn't require meta-services; +# there may be nothing to do to start it, in which case the real service +# starts running due to its run_schedule and schedule_services(). +sub apply_services { + my (%args) = @_; + have_required_args(\%args, qw( + action + services + lib_dir + )) or die; + my $action = $args{action}; + my $services = $args{services}; + my $lib_dir = $args{lib_dir}; + + # Optional args + my $bin_dir = defined $args{bin_dir} ? "$args{bin_dir}" + : "$FindBin::Bin/"; + my $exec_cmd = $args{exec_cmd} || sub { return system(@_) }; + + $bin_dir .= '/' unless $bin_dir =~ m/\/$/; + + my $env_vars = env_vars(); + my $log = "$lib_dir/logs/start-stop.log"; + my $cmd_fmt = ($env_vars ? "$env_vars " : '') + . $bin_dir . 
"pt-agent --run-service %s >> $log 2>&1"; + + my @applied_ok; + SERVICE: + foreach my $service ( @$services ) { + next if $service->meta; # only real services + + my $name = $service->name; + + # To restart, one must first stop, then start afterwards. + if ( $action eq 'stop' || $action eq 'restart' ) { + if ( -f "$lib_dir/services/stop-$name" ) { + if ( $action eq 'stop' ) { + # If all we're doing is stopping services, then always + # returned them as "applied OK" even if they fail to run + # because the caller uses returns values to know to + # update crontab. So if stop-foo fails, at least we'll + # still remove --run-service foo from crontab. + push @applied_ok, $service; + } + my $cmd = sprintf $cmd_fmt, "stop-$name"; + $logger->info("Stopping $name: $cmd"); + my $cmd_exit_status = $exec_cmd->($cmd); + if ( $cmd_exit_status != 0 ) { + $logger->warning("Error stopping $name, check $log and " + . "$lib_dir/logs/$name.run"); + # This doesn't matter for stop, but for restart a failure + # to first stop means we shouldn't continue and try to start + # the service (since it hasn't been stopped yet). + next SERVICE; + } + } + } + + if ( $action eq 'start' || $action eq 'restart' ) { + # Remove old meta files. Meta files are generally temporary + # in any case, persisting info from one interval to the next. + # If the service has changed (e.g., report interval is longer), + # there's no easy way to tranistion from old metadata to new, + # so we just rm the old metadata and start anew. + my $meta_files = "$lib_dir/meta/$name*"; + foreach my $meta_file ( glob $meta_files ) { + if ( unlink $meta_file ) { + $logger->info("Removed $meta_file"); + } + else { + $logger->warning("Cannot remove $meta_file: $OS_ERROR"); + } + } + + # Start the service and wait for it to exit. If it dies + # really early (before it really begins), our log file will + # have the error; else, the service should automatically + # switch to its default log file ending in ".run". 
+ if ( -f "$lib_dir/services/start-$name" ) { + my $cmd = sprintf $cmd_fmt, "start-$name"; + $logger->info("Starting $name: $cmd"); + my $cmd_exit_status = $exec_cmd->($cmd); + if ( $cmd_exit_status != 0 ) { + $logger->warning("Error starting $name, check $log and " + ."$lib_dir/logs/$name.run"); + next SERVICE; + } + push @applied_ok, $service; + $logger->info("Started $name successfully"); + } + } + } + + return \@applied_ok; +} + +sub apply_services_once { + my (%args) = @_; + have_required_args(\%args, qw( + services + lib_dir + )) or die; + my $services = $args{services}; + my $lib_dir = $args{lib_dir}; + + # Optional args + my $bin_dir = defined $args{bin_dir} ? $args{bin_dir} + : "$FindBin::Bin/"; + my $exec_cmd = $args{exec_cmd} || sub { return system(@_) }; + + my $env_vars = env_vars(); + my $log = "$lib_dir/logs/run-once.log"; + my $cmd_fmt = ($env_vars ? "$env_vars " : '') + . $bin_dir . "pt-agent --run-service %s >> $log 2>&1"; + + my @ran_ok; + SERVICE: + foreach my $name ( sort keys %$services ) { + my $service = $services->{$name}; + next unless $service->run_once; + + delete $services->{$name}; + + my $cmd = sprintf $cmd_fmt, $name; + $logger->info("Running $name: $cmd"); + my $cmd_exit_status = $exec_cmd->($cmd); + if ( $cmd_exit_status != 0 ) { + $logger->error("Error running $name, check $log and " + ."$lib_dir/logs/$name.run"); + next SERVICE; + } + push @ran_ok, $service; + $logger->info("Ran $name successfully"); + } + + return \@ran_ok; +} + +# ########################## # +# --run-service process subs # +# ########################## # + +sub run_service { + my (%args) = @_; + + have_required_args(\%args, qw( + api_key + service + lib_dir + spool_dir + Cxn + )) or die; + my $api_key = $args{api_key}; + my $service = $args{service}; + my $lib_dir = $args{lib_dir}; + my $spool_dir = $args{spool_dir}; + my $cxn = $args{Cxn}; + + # Optional args + my $bin_dir = defined $args{bin_dir} ? 
$args{bin_dir} : "$FindBin::Bin/"; + my $client = $args{client}; # for testing + my $agent = $args{agent}; # for testing + my $entry_links = $args{entry_links}; # for testing + my $json = $args{json}; # for testing + my $prefix = $args{prefix} || int(time); # for testing + my $max_data = $args{max_data} || MAX_DATA_FILE_SIZE; + + my $start_time = time; + + # Can't do anything with the lib dir. Since we haven't started + # logging yet, cron should capture this error and email the user. + init_lib_dir( + lib_dir => $lib_dir, + verify => 1, # die unless ok, don't create + quiet => 1, + ); + + my $daemon = Daemon->new( + daemonize => 0, # no need: we're running from cron + pid_file => "$lib_dir/pids/$service.$PID", + log_file => "$lib_dir/logs/$service.run", + force_log_file => 1, + ); + $daemon->run(); + + $logger->info("Running $service service"); + + # Connect to Percona, get entry links. + my $logger_client; + if ( !$client || !$entry_links ) { + ($client, $entry_links, $logger_client) = get_api_client( + api_key => $api_key, + tries => 2, + interval => sub { return 1; }, + ); + if ( !$client || !$entry_links ) { + $logger->warning("Failed to connect to Percona Web API"); + } + } + + # Load and update the local (i.e. existing) agent, or create a new one. + if ( !$agent ) { + # If this fails, there's no local agent, but that shouldn't happen + # because a local agent originally scheduled this --send-data process. + # Maybe that agent was deleted from the system but the crontab entry + # was not and was left running. + $agent = load_local_agent ( + lib_dir => $lib_dir, + ); + if ( !$agent ) { + $logger->fatal("No agent exists ($lib_dir/agent) and --agent-uuid was " + . "not specified. Check that the agent is properly installed."); + } + } + + if ( $entry_links && $entry_links->{agents} ) { + $agent = eval { + $client->get( + link => $entry_links->{agents} . '/' . 
$agent->uuid, + ); + }; + if ( $EVAL_ERROR ) { + $logger->error("Failed to get the agent: $EVAL_ERROR"); + } + my $log_link = $agent->links->{log}; + $logger->service("$service running"); + $logger->data_ts($prefix); # TODO Not needed unless $use_spool + $logger->start_online_logging( + client => $logger_client, + log_link => $log_link, + ); + } + else { + $logger->warning("File logging only"); + } + + # Load the Service object from local service JSON file. + # $service changes from a string scalar to a Service object. + $service = load_service( + service => $service, + lib_dir => $lib_dir, + ); + + # Check if any task spools data or uses MySQL. Any task that spools + # should also use metadata because all data samples have at least a + # start_ts and end_ts as metadata. + my $tasks = $service->tasks; + my $use_spool = 0; + my $use_mysql = 0; + foreach my $task ( @$tasks ) { + $use_spool = 1 if ($task->output || '') eq 'spool'; + $use_mysql = 1 if $task->query; + } + + # $data_dir will be undef if $use_spool is undef; that's ok because + # only $tmp_dir is always needed. + my ($data_dir, $tmp_dir) = init_spool_dir( + spool_dir => $spool_dir, + service => $use_spool ? $service->name : undef, + ); + + # Connect to MySQL or quit. + if ( $use_mysql ) { + $logger->info("Connecting to MySQL"); + TRY: + for ( 1..2 ) { + eval { + $cxn->connect(); + }; + if ( $EVAL_ERROR ) { + $logger->warning("Cannot connect to MySQL: $EVAL_ERROR"); + sleep(3); + next TRY; + } + last TRY; + } + if ( !$cxn->dbh ) { + $logger->error("Cannot run " . $service->name . " because it requires " + . "MySQL but failed to connect to MySQL"); + return; + } + } + + # Run the tasks, spool any data. + my @output_files; + my $recursive_service = '--run-service ' . $service->name; + my $data_file = $prefix . '.' . $service->name . 
'.data'; + my $tmp_data_file = "$tmp_dir/$data_file"; + my $taskno = 0; + my $metadata = { data_ts => $prefix }; + my $store = {}; + my $env_vars = env_vars(); + + TASK: + foreach my $task ( @$tasks ) { + PTDEBUG && _d("Task $taskno:", $task->name); + + # Set up the output file, i.e. where this run puts its results. + # Runs can access each other's output files. E.g. run0 may + # write to fileX, then subsequent tasks can access that file + # with the special var __RUN_N_OUTPUT__ where N=0. Output files + # have this format: (prefix.)service.type(.n), where prefix is + # an optional unique ID for this run (usually a Unix ts); service + # is the service name; type is "data", "tmp", "meta", etc.; and + # n is an optional ID or instance of the type. The .data is the + # only file required: it's the data sent by send_data(). + my $task_output_file = "$tmp_dir/$prefix." + . $service->name + . ".output.$taskno"; + my $append = 0; + my $output_file; + my $join_char; + my ($store_key, $store_key_value_tuple); + + my $output = $task->output || ''; + if ( $output eq 'spool' ) { + $output_file = $tmp_data_file; + } + elsif ( $output =~ m/^stage:(\S+)/ ) { + my $file_suffix = $1; + $output_file = "$tmp_dir/$prefix." . $service->name . "$file_suffix"; + } + elsif ( $output =~ m/^meta:(\S+)/ ) { + my $attrib = $1; + $output_file = "$lib_dir/meta/" . $service->name . 
".meta.$attrib"; + } + elsif ( $output =~ m/^join:(.)$/ ) { + $join_char = $1; + $output_file = $task_output_file; + } + elsif ( $output =~ m/store:key:([\w-]+)/ ) { + $store_key = $1; + $output_file = $task_output_file; + } + elsif ( $output eq 'store:output' ) { + $store_key = $taskno; + $output_file = $task_output_file; + } + elsif ( $output eq 'store:key_value_tuple' ) { + $store_key_value_tuple = 1; + } + elsif ( $output eq 'store:output' ) { + $store_key = $taskno; + $output_file = $task_output_file; + } + elsif ( $output =~ m/append:(\S+)/ ) { + $output_file = $1; + $append = 1; + } + elsif ( $output eq 'tmp' ) { + $output_file = $task_output_file; + } + + if ( !$output_file ) { + $output_file = '/dev/null'; + push @output_files, undef, + } + else { + push @output_files, $output_file; + } + PTDEBUG && _d("Task $taskno output:", Dumper(\@output_files)); + + if ( my $query = $task->query ) { + $query = replace_special_vars( + cmd => $query, + spool_dir => $spool_dir, + output_files => \@output_files, # __RUN_n_OUTPUT__ + service => $service->name, # __SERVICE__ + lib_dir => $lib_dir, # __LIB__ + meta_dir => "$lib_dir/meta", # __META__ + stage_dir => $tmp_dir, # __STAGE__ + store => $store, # __STORE_key__ + ts => $prefix, # __TS__ + bin_dir => $bin_dir, # __BIN_DIR__ + env => $env_vars, # __ENV__ + ); + $logger->info("Task $taskno query: $query"); + my $rows; + my $t0 = time; + eval { + if ( $join_char || $store_key_value_tuple ) { + $rows = $cxn->dbh->selectall_arrayref($query); + } + else { + $cxn->dbh->do($query); + } + }; + if ( $EVAL_ERROR ) { + $logger->error("Error executing $query: $EVAL_ERROR"); + last TASK; + } + + if ( $rows ) { + $logger->info('Query returned ' . scalar @$rows . ' rows'); + if ( $join_char ) { + my $fh; + if ( !open($fh, '>', $output_file) ) { + $logger->error("Cannot open $output_file: $OS_ERROR"); + last TASK; + } + foreach my $row ( @$rows ) { + print { $fh } join($join_char, + map { defined $_ ? 
$_ : 'NULL' } @$row), "\n" + or $logger->error("Cannot write to $output_file: $OS_ERROR"); + } + close $fh + or $logger->warning("Cannot close $output_file: $OS_ERROR"); + } + elsif ( $store_key_value_tuple ) { + foreach my $row ( @$rows ) { + $store->{$row->[0]} = defined $row->[1] ? $row->[1] : 'NULL'; + } + } + } + my $t1 = time; + + push @{$metadata->{tasks}}, { + start_ts => ts($t0, 1), + end_ts => ts($t1, 1), + run_time => sprintf('%.6f', $t1 - $t0), + }; + } + elsif ( my $program = $task->program ) { + # Create the full command line to execute, replacing any + # special vars like __RUN_N_OUTPUT__, __TMPDIR__, etc. + my $cmd = join(' ', + $task->program, + ($append ? '>>' : '>'), + $output_file, + ); + $cmd = replace_special_vars( + cmd => $cmd, + spool_dir => $spool_dir, + output_files => \@output_files, # __RUN_n_OUTPUT__ + service => $service->name, # __SERVICE__ + lib_dir => $lib_dir, # __LIB__ + meta_dir => "$lib_dir/meta", # __META__ + stage_dir => $tmp_dir, # __STAGE__ + store => $store, # __STORE_key__ + ts => $prefix, # __TS__ + bin_dir => $bin_dir, # __BIN_DIR__ + env => $env_vars, # __ENV__ + ); + $logger->info("Task $taskno command: $cmd"); + + if ( $cmd =~ m/$recursive_service/ ) { + $logger->fatal("Recursive service detected: $cmd"); + die; # fatal() should die, but just in case + } + + # Execute this run. + my $t0 = time; + system($cmd); + if ( $store_key ) { + my $value = slurp($output_file); + chomp($value) if $value; + $store->{$store_key} = $value; + } + my $t1 = time; + my $cmd_exit_status = $CHILD_ERROR >> 8; + $logger->info("Task $taskno: exit $cmd_exit_status"); + $exit_status |= $cmd_exit_status; + + push @{$metadata->{tasks}}, { + start_ts => ts($t0, 1), + end_ts => ts($t1, 1), + run_time => sprintf('%.6f', $t1 - $t0), + exit_status => $cmd_exit_status, + }; + + if ( $cmd_exit_status == 200 && !$service->meta ) { + # Equivalent to 0 for meta-services that need to stop early + # but let the non-meta, i.e. real, parent service continue. 
+ } + elsif ( $cmd_exit_status == 253 ) { + $logger->error($task->name . ' exit status not zero, ' + . 'stopping ' . $service->name . ' service'); + stop_service( + service => $service->name, + lib_dir => $lib_dir, + ); + last TASK; + } + elsif ( $cmd_exit_status == 254 ) { + $logger->error($task->name . ' exit status not zero, ' + . 'stopping all services'); + stop_all_services( + lib_dir => $lib_dir + ); + last TASK; + } + elsif ( $cmd_exit_status != 0 ) { + $logger->info($task->name . ' exit status not zero, ' + . 'stopping tasks'); + last TASK; + } + } + else { + $logger->error('Invalid Task resource:', Dumper($task)); + last TASK; + } + + $taskno++; + } + + # Move the spool file from --spool/.tmp/ to --spool// + # if 1) the service spools data and 2) there is data. + my $file_size = (-s $tmp_data_file) || 0; + $logger->info("$tmp_data_file size: " . ($file_size || 0) . " bytes"); + if ( $file_size > $max_data ) { + $logger->error("Data file is larger than $max_data, the service " + . "may be malfunctioning, stopping service"); + stop_service( + service => $service->name, + lib_dir => $lib_dir, + ); + } + elsif ( $use_spool && $file_size ) { + # Save metadata about this sample _first_, because --send-data looks + # for the data file first, then for a corresponding .meta file. If + # we write the data file first, then we create a race condition: while + # we're writing, --send-data could see the data file but not the + # .meta file because we haven't written it yet. So writing the .meta + # file first guarantees that if --send-data sees a data file, the + # .meta already exists. (And there's no race condition on writing + # the data file because we use a quasi-atomic system mv.) 
# Load a Service object from its local JSON file in --lib/services/.
# Fatal (via the logger) if the file doesn't exist or can't be decoded.
sub load_service {
   my (%args) = @_;

   have_required_args(\%args, qw(
      service
      lib_dir
   )) or die;
   my $service = $args{service};
   my $lib_dir = $args{lib_dir};

   my $service_file = "$lib_dir/services/$service";
   if ( !-f $service_file ) {
      $logger->fatal("Cannot load $service: $service_file does not exist.");
   }

   my $service_obj = eval {
      my $service_hash = JSON::decode_json(slurp($service_file));
      Percona::WebAPI::Resource::Service->new(%$service_hash);
   };
   if ( $EVAL_ERROR ) {
      $logger->fatal("Cannot load $service: $EVAL_ERROR");
   }

   return $service_obj;
}

# Replace special __VAR__ placeholders in a task's command/query string:
# __RUN_N_OUTPUT__, __STORE_key__, __TS__, __LIB__, __META__, __STAGE__,
# __SERVICE__, __STAGE_FILE__, __META_FILE__, __BIN_DIR__, __TRASH__, and
# __ENV__.  Returns the expanded command; fatal on a failed substitution.
sub replace_special_vars {
   my (%args) = @_;

   have_required_args(\%args, qw(
      cmd
      spool_dir
      output_files
      service
      lib_dir
      meta_dir
      stage_dir
      store
      ts
      bin_dir
   )) or die;
   my $cmd          = $args{cmd};
   my $spool_dir    = $args{spool_dir};
   my $output_files = $args{output_files};
   my $service      = $args{service};
   my $lib_dir      = $args{lib_dir};
   my $meta_dir     = $args{meta_dir};
   my $stage_dir    = $args{stage_dir};
   my $store        = $args{store};
   my $ts           = $args{ts};
   my $bin_dir      = $args{bin_dir};

   # Optional args
   my $env = $args{env} || '';

   # $word is declared outside the eval so the error message can report
   # which word was being expanded when a substitution died.
   my $word;
   my $new_cmd;
   eval {
      $new_cmd = join(' ',
         map {
            $word = $_;
            $word =~ s/__RUN_(\d+)_OUTPUT__/$output_files->[$1]/g;
            $word =~ s/__STORE_([\w-]+)__/$store->{$1}/g;
            $word =~ s/__TS__/$ts/g;
            $word =~ s/__LIB__/$lib_dir/g;
            $word =~ s/__META__/$meta_dir/g;
            $word =~ s/__STAGE__/$stage_dir/g;
            $word =~ s/__SERVICE__/$service/g;
            $word =~ s/__STAGE_FILE__/$stage_dir\/$ts.$service/g;
            $word =~ s/__META_FILE__/$meta_dir\/$service.meta/g;
            $word =~ s/__BIN_DIR__/$bin_dir/g;
            $word =~ s/__TRASH__/$spool_dir\/.trash/g;
            $word =~ s/__ENV__/$env/g;
            $word;
         }
         split(/\s+/, $cmd)
      );
   };
   if ( $EVAL_ERROR ) {
      $logger->fatal("Error replacing " . ($word || '')
         . " in $cmd: $EVAL_ERROR");
   }

   return $new_cmd;
}
" in $cmd: $EVAL_ERROR"); + } + + return $new_cmd; +} + +sub init_spool_dir { + my (%args) = @_; + + have_required_args(\%args, qw( + spool_dir + )) or die; + my $spool_dir = $args{spool_dir}; + + # Optional args + my $service = $args{service}; + my $quiet = $args{quiet}; + + if ( !-d $spool_dir ) { + $logger->info("$spool_dir does not exist, creating") + unless $quiet; + _safe_mkdir($spool_dir); + } + elsif ( !-w $spool_dir ) { + die "$spool_dir is not writeable\n"; + } + + foreach my $subdir ( $service, '.tmp', '.trash' ) { + next unless $subdir; # service may be undef + my $dir = "$spool_dir/$subdir"; + if ( ! -d $dir ) { + $logger->info("$dir does not exist, creating") + unless $quiet; + _safe_mkdir($dir); + } + elsif ( !-w $dir ) { + die "$dir does not writeable\n"; + } + } + + my $data_dir = $service ? "$spool_dir/$service" : undef; + my $tmp_dir = "$spool_dir/.tmp"; + + return $data_dir, $tmp_dir; +} + +sub read_metadata { + my (%args) = @_; + + have_required_args(\%args, qw( + service + prefix + metadata + stage_dir + )) or die; + my $service = $args{service}; + my $prefix = $args{prefix}; + my $metadata = $args{metadata}; + my $stage_dir = $args{stage_dir}; + + # Example filename: 123456.query-history.meta.stop_offset + foreach my $file ( glob "$stage_dir/$prefix.$service.meta.*" ) { + PTDEBUG && _d('metadata file:', $file); + my ($attrib) = $file =~ m/\.meta\.(\S+)$/; + my $value = slurp($file); + chomp($value) if $value; + PTDEBUG && _d('metadata', $attrib, '=', $value); + $metadata->{$attrib} = $value; + unlink $file + or $logger->warning("Cannot rm $file: $OS_ERROR"); + } + + return; +} + +# ######################## # +# --send-data process subs # +# ######################## # + +# Send every file or directory in each service's directory in --spool/. +# E.g. --spool/query-monitor should contain files with pt-query-digest +# output. The per-service dirs are created in run_service(). 
# Send every data file in the service's directory in --spool/, e.g.
# --spool/query-history.  The per-service dirs and their data files are
# created by --run-service processes; see run_service().
sub send_data {
   my (%args) = @_;

   have_required_args(\%args, qw(
      api_key
      service
      lib_dir
      spool_dir
   )) or die;
   my $api_key   = $args{api_key};
   my $service   = $args{service};
   my $lib_dir   = $args{lib_dir};
   my $spool_dir = $args{spool_dir};

   # Optional args
   my $interactive = $args{interactive};
   my $max_data    = $args{max_data} || MAX_DATA_FILE_SIZE;
   my $agent       = $args{agent};         # for testing
   my $client      = $args{client};        # for testing
   my $entry_links = $args{entry_links};   # for testing
   my $json        = $args{json};          # for testing

   # Can't do anything without the lib dir.  Since we haven't started
   # logging yet, cron should capture this error and email the user.
   init_lib_dir(
      lib_dir => $lib_dir,
      verify  => 1,
      quiet   => 1,
   );

   # Log all output to a file.
   my $daemon = Daemon->new(
      daemonize      => 0,   # no need: we're running from cron
      pid_file       => "$lib_dir/pids/$service.send",
      log_file       => "$lib_dir/logs/$service.send",
      force_log_file => $interactive ? 0 : 1,
   );
   $daemon->run();

   $logger->info("Sending $service data");

   # Connect to Percona, get entry links.
   my $logger_client;
   if ( !$client || !$entry_links ) {
      ($client, $entry_links, $logger_client) = get_api_client(
         api_key  => $api_key,
         tries    => 3,
         interval => sub { sleep 10 },
      );
      if ( !$client || !$entry_links ) {
         $logger->fatal("Failed to connect to Percona Web API")
      }
   }

   # Load the local (i.e. existing) agent.
   if ( !$agent ) {
      # If this fails, there's no local agent, but that shouldn't happen
      # because a local agent originally scheduled this --send-data process.
      # Maybe that agent was deleted from the system but the crontab entry
      # was not and was left running.
      $agent = load_local_agent (
         lib_dir => $lib_dir,
      );
      if ( !$agent ) {
         $logger->fatal("No agent exists ($lib_dir/agent) and --agent-uuid was "
            . "not specified. Check that the agent is properly installed.");
      }
   }

   # Refresh the agent from the API so we have current links.
   $agent = eval {
      $client->get(
         link => $entry_links->{agents} . '/' . $agent->uuid,
      );
   };
   if ( $EVAL_ERROR ) {
      $logger->fatal("Failed to get the agent: $EVAL_ERROR");
   }
   my $log_link = $agent->links->{log};
   $logger->service("$service sending");
   $logger->start_online_logging(
      client   => $logger_client,
      log_link => $log_link,
   );

   # Load the Service object from the local service JSON file.
   # $service changes from a string scalar to a Service object.
   $service = load_service(
      service => $service,
      lib_dir => $lib_dir,
   );

   my ($service_dir) = init_spool_dir(
      spool_dir => $spool_dir,
      service   => $service->name,
   );

   # Only iterate over *.data files because run_service() writes them last
   # to avoid a race condition with us: when a data file exists, its .meta
   # file is guaranteed to exist already.  See the comment in run_service()
   # about writing the .meta file first.
   my @data_files = glob "$service_dir/*.data";
   $logger->info('Sending ' . scalar @data_files . ' data files');
   DATA_FILE:
   foreach my $data_file ( @data_files ) {
      (my $meta_file = $data_file) =~ s/\.data/.meta/;

      if ( $interactive ) {
         my $key;
         PROMPT:
         while ( !$key ) {
            print "\n", `ls -l $data_file`;
            print "Send [Ynaq]: ";
            # BUG FIX: the readline operator was missing here ("$key = ;"),
            # a syntax error; read the user's answer from STDIN.
            $key = <STDIN>;
            chomp($key);
            $key = lc($key);
            last DATA_FILE if $key eq 'q';
            next DATA_FILE if $key eq 'n';
            last PROMPT    if $key eq 'y';
            if ( $key eq 'a' ) {
               print "Sending all remaining files...\n";
               $interactive = 0;
               last PROMPT;
            }
            warn "Invalid response: $key\n";
         }
      }

      my $data_file_size = (-s $data_file) || 0;
      if ( $data_file_size > $max_data ) {
         $logger->error("Not sending $data_file because it is too large: "
            . "$data_file_size > $max_data. This should not happen; "
            . "please contact Percona or file a bug, and verify that "
            . "all services are running properly.");
         next DATA_FILE;
      }

      eval {
         # Send the file as-is.  The --run-service process should have
         # written the data in a format that's ready to send.
         send_file(
            client    => $client,
            agent     => $agent,
            meta_file => $meta_file,
            data_file => $data_file,
            link      => $service->links->{data},
            json      => $json,
         );
      };
      if ( my $e = $EVAL_ERROR ) {
         if (blessed($e) && $e->isa('Percona::WebAPI::Exception::Request')) {
            my $error_msg = $client->response->content;
            $logger->warning('Error ' . $e->status . " sending $data_file: "
               . ($error_msg || '(No error message from server)'));
         }
         else {
            chomp $e;
            $logger->warning("Error sending $data_file: $e");
         }
         next DATA_FILE;
      }

      # Data file sent successfully; now remove it.  Failure here is an
      # error, not a warning, because if we can't remove the file then we
      # risk re-sending it: whether a file has been sent is only tracked
      # by whether it still exists.
      eval {
         unlink $data_file or die $OS_ERROR;
      };
      if ( $EVAL_ERROR ) {
         chomp $EVAL_ERROR;
         $logger->warning("Sent $data_file but failed to remove it: "
            . $EVAL_ERROR);
         last DATA_FILE;
      }

      if ( -f $meta_file ) {
         unlink $meta_file or $logger->warning($OS_ERROR);
      }

      $logger->info("Sent and removed $data_file");
   }

   return;
}
Each part is separated by a special + # boundary value. The contents of the data file are sent as-is + # because here we don't know or care about the data; that's a job + # for the PWS server. + my $boundary = 'Ym91bmRhcnk'; # "boundary" in base64, without a trailing = + + my $agent_json = as_json($agent, json => $json); + chomp($agent_json); + + my $meta = -f $meta_file && -s $meta_file ? slurp($meta_file) : ''; + $meta =~ s/^\s+//; + $meta =~ s/\s+$//; + + my $data = -s $data_file ? slurp($data_file) : ''; + $data =~ s/^\s+//; + $data =~ s/\s+$//; + + # Put it all together: + my $resource = <post( + link => $link, + resources => $resource, + headers => { + 'Content-Type' => "multipart/form-data; boundary=$boundary", + } + ); + + return; +} + +# ############################################ # +# --status, --stop, --reload, and --reset subs # +# ############################################ # + +sub agent_status { + my (%args) = @_; + + have_required_args(\%args, qw( + pid_file + lib_dir + )) or die; + my $pid_file = $args{pid_file}; + my $lib_dir = $args{lib_dir}; + + # Optional args + my $api_key = $args{api_key}; + my $crontab_list = defined $args{crontab_list} ? $args{crontab_list} + : `crontab -l 2>/dev/null`; + my $bin_dir = defined $args{bin_dir} ? $args{bin_dir} + : "$FindBin::Bin/"; + + # Check if pt-agent is running. + my $pid = eval { + get_agent_pid( + pid_file => $pid_file, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if ( !blessed($e) ) { + $logger->warning("Sorry, an error occured while getting the pt-agent PID: $e"); + } + elsif ( $e->isa('Percona::Agent::Exception::PIDNotFound') ) { + $logger->info("pt-agent is not running"); + } + elsif ( $e->isa('Percona::Agent::Exception::PIDNotRunning') ) { + $logger->warning("$e. pt-agent may have stopped unexpectedly or crashed."); + } + else { # unhandled exception + $logger->warning("Sorry, an unknown exception occured while getting " + . 
"the pt-agent PID: $e"); + } + } + else { + $logger->info("pt-agent is running as PID $pid") + } + + if ( $api_key ) { + $logger->info("API key: " . ($api_key || '')); + } + else { + $logger->warning("No API key is set"); + } + + # Get the agent's info. + if ( -f "$lib_dir/agent" ) { + my $agent = JSON::decode_json(slurp("$lib_dir/agent")); + foreach my $attrib ( qw(uuid hostname username) ) { + $logger->info("Agent $attrib: " . ($agent->{$attrib} || '')); + } + } + else { + $logger->warning("$lib_dir/agent does not exist"); + } + + # Parse pt-agent lines from crontab to see what's scheduled/running. + my %scheduled = map { + my $line = $_; + my ($service) = $line =~ m/pt-agent (?:--run-service|--send-data) (\S+)/; + $service => 1; + } + grep { $_ =~ m/pt-agent (?:--run-service|--send-data)/ } + split("\n", $crontab_list); + + my %have_service; + if ( -d "$lib_dir/services" ) { + SERVICE: + foreach my $service_file ( glob "$lib_dir/services/*" ) { + my $service = eval { + JSON::decode_json(slurp($service_file)); + }; + if ( $EVAL_ERROR ) { + $logger->warning("$service_file is corrupt"); + next SERVICE; + } + $service = Percona::WebAPI::Resource::Service->new(%$service); + next if $service->meta; # only real services + $have_service{$service->name} = 1; + if ( $scheduled{$service->name} ) { + if ( $pid ) { + $logger->info($service->name . " is running"); + } + else { + $logger->warning($service->name . " is running but pt-agent is not"); + } + } + else { + if ( $pid ) { + $logger->warning($service->name . " is not running"); + } + else { + $logger->info($service->name . " has stopped"); + } + } + } + } + else { + $logger->warning("$lib_dir/services does not exist"); + } + + # Look for services that are still scheduled/running but that we'll + # don't/shouldn't have. This can happen if the crontab gets messed + # up, --stop fails, etc. 
+ foreach my $scheduled_service ( sort keys %scheduled ) { + if ( !$have_service{$scheduled_service} ) { + $logger->warning("$scheduled_service is running but " + . "$lib_dir/services/$scheduled_service does not exist"); + } + } + + return; +} + +sub stop_agent { + my (%args) = @_; + + have_required_args(\%args, qw( + pid_file + lib_dir + )) or die; + my $pid_file = $args{pid_file}; + my $lib_dir = $args{lib_dir}; + + my $stopped = 0; + + # Get the agent's PID and kill it. If the PID file doesn't + # exist for some reason, get_agent_pid() will attempt to find + # pt-agent --daemonize in ps. And if pt-agent doesn't respond + # to the TERM signal after a short while, we kill it with + # the KILL signal. + my $pid = eval { + get_agent_pid( + pid_file => $pid_file, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if ( !blessed($e) ) { + $logger->warning("Sorry, an error occured while getting the pt-agent PID: $e"); + } + elsif ( $e->isa('Percona::Agent::Exception::PIDNotFound') ) { + $logger->info("pt-agent is not running"); + $stopped = 1; + } + elsif ( $e->isa('Percona::Agent::Exception::PIDNotRunning') ) { + $logger->warning("$e. pt-agent may have stopped unexpectedly or crashed."); + $stopped = 1; + } + else { # unhandled exception + $logger->warning("Sorry, an unknown exception occured while getting " + . "the pt-agent PID: $e"); + } + } + else { + $logger->info("Stopping pt-agent..."); + kill 15, $pid; + my $running; + for (1..5) { + $running = kill 0, $pid; + last if !$running; + sleep 0.5; + } + $running = kill 0, $pid; + if ( $running ) { + $logger->warning("pt-agent did not respond to the TERM signal, using " + . 
"the KILL signal..."); + kill 9, $pid; + for (1..2) { + $running = kill 0, $pid; + last if !$running; + sleep 0.5; + } + $running = kill 0, $pid; + if ( $running ) { + # Shouldn't happen: + $logger->warning("pt-agent did not response to the KILL signal"); + } + else { + $logger->info("Killed pt-agent"); + $stopped = 1; + } + } + else { + $logger->info("pt-agent has stopped"); + $stopped = 1; + } + + # pt-agent should remove its own PID file, but in case it didn't, + # (e.g we had to kill -9 it), we remove the PID file manually. + if ( -f $pid_file ) { + unlink $pid_file + or $logger->warning("Cannot remove $pid_file: $OS_ERROR. Remove " + . "this file manually."); + } + } + + stop_all_services( + lib_dir => $lib_dir, + ); + + # TODO: kill --lib/pids/* + + return $stopped; +} + +sub stop_all_services { + my (%args) = @_; + + have_required_args(\%args, qw( + lib_dir + )) or die; + my $lib_dir = $args{lib_dir}; + + # Optional args + my $bin_dir = defined $args{bin_dir} ? $args{bin_dir} + : "$FindBin::Bin/"; + + # Un-schedule all services, i.e. remove them from the user's crontab, + # leaving the user's other tasks untouched. + $logger->info("Removing all services from crontab..."); + eval { + schedule_services( + services => [], + lib_dir => $lib_dir, + quiet => 1, + ); + }; + if ( $EVAL_ERROR ) { + $logger->error("Error removing services from crontab: $EVAL_ERROR"); + } + + # Stop all real services by running their stop- meta-service. + # If a real service doesn't have a stop- meta-service, then + # presumably nothing needs to be done to stop it other than un-scheduling + # it, which we've already done. + if ( -d "$lib_dir/services" ) { + my $env_vars = env_vars(); + + SERVICE: + foreach my $file ( glob "$lib_dir/services/stop-*" ) { + my $service = basename($file); + my $stop_log = "$lib_dir/logs/$service.stop"; + my $run_log = "$lib_dir/logs/$service.run"; + my $cmd = ($env_vars ? "$env_vars " : '') + . "${bin_dir}pt-agent --run-service $service" + . 
" $stop_log 2>&1"; + $logger->info("Stopping $service..."); + PTDEBUG && _d($cmd); + system($cmd); + my $cmd_exit_status = $CHILD_ERROR >> 8; + if ( $cmd_exit_status != 0 ) { + my $err = -f $run_log ? slurp($run_log) : ''; + $logger->error("Error stopping $service. Check $stop_log and the " + . "online logs for details. The service may still be running."); + next SERVICE; + } + unlink $stop_log + or $logger->warning("Cannot remove $stop_log: $OS_ERROR"); + } + } + else { + $logger->info("$lib_dir/services does not exist, no services to stop") + } + + $state->{all_services_are_stopped} = 1; + + return; +} + +sub stop_service { + my (%args) = @_; + + have_required_args(\%args, qw( + service + lib_dir + )) or die; + my $service = $args{service}; + my $lib_dir = $args{lib_dir}; + + # Optional args + my $bin_dir = defined $args{bin_dir} ? $args{bin_dir} + : "$FindBin::Bin/"; + + if ( -d "$lib_dir/services" ) { + my $stop_service_file = "$lib_dir/services/stop-$service"; + if ( -f $stop_service_file ) { + my $stop_service = basename($stop_service_file); + my $env_vars = env_vars(); + my $stop_log = "$lib_dir/logs/$service.stop"; + my $run_log = "$lib_dir/logs/$service.run"; + my $cmd = ($env_vars ? "$env_vars " : '') + . "${bin_dir}pt-agent --run-service $stop_service" + . " $stop_log 2>&1"; + $logger->info("Stopping $service..."); + PTDEBUG && _d($cmd); + system($cmd); + my $cmd_exit_status = $CHILD_ERROR >> 8; + if ( $cmd_exit_status != 0 ) { + my $err = -f $run_log ? slurp($run_log) : ''; + $logger->error("Error stopping $service. Check $stop_log, " + . "$run_log, and the online online logs for details. " + . 
"$service may still be running."); + } + else { + unlink $stop_log + or $logger->warning("Cannot remove $stop_log: $OS_ERROR"); + } + } + else { + $logger->warning("$stop_service_file does not exist, cannot stop $service"); + } + } + else { + $logger->warning("$lib_dir/services does not exist, cannot stop $service"); + } + + return; +} + +sub reset_agent { + my (%args) = @_; + + have_required_args(\%args, qw( + pid_file + lib_dir + spool_dir + log_file + )) or die; + my $pid_file = $args{pid_file}; # for stop_agent() + my $lib_dir = $args{lib_dir}; + my $spool_dir = $args{spool_dir}; + my $log_file = $args{log_file}; + + # Optional args + my $api_key = $args{api_key}; + + if ( -t STDIN ) { + print "\nWARNING: All services will stop and all data in $spool_dir/ " + ."will be deleted. Are you sure you want to reset pt-agent?\n\n" + . "Press any key to continue, else Ctrl-C to abort.\n"; + my $confirmation = ; + } + + $logger->info('Stopping pt-agent...'); + my $stopped = stop_agent( + pid_file => $pid_file, + lib_dir => $lib_dir, + ); + if ( !$stopped ) { + $logger->warning('Failed to stop pt-agent. Stop the agent, or verify that ' + . 'it is no longer running, and try again.'); + return; + } + + my $agent = load_local_agent( + lib_dir => $lib_dir, + quiet => 1, + ); + if ( !$agent ) { + $logger->warning("$lib_dir/agent does not exist. You will need to re-install " + . 
"pt-agent after the reset."); + } + + $logger->info("Removing $lib_dir/..."); + rmtree($lib_dir) + or $logger->warning("Cannot remove $lib_dir/: $OS_ERROR"); + init_lib_dir( + lib_dir => $lib_dir, + ); + + if ( $agent ) { + my $new_agent = Percona::WebAPI::Resource::Agent->new( + uuid => $agent->uuid, + ); + save_agent( + lib_dir => $lib_dir, + agent => $new_agent, + ); + } + + $logger->info("Removing $spool_dir/..."); + rmtree($spool_dir) + or $logger->warning("Cannot remove $spool_dir/: $OS_ERROR"); + init_spool_dir( + spool_dir => $spool_dir, + ); + + my $config_file = get_config_file(); + my $config = -f $config_file ? slurp($config_file) : ''; + $logger->info("Resetting $config_file..."); + open my $fh, '>', $config_file + or $logger->error("Cannot write to $config_file: $OS_ERROR"); + if ( $api_key ) { + print { $fh } "api-key=$api_key\n"; + } + foreach my $line ( split("\n", $config) ) { + next unless $line =~ m/^\s*(?:user|host|password|socket|defaults-file|port)/; + print { $fh } $line, "\n"; + } + close $fh + or $logger->warning("Cannot close $config_file: $OS_ERROR"); + + if ( -f $log_file ) { + $logger->info("Removing $log_file..."); + unlink $log_file + or $logger->warning("Cannot remove $log_file: $OS_ERROR"); + } + + return; +} + +sub get_agent_pid { + my (%args) = @_; + my $pid_file = $args{pid_file}; + + my $pid; + if ( -f $pid_file ) { + PTDEBUG && _d('Reading PID from', $pid_file); + chop($pid = slurp($pid_file)); + } + else { + my $ps_output = `ps ax | grep 'pt-agent --daemonize' | grep -v grep`; + PTDEBUG && _d('Reading PID from ps', $ps_output); + if ( !$ps_output ) { + die Percona::Agent::Exception::PIDNotFound->new( + pid_file => $pid_file, + ); + } + # Match the first digits, which should be the PID. 
+ ($pid) =~ $ps_output =~ m/(\d+)/; + } + + if ( !$pid ) { + die Percona::Agent::Exception::NoPID->new( + pid_file => $pid_file, + pid_file_is_empty => -f $pid_file, + ); + } + + my $running = kill 0, $pid; + if ( !$running ) { + die Percona::Agent::Exception::PIDNotRunning->new( + pid => $pid, + ); + } + + return $pid; +} + +sub reload_signal { + my ( $signal ) = @_; + print STDERR "\n# Caught SIG$signal, reloading configuration.\n"; + $state->{reload} = 1; + return; +} + +sub reload_agent { + my (%args) = @_; + + have_required_args(\%args, qw( + pid_file + )) or die; + my $pid_file = $args{pid_file}; + my $lib_dir = $args{lib_dir}; + + my $pid = eval { + get_agent_pid( + pid_file => $pid_file, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if ( !blessed($e) ) { + $logger->warning("Sorry, an error occured while getting the pt-agent PID: $e"); + } + elsif ( $e->isa('Percona::Agent::Exception::PIDNotFound') ) { + $logger->warning("pt-agent is not running"); + } + elsif ( $e->isa('Percona::Agent::Exception::PIDNotRunning') ) { + $logger->warning("$e. pt-agent may have stopped unexpectedly or crashed."); + } + else { # unhandled exception + $logger->warning("Sorry, an unknown exception occured while getting " + . 
"the pt-agent PID: $e"); + } + } + else { + kill 10, $pid; # SIGUSR1, caught in reload_signal() + $logger->info("Sent reload signal (SIGUSR1) to pt-agent PID $pid"); + } + + return; +} + +# ############## # +# --install subs # +# ############## # + +sub install { + my (%args) = @_; + have_required_args(\%args, qw( + OptionParser + Cxn + flags + )) or die; + my $o = $args{OptionParser}; + my $cxn = $args{Cxn}; + my $flags = $args{flags}; + + # Optional args + my $interactive = $args{interactive}; + + $logger->quiet(Percona::Agent::Logger::level_number('ERROR')); + + my $agent_my_cnf = '/etc/percona/agent/my.cnf'; + my $config_file = get_config_file(); + + my $step_result; + my $stepno = 0; + my $skip = 0; + my $step_fmt = "Step %d of %d: %s: "; + my @steps = ( + "Verify the user is root", + "Check Perl module dependencies", + "Check for crontab", + "Verify the API key", + "Connect to MySQL", + "Check if MySQL is a slave", + "Create a MySQL user for the agent", + "Initialize $agent_my_cnf", + "Initialize $config_file", + "Initialize directories", + "Initialize the agent", + "Run the agent", + ); + my $n_steps = scalar @steps; + my $next_step = sub { + my (%args) = @_; + my $repeat = $args{repeat}; + my $done = $args{done}; + # Result of the previous step + my $result = 'OK'; + if ( $step_result ) { + $result = $step_result; + $step_result = undef; + } + print "$result\n" if $stepno && !$repeat; + while ( $skip ) { + printf $step_fmt, + $stepno + ($repeat ? 0 : 1), + $n_steps, + $steps[$repeat ? $stepno - 1 : $stepno]; + $stepno++; + print "SKIP\n"; + $skip--; + } + if ( $done ) { + print "INSTALLATION COMPLETE\n"; + return; + } + # This step + printf $step_fmt, + $stepno + ($repeat ? 0 : 1), + $n_steps, + $steps[$repeat ? 
$stepno - 1 : $stepno]; + $stepno++ unless $repeat; + }; + + # ######################################################################## + # Pre-install checklist + # ######################################################################## + + # Must be root for --install. + $next_step->(); + if ( $EUID != 0 ) { + die "You must run pt-agent --install as root.\n"; + } + + # Check Perl module dependencies + $next_step->(); + exit 1 if missing_perl_module_deps(); + + # Check for crontab + $next_step->(); + my $crontab = `which crontab 2>/dev/null`; + if ( !$crontab ) { + die "cron is not installed, or crontab is not in your PATH.\n"; + } + + # Must have a valid API key. + my $api_key = $o->get('api-key'); + if ( !$api_key ) { + print "\n"; + if ( $interactive || -t STDIN ) { + while ( !$api_key ) { + print "Enter your API key: "; + $api_key = ; + chomp($api_key) if $api_key; + if ( !$api_key || length($api_key) < 32 ) { + warn "Invalid API key; it should be at least 32 characters long. Please try again.\n"; + $api_key = ''; + } + } + $next_step->(repeat => 1); # repeat + } + else { + die "Please specify your --api-key.\n"; + } + } + my $client; + my $entry_links; + if ( $flags->{offline} ) { + $skip++; + } + else { + $next_step->(); + eval { + ($client, $entry_links) = get_api_client( + api_key => $api_key, + interval => sub { return; }, + tries => 1, + ); + }; + if ( my $e = $EVAL_ERROR ) { + die "Sorry, an error occurred while verifying the API key: $e"; + } + elsif ( !$entry_links ) { + if ( $client->response->code && $client->response->code == 403 ) { + die "Sorry, the API key $api_key is not valid. Please check the API key and try again.\n"; + } + else { + my $err = $client->response->message || 'Unknown error'; + die "Sorry, an error occured while verifying the API key: $err\n"; + } + } + } + + # Must be able to connect to MySQL to create pt_agent user. 
+ $next_step->(); + eval { + $cxn->connect(); + }; + if ( $EVAL_ERROR ) { + chomp $EVAL_ERROR; + die "Cannot connect to MySQL: $EVAL_ERROR\n" + . "Please re-run pt-agent --install and specify MySQL connection " + . "options like --user and --host to connect to MySQL as a user " + . "with sufficient privileges to create MySQL users.\n"; + } + + # Check if MySQL is a slave + $next_step->(); + my $slave = $cxn->dbh->selectrow_hashref("SHOW SLAVE STATUS"); + if ( $slave ) { + $step_result = 'YES, TO MASTER ' . $slave->{master_host} || '?'; + } + else { + $step_result = 'NO'; + } + + # ######################################################################## + # Do the install + # ######################################################################## + + # Create a MySQL user for the agent + $next_step->(); + if ( -f $agent_my_cnf ) { + $step_result = "NO, USE EXISTING $agent_my_cnf"; + } + else { + if ( !$slave ) { # master + create_mysql_user($cxn, $agent_my_cnf); + } + else { # slave + if ( $flags->{force_dangerous_slave_install} ) { + create_mysql_user($cxn, $agent_my_cnf); + } + else { + die "Sorry, cannot install the agent because MySQL is a slave " + . "and $agent_my_cnf does not exist. It is not safe to " + . "write to a slave, so a MySQL user for the agent cannot " + . "be created. First install the agent on the master, then " + . "copy $agent_my_cnf from the master server to this server. " + . "See --install-options for how to force a dangerous slave " + . "install.\n"; + } + } + } + + # Save the API key and defaults file in ~/.pt-agent.conf. + $next_step->(); + eval { + write_to_file( + data => "api-key=$api_key\ndefaults-file=$agent_my_cnf\n", + file => $config_file, + ); + }; + if ( $EVAL_ERROR ) { + die "Sorry, an error occured while initializing $config_file: " + . $EVAL_ERROR; + } + + # Init --lib and --spool. pt-agent would do this itself, but we'll + # do it now in case there are problems. 
+ $next_step->(); + init_lib_dir( + lib_dir => $o->get('lib'), + ); + init_spool_dir( + spool_dir => $o->get('spool'), + ); + + # 8. Start the agent, don't run it yet. Normally this forks in + # anticipation of run_agent() being called next, but we don't do + # this during install; we run the agent manually later. + if ( $flags->{offline} ) { + $skip++; # Init agent + $skip++; # Run agent + } + else { + $next_step->(); + my $running = eval { + start_agent( + api_key => $api_key, + lib_dir => $o->get('lib'), + Cxn => $cxn, + client => $client, + entry_links => $entry_links, + agent_uuid => $o->get('agent-uuid'), + daemonize => 0, + pid_file => undef, + log_file => undef, + interval => sub { sleep 2; }, + tries => 2, + ); + }; + if ( $EVAL_ERROR ) { + die "Sorry, an error occurred while starting the agent: $EVAL_ERROR"; + } + + # 9. Run the agent daemon. If all the previous worked, the agent + # should be able to start without problems. It will get and apply + # the default config, then get and apply any services (probably won't + # have any yet). + $next_step->(); + my $env = env_vars(); + my $cmd = "$env $FindBin::Bin/pt-agent --daemonize"; + my $ret = system($cmd); + if ( $ret >> 8 ) { + die "Sorry, an error occured while starting pt-agent.\n"; + } + } + + # ######################################################################## + # Done installing + # ######################################################################## + $next_step->(done => 1); + + my $hostname = `hostname`; + chomp($hostname); + + if ( $flags->{offline} ) { + print "The agent has been installed, but it was not started. " + . "Run pt-agent --daemonize to start the agent, then go to " + . "https://cloud.percona.com/agents#$hostname to enable services " + . "for the agent.\n"; + } + else { + print "The agent has been installed and started, but it is not " + . "running any services yet. Go to " + . "https://cloud.percona.com/agents#$hostname to enable services " + . 
"for the agent.\n"; + } + + return; +} + +sub create_mysql_user { + my ($cxn, $agent_my_cnf) = @_; + + my $random_pass = pseudo_random_password(); + my $sql = "GRANT SUPER,USAGE ON *.* TO 'pt_agent'\@'localhost' " + . "IDENTIFIED BY '$random_pass'"; + eval { + $cxn->dbh->do($sql); + }; + if ( $EVAL_ERROR ) { + die "Sorry, an error occurred while creating a MySQL user for the agent: " + . $EVAL_ERROR; + } + $cxn->dbh->disconnect(); + + # Init $agent_my_cnf + # We could set user= and pass= in ~/.pt-agent.conf, but each new agent + # has a different MySQL password but shares the same default agent + # config, so if we set pass=foo, the next agent would set it to + # pass=bar, etc. Instead, every agent sets/uses + # defaults-file=/etc/percona/agent/my.cnf in the default config, but + # the contents of that file is different for each agent. + + if ( !-d '/etc/percona' ) { + _safe_mkdir('/etc/percona'); + } + if ( !-d '/etc/percona/agent' ) { + _safe_mkdir('/etc/percona/agent'); + } + my $my_cnf = "[client]\nuser=pt_agent\npass=$random_pass\n"; + my $dsn = $cxn->dsn; + if ( $dsn->{h} ) { + $my_cnf .= "host=$dsn->{h}\n"; + } + if ( $dsn->{P} ) { + $my_cnf .= "port=$dsn->{P}\n"; + } + if ( $dsn->{S} ) { + $my_cnf .= "socket=$dsn->{S}\n"; + } + eval { + write_to_file( + data => $my_cnf, + file => $agent_my_cnf, + ); + }; + if ( $EVAL_ERROR ) { + die "Sorry, an error occured while initializing $agent_my_cnf: " + . 
$EVAL_ERROR; + } + + return; +} + +sub pseudo_random_password { + my @chars = ("A".."Z", "a".."z", "0".."9"); + my $string; + $string .= $chars[rand @chars] for 1..9; + return $string; +} + +sub missing_perl_module_deps { + my @missing_deps; + foreach my $pm ( sort keys %deps ) { + my $dep = $deps{$pm}; + eval "require $dep->[0]"; + if ( $EVAL_ERROR ) { + push @missing_deps, $dep; + } + } + if ( @missing_deps ) { + warn "These Perl modules need to be installed:\n\n"; + foreach my $dep ( @missing_deps ) { + warn "$dep->[0]\n apt-get install $dep->[1]\n yum install $dep->[2]\n\n"; + } + } + return scalar @missing_deps; +} + +# ################## # +# Misc and util subs # +# ################## # + +sub get_config_file { + my $home_dir = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; + my $config_file = "$home_dir/.pt-agent.conf"; + return $config_file; +} + +sub save_agent { + my (%args) = @_; + have_required_args(\%args, qw( + agent + lib_dir + )) or die; + my $agent = $args{agent}; + my $lib_dir = $args{lib_dir}; + my $file = $lib_dir . '/agent'; + $logger->info("Saving Agent to $file"); + eval { + open my $fh, '>', $file + or die "Error opening $file: $OS_ERROR"; + print { $fh } as_json($agent) + or die "Error writing to $file: $OS_ERROR"; + close $fh + or die "Error closing $file: $OS_ERROR"; + }; + if ( $EVAL_ERROR ) { + if ( !$state->{save_agent_error}++ ) { + chomp($EVAL_ERROR); + $logger->warning("Cannot save agent to $lib_dir: $EVAL_ERROR. " + . "Configure the agent to use a writeable --lib directory. " + . 
"This warning will not be printed again."); + } + } + delete $state->{save_agent_error}; + return; +} + +sub slurp { + my ($file) = @_; + return unless -f $file; + open my $fh, '<', $file + or die "Error opening $file: $OS_ERROR"; + my $data = do { + local $INPUT_RECORD_SEPARATOR = undef; + <$fh>; + }; + close $fh; + return $data; +} + +sub write_to_file { + my (%args) = @_; + my $data = $args{data}; + my $file = $args{file}; + die "No file" unless $file; + open my $fh, '>', $file + or die "Error opening $file: $OS_ERROR"; + print { $fh } $data; + close $fh; + return; +} + +sub _set_logger { + my $new_logger = shift; + $logger = $new_logger; + return; +} + +sub get_versions { + my (%args) = @_; + my $cxn = $args{Cxn}; + my $tries = $args{tries} || 1; + my $interval = $args{interval} || sub { sleep 3; }; + + my $have_mysql = 0; + if ( $cxn ) { + $logger->info("Connecting to MySQL"); + foreach my $tryno ( 1..$tries ) { + eval { + $cxn->connect(); + }; + if ( $EVAL_ERROR ) { + $logger->warning("Cannot connect to MySQL: $EVAL_ERROR"); + } + else { + $have_mysql = 1; + delete $state->{need_mysql_version}; + last; # success + } + if ( $tryno < $tries ) { + sleep $interval; # failure, try again + } + else { + $state->{need_mysql_version} = 1; + $logger->warning("Cannot get MySQL version, will try again later"); + last; # failure + } + } + } + + # This is currently the actual response from GET v.percona.com + my $fake_response = < $fake_response, + ); + + my $instances = [ + { name => 'system', id => 0, }, + ]; + + if ( $have_mysql ) { + my ($name, $id) = VersionCheck::get_instance_id( + { dbh => $cxn->dbh, dsn => $cxn->dsn }, + ); + push @$instances, + { name => $name, id => $id, dbh => $cxn->dbh, dsn => $cxn->dsn }; + } + + my $versions = VersionCheck::get_versions( + items => $items, + instances => $instances, + ); + + my %version_for; + foreach my $item ( sort keys %$items ) { + next unless exists $versions->{$item}; + if ( ref($versions->{$item}) eq 'HASH' ) { + my 
$mysql_versions = $versions->{$item}; + for my $id ( sort keys %$mysql_versions ) { + $version_for{$item} = $mysql_versions->{$id}; + } + } + else { + $version_for{$item} = $versions->{$item}; + } + } + + PTDEBUG && _d('Versions:', Dumper(\%version_for)); + return \%version_for; +} + +sub env_vars { + my @vars; + foreach my $var ( qw( + PTDEBUG + PERCONA_TOOLKIT_TEST_USE_DSN_NAMES + PCT_ENTRY_LINK + )) { + if ( my $val = $ENV{$var} ) { + push @vars, "$var=\"$val\""; + } + } + return join(' ', @vars); +} + +sub _safe_mkdir { + my $dir = shift; + + # Multiple processes are running at once, all running the same code, + # all trying to init pt-agent's various directories if necessary, so + # race conditions abound. Another process may have created the dir + # between -d checking for it and now, so if mkdir throws a "File exists" + # error and the dir does now exist, then that's ok. Else, it's an error. + eval { + mkdir $dir or die $OS_ERROR; + }; + if ( my $e = $EVAL_ERROR ) { + if ( $e =~ /exists/i && -d $dir ) { + PTDEBUG && _d('Another process created', $dir); + } + else { + die "Cannot mkdir $dir: $e"; + } + } + return; +} + +sub check_if_mysql_restarted { + my (%args) = @_; + have_required_args(\%args, qw( + Cxn + )) or die; + my $cxn = $args{Cxn}; + + # Optional args + my $uptime = $args{uptime}; # for testing + my $margin = $args{margin} || 5; + + if ( !$uptime ) { + $logger->info("Connecting to MySQL"); + my $t0 = time; + my $e; + my $tries = 2; + my $have_mysql = 0; + TRY: + foreach my $tryno ( 1..$tries ) { + eval { + $cxn->connect(); + }; + $e = $EVAL_ERROR; + if ( $e ) { + sleep 3 if $tryno < $tries; # failure, try again + } + else { + $have_mysql = 1; + last TRY; # success + } + } + if ( $have_mysql ) { + eval { + (undef, $uptime) = $cxn->dbh->selectrow_array("SHOW STATUS LIKE 'uptime'"); + }; + if ( $EVAL_ERROR ) { + $logger->warning("Cannot check if MySQL restarted because " + . 
"SHOW STATUS query failed: $EVAL_ERROR"); + return; + } + } + else { + $logger->warning("Cannot check if MySQL restarted because " + . "connection to MySQL failed: $e"); + return; + } + } + + my $now = int(time); + + if ( !$state->{last_uptime} || !$state->{last_uptime_check} ) { + $logger->info("MySQL uptime: $uptime"); + delete $state->{mysql_restarted}; + } + elsif ( !$state->{mysql_restarted} ) { + my $elapsed_time = $now - $state->{last_uptime_check}; + my $exepected_uptime = $state->{last_uptime} + $elapsed_time; + my $mysql_restarted = $uptime > ($exepected_uptime - $margin) && $uptime < ($exepected_uptime + $margin) ? 0 : 1; + $logger->info("MySQL uptime check: last=$state->{last_uptime} elapsed=$elapsed_time expected=$exepected_uptime " + . "+/- ${margin}s actual=$uptime"); + if ( $mysql_restarted ) { + $logger->warning("MySQL restarted"); + $state->{mysql_restarted} = ts(time, 1); # 1=UTC + } + } + + $state->{last_uptime} = $uptime; + $state->{last_uptime_check} = $now; + + return; +} + +sub too_many_agents { + my (%args) = @_; + have_required_args(\%args, qw( + lib_dir + )) or die; + my $lib_dir = $args{lib_dir}; + return unless -d "$lib_dir/pids"; + my @pids = glob "$lib_dir/pids/*"; + return scalar @pids > 10 ? 1 : 0; +} + +sub _logger { + my $_logger = shift; + $logger = $_logger if $_logger; + return $logger; +} + +sub _state { + my $_state = shift; + $state = $_state if $_state; + return $state; +} + +# Catches signals so we can exit gracefully. +sub sig_int { + my ( $signal ) = @_; + $oktorun = 0; + if ( $exit_on_signals ) { + print STDERR "\n# Caught SIG$signal, exiting.\n"; + exit 1; + } + print STDERR "# Caught SIG$signal. Use 'kill -ABRT $PID' if " + . "the tool does not exit normally in a few seconds.\n"; + return; +} + +# ############################################################################ +# Run the program. 
# ############################################################################

if ( !caller ) { exit main(@ARGV); }

1; # Because this is a module as well as a script.

# ############################################################################
# Documentation
# ############################################################################
=pod

=head1 NAME

pt-agent - Agent for Percona Cloud Tools

=head1 SYNOPSIS

Usage: pt-agent [OPTIONS]

pt-agent is the client-side agent for Percona Cloud Tools. It is not
a general command line tool like other tools in Percona Toolkit; it is
configured and controlled through the web at https://cloud.percona.com.
Please contact Percona or visit https://cloud.percona.com for more information.

=head1 DESCRIPTION

pt-agent is the client-side agent for Percona Cloud Tools (PCT). It is
controlled and configured through the web app at https://cloud.percona.com.
An account with Percona is required to use pt-agent. Please contact Percona
or visit https://cloud.percona.com for more information.

pt-agent, or "the agent", is a single, unique instance of the tool running
on a server. Two agents cannot run on the same server (see L<"--pid">).

The agent is a daemon that runs as root. It should be started with
L<"--daemonize">. It connects periodically to Percona to update
its configuration and services, and it schedules L<"--run-service"> and
L<"--send-data"> instances of itself. Other than L<"INSTALLING"> and starting
the agent locally, all control and configuration is done through the web
at https://cloud.percona.com.

=head1 INSTALLING

pt-agent must be installed and run as root. It is possible to run as
a non-root user, but this requires a more complicated and manual installation.
Please contact Percona for help if you need to run pt-agent as a non-root user.
Installing the agent as root is very simple:

 # pt-agent --install

The agent will prompt you for your Percona Cloud Tools API key. Then it
will verify the API key, create a MySQL user for the agent, and run the agent.
When the install process is complete, go to https://cloud.percona.com to enable
services for the agent.

Please contact Percona if you need help installing the agent.

=head1 EXIT STATUS

pt-agent exits zero if no errors or warnings occurred, else it exits non-zero.

=head1 OPTIONS

L<"--run-service"> and L<"--send-data"> are mutually exclusive.

L<"--status">, L<"--stop">, and L<"--reset"> are mutually exclusive.

=over

=item --agent-uuid

type: string

Existing agent UUID for re-installing an agent.

=item --api-key

type: string

Your secret Percona Cloud Tools API key.

=item --check-interval

type: time; default: 1m

How often to check for a new configuration and services.

=item --config

type: Array

Read this comma-separated list of config files; if specified, this must be the
first option on the command line.

See the L<"--help"> output for a list of default config files.

=item --daemonize

Daemonize the agent. This causes the agent to fork into the background and
L<"--log"> all output.

Fork to the background and detach from the shell. POSIX operating systems only.

=item --defaults-file

short form: -F; type: string

Only read MySQL options from the given file. You must give an absolute
pathname.

=item --disk-bytes-free

type: size; default: 100M

Stop all services if the disk has less than this much free space.
This prevents the agent from filling up the disk with service data.

Valid size value suffixes are k, M, G, and T.

=item --disk-pct-free

type: int; default: 5

Stop all services if the disk has less than this percent free space.
This prevents the agent from filling up the disk with service data.
+ +This option works similarly to L<"--disk-bytes-free"> but specifies a +percentage margin of safety instead of a bytes margin of safety. +The agent honors both options, and will not collect any data unless both +margins are satisfied. + +=item --help + +Print the agent's help and exit. + +=item --host + +short form: -h; type: string; default: localhost + +MySQL host. + +=item --install + +Install pt-agent as root. + +=item --install-options + +type: Hash + +Comma-separated list of L<"--install"> options. Options are: + +=over + +=item offline + +Do not verify the API key or start the agent. + +=item force_dangerous_slave_install + +Like the option's name suggests: this forces a dangerous slave install, +so you should not use this option unless you are aware of the potential +consequences. To install the agent on a slave, C +must exist because it is not safe to create the agent's MySQL user on +a slave. The agent should be installed on the master first, then +C copied from the master server to the slave +server. Using this option forces the agent to create the agent's MySQL +user on the slave. B: writing to a slave is dangerous and could +cause replication to crash. + +=back + +=item --interactive + +Run in interactive mode (disables L<"--[no]log-api">). + +=item --lib + +type: string; default: /var/lib/pt-agent + +Directory in which to save local data. pt-agent is remotely controlled and +configured, but it also saves data locally. These files should not be edited +manually. + +=item --log + +type: string; default: /var/log/pt-agent.log + +Log all output to this file when daemonized. + +=item --[no]log-api + +default: yes + +Log everything through the Percona Cloud Tools API. + +=item --password + +short form: -p; type: string + +MySQL password. + +=item --pid + +type: string; default: /var/run/pt-agent.pid + +Create the given PID file. The file contains the process ID of the script. +The PID file is removed when the script exits. 
Before starting, the script
+checks if the PID file already exists. If it does not, then the script creates
+and writes its own PID to it. If it does, then the script checks the following:
+if the file contains a PID and a process is running with that PID, then
+the script dies; or, if there is no process running with that PID, then the
+script overwrites the file with its own PID and starts; else, if the file
+contains no PID, then the script dies.
+
+=item --port
+
+short form: -P; type: int
+
+MySQL port number.
+
+=item --reload
+
+Force pt-agent to reload its configuration immediately.
+
+=item --reset
+
+cumulative: yes; default: 0
+
+Reset pt-agent to a clean post-install state.
+
+B<WARNING>: all L<"--spool"> data will be deleted.
+
+=item --run-service
+
+type: string
+
+Run a service and spool its data for L<"--send-data">. I The main pt-agent daemon schedules
+instances of itself with this option.
+
+=item --send-data
+
+type: string
+
+Send data for a service to Percona. I The main pt-agent daemon schedules instances of itself with
+this option.
+
+=item --set-vars
+
+type: Array
+
+Set the MySQL variables in this comma-separated list of C<variable=value> pairs.
+
+By default, the agent sets:
+
+=for comment ignore-pt-internal-value
+MAGIC_set_vars
+
+  wait_timeout=10000
+
+Variables specified on the command line override these defaults. For
+example, specifying C<--set-vars wait_timeout=500> overrides the default
+value of C<10000>.
+
+The agent prints a warning and continues if a variable cannot be set.
+
+=item --socket
+
+short form: -S; type: string
+
+MySQL socket file.
+
+=item --spool
+
+type: string; default: /var/spool/pt-agent
+
+Directory in which to save service data before sending to Percona.
+L<"--run-service"> saves data in this directory, and L<"--send-data">
+reads data from this directory. Each service has its own subdirectory,
+like C<--spool/query-history> for the Query History service. 
Data
+is removed by L<"--send-data"> after it is successfully sent to Percona.
+
+=item --status
+
+Print the status of pt-agent.
+
+=item --stop
+
+Stop pt-agent and all services.
+
+=item --user
+
+short form: -u; type: string
+
+MySQL user, if not the current system user.
+
+=item --version
+
+Print the agent's version and exit.
+
+=back
+
+=head1 DSN OPTIONS
+
+These DSN options are used to create a DSN. Each option is given like
+C<option=value>. The options are case-sensitive, so P and p are not the
+same option. There cannot be whitespace before or after the C<=> and
+if the value contains whitespace it must be quoted. DSN options are
+comma-separated. See the L<percona-toolkit> manpage for full details.
+
+=over
+
+=item * A
+
+dsn: charset; copy: yes
+
+Default character set.
+
+=item * D
+
+copy: no
+
+Default database when connecting.
+
+=item * F
+
+dsn: mysql_read_default_file; copy: yes
+
+Defaults file for connection values.
+
+=item * h
+
+dsn: host; copy: yes
+
+MySQL host.
+
+=item * p
+
+dsn: password; copy: yes
+
+MySQL password.
+
+=item * P
+
+dsn: port; copy: yes
+
+MySQL port number.
+
+=item * S
+
+dsn: mysql_socket; copy: no
+
+MySQL socket file.
+
+=item * u
+
+dsn: user; copy: yes
+
+MySQL user, if not the current system user.
+
+=back
+
+=head1 ENVIRONMENT
+
+The environment variable C<PTDEBUG> enables verbose debugging output to STDERR.
+To enable debugging and capture all output to a file, run the tool like:
+
+  PTDEBUG=1 pt-agent ... > FILE 2>&1
+
+Be careful: debugging output is voluminous and can generate several megabytes
+of output.
+
+=head1 SYSTEM REQUIREMENTS
+
+pt-agent requires:
+
+=over
+
+=item * An account with Percona
+
+=item * Access to https://cloud-api.percona.com
+
+=item * Perl 5.8 or newer
+
+=item * Standard Linux bin tools (grep, awk, stat, etc.)
+
+=item * cron
+
+=item * A Bash shell
+
+=item * Core Perl modules
+
+=item * DBD::mysql Perl module
+
+=item * JSON Perl module
+
+=item * LWP Perl module
+
+=item * IO::Socket::SSL Perl module
+
+=back
+
+=head1 BUGS
+
+For a list of known bugs, see L<http://www.percona.com/bugs/pt-agent>.
+
+Please report bugs at L<https://bugs.launchpad.net/percona-toolkit>.
+Include the following information in your bug report:
+
+=over
+
+=item * Complete command-line used to run the tool
+
+=item * Tool L<"--version">
+
+=item * MySQL version of all servers involved
+
+=item * Output from the tool including STDERR
+
+=item * Input files (log/dump/config files, etc.)
+
+=back
+
+If possible, include debugging output by running the tool with C<PTDEBUG>;
+see L<"ENVIRONMENT">.
+
+=head1 DOWNLOADING
+
+Visit L<http://www.percona.com/software/percona-toolkit/> to download the
+latest release of Percona Toolkit. Or, get the latest release from the
+command line:
+
+  wget percona.com/get/percona-toolkit.tar.gz
+
+  wget percona.com/get/percona-toolkit.rpm
+
+  wget percona.com/get/percona-toolkit.deb
+
+You can also get individual tools from the latest release:
+
+  wget percona.com/get/TOOL
+
+Replace C<TOOL> with the name of any tool.
+
+=head1 AUTHORS
+
+Daniel Nichter
+
+=head1 ABOUT PERCONA TOOLKIT
+
+This tool is part of Percona Toolkit, a collection of advanced command-line
+tools developed by Percona for MySQL support and consulting. Percona Toolkit
+was forked from two projects in June, 2011: Maatkit and Aspersa. Those
+projects were created by Baron Schwartz and developed primarily by him and
+Daniel Nichter, both of whom are employed by Percona. Visit
+L<http://www.percona.com/software/> for more software developed by Percona.
+
+=head1 COPYRIGHT, LICENSE, AND WARRANTY
+
+This program is copyright 2013 Percona Inc.
+
+THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +systems, you can issue `man perlgpl' or `man perlartistic' to read these +licenses. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., 59 Temple +Place, Suite 330, Boston, MA 02111-1307 USA. + +=head1 VERSION + +pt-agent 2.2.2 + +=cut diff --git a/bin/pt-query-digest b/bin/pt-query-digest index a3d6c2b2..eb5b7a48 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -4897,6 +4897,7 @@ sub new { my ( $class ) = @_; my $self = { pending => [], + last_event_offset => undef, }; return bless $self, $class; } @@ -4933,6 +4934,7 @@ sub parse_event { or defined($stmt = $next_event->()) ) { my @properties = ('cmd', 'Query', 'pos_in_log', $pos_in_log); + $self->{last_event_offset} = $pos_in_log; $pos_in_log = $tell->(); if ( $stmt =~ s/$slow_log_hd_line//go ){ # Throw away header lines in log @@ -5058,9 +5060,15 @@ sub parse_event { PTDEBUG && _d('Properties of event:', Dumper(\@properties)); my $event = { @properties }; - if ( $args{stats} ) { - $args{stats}->{events_read}++; - $args{stats}->{events_parsed}++; + if ( !$event->{arg} ) { + PTDEBUG && _d('Partial event, no arg'); + } + else { + $self->{last_event_offset} = undef; + if ( $args{stats} ) { + $args{stats}->{events_read}++; + $args{stats}->{events_parsed}++; + } } return $event; } # EVENT @@ -6654,7 +6662,7 @@ sub hostname { sub files { my ( $self, %args ) = @_; if ( $args{files} ) { - return "# Files: " . join(', ', @{$args{files}}) . "\n"; + return "# Files: " . join(', ', map { $_->{name} } @{$args{files}}) . 
"\n"; } return; } @@ -7658,17 +7666,50 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0; my $have_json = eval { require JSON }; -our $pretty_json = 0; -our $sorted_json = 0; +our $pretty_json = $ENV{PTTEST_PRETTY_JSON} || 0; +our $sorted_json = $ENV{PTTEST_PRETTY_JSON} || 0; + extends qw(QueryReportFormatter); +has 'QueryRewriter' => ( + is => 'ro', + isa => 'Object', + required => 1, +); + +has 'QueryParser' => ( + is => 'ro', + isa => 'Object', + required => 1, +); + +has 'Quoter' => ( + is => 'ro', + isa => 'Object', + required => 1, +); + has _json => ( is => 'ro', init_arg => undef, builder => '_build_json', ); +has 'max_query_length' => ( + is => 'rw', + isa => 'Int', + required => 0, + default => sub { return 10_000; }, # characters, not bytes +); + +has 'max_fingerprint_length' => ( + is => 'rw', + isa => 'Int', + required => 0, + default => sub { return 5_000; }, # characters, not bytes +); + sub _build_json { return unless $have_json; return JSON->new->utf8 @@ -7700,15 +7741,102 @@ override query_report => sub { foreach my $arg ( qw(ea worst orderby groupby) ) { die "I need a $arg argument" unless defined $arg; } + my $ea = $args{ea}; + my $worst = $args{worst}; + my $orderby = $args{orderby}; + my $groupby = $args{groupby}; - my $ea = $args{ea}; - my $worst = $args{worst}; - + my $results = $ea->results(); my @attribs = @{$ea->get_attributes()}; - my %string_args = map { $_ => 1 } qw( db host arg user bytes pos_in_log ); - - my @queries; + my $q = $self->Quoter; + my $qr = $self->QueryRewriter; + + my $global_data = { + metrics => {}, + files => $args{files}, + ($args{resume} && scalar keys %{$args{resume}} ? 
(resume => $args{resume}) : ()), + }; + + my $global_cnt = $results->{globals}->{$orderby}->{cnt} || 0; + my $global_unq = scalar keys %{$results->{classes}}; + + my ($qps, $conc) = (0, 0); + if ( $global_cnt && $results->{globals}->{ts} + && ($results->{globals}->{ts}->{max} || '') + gt ($results->{globals}->{ts}->{min} || '') ) + { + eval { + my $min = parse_timestamp($results->{globals}->{ts}->{min}); + my $max = parse_timestamp($results->{globals}->{ts}->{max}); + my $diff = unix_timestamp($max) - unix_timestamp($min); + $qps = $global_cnt / ($diff || 1); + $conc = $results->{globals}->{$orderby}->{sum} / $diff; + }; + } + + $global_data->{query_count} = $global_cnt; + $global_data->{unique_query_count} = $global_unq; + $global_data->{queries_per_second} = $qps if $qps; + $global_data->{concurrency} = $conc if $conc; + + my %hidden_attrib = ( + arg => 1, + fingerprint => 1, + pos_in_log => 1, + ts => 1, + ); + + foreach my $attrib ( grep { !$hidden_attrib{$_} } @attribs ) { + my $type = $ea->type_for($attrib) || 'string'; + next if $type eq 'string'; + next unless exists $results->{globals}->{$attrib}; + + my $store = $results->{globals}->{$attrib}; + my $metrics = $ea->stats()->{globals}->{$attrib}; + my $int = $attrib =~ m/(?:time|wait)$/ ? 0 : 1; + + my $real_attrib = $attrib eq 'bytes' ? 
'Query_length' : $attrib; + + if ( $type eq 'num' ) { + foreach my $m ( qw(sum min max) ) { + if ( $int ) { + $global_data->{metrics}->{$real_attrib}->{$m} + = sprintf('%d', $store->{$m} || 0); + } + else { # microsecond + $global_data->{metrics}->{$real_attrib}->{$m} + = sprintf('%.6f', $store->{$m} || 0); + } + } + foreach my $m ( qw(pct_95 stddev median) ) { + if ( $int ) { + $global_data->{metrics}->{$real_attrib}->{$m} + = sprintf('%d', $metrics->{$m} || 0); + } + else { # microsecond + $global_data->{metrics}->{$real_attrib}->{$m} + = sprintf('%.6f', $metrics->{$m} || 0); + } + } + if ( $int ) { + $global_data->{metrics}->{$real_attrib}->{avg} + = sprintf('%d', $store->{sum} / $store->{cnt}); + } + else { + $global_data->{metrics}->{$real_attrib}->{avg} + = sprintf('%.6f', $store->{sum} / $store->{cnt}); + } + } + elsif ( $type eq 'bool' ) { + my $store = $results->{globals}->{$real_attrib}; + $global_data->{metrics}->{$real_attrib}->{cnt} + = sprintf('%d', $store->{sum}); + } + } + + + my @classes; foreach my $worst_info ( @$worst ) { my $item = $worst_info->[0]; my $stats = $ea->results->{classes}->{$item}; @@ -7716,17 +7844,29 @@ override query_report => sub { my $all_log_pos = $ea->{result_classes}->{$item}->{pos_in_log}->{all}; my $times_seen = sum values %$all_log_pos; - - my %class = ( - sample => $sample->{arg}, - fingerprint => $item, - checksum => make_checksum($item), - cnt => $times_seen, - ); - + + my $distill = $groupby eq 'fingerprint' ? $qr->distill($sample->{arg}) + : undef; + my $fingerprint = substr($item, 0, $self->max_fingerprint_length); + my $checksum = make_checksum($item); + my $class = { + checksum => $checksum, + fingerprint => $fingerprint, + distillate => $distill, + attribute => $groupby, + query_count => $times_seen, + example => { + query => substr($sample->{arg}, 0, $self->max_query_length), + ts => $sample->{ts} ? 
parse_timestamp($sample->{ts}) : undef, + }, + }; + my %metrics; foreach my $attrib ( @attribs ) { - $metrics{$attrib} = $ea->metrics( + my $real_attrib = $attrib eq 'bytes' ? 'Query_length' : $attrib; + next if $real_attrib eq 'Rows_affected' + && $distill && $distill =~ m/^(?:SELECT|SHOW|SET|ADMIN)/; + $metrics{$real_attrib} = $ea->metrics( attrib => $attrib, where => $item, ); @@ -7737,6 +7877,8 @@ override query_report => sub { delete $metrics{$attrib}; next; } + delete $metrics{pos_in_log}; + delete $metrics{$attrib}->{cnt}; if ($attrib eq 'ts') { my $ts = delete $metrics{ts}; @@ -7744,31 +7886,87 @@ override query_report => sub { next unless defined $ts && defined $ts->{$thing}; $ts->{$thing} = parse_timestamp($ts->{$thing}); } - $class{ts_min} = $ts->{min}; - $class{ts_max} = $ts->{max}; + $class->{ts_min} = $ts->{min}; + $class->{ts_max} = $ts->{max}; } - elsif ( $string_args{$attrib} ) { - $metrics{$attrib} = { value => $metrics{$attrib}{max} }; - } - elsif ( ($ea->{type_for}->{$attrib} || '') eq 'num' ) { - for my $value ( values %{$metrics{$attrib}} ) { - next unless $value; - $value = sprintf '%.6f', $value; + else { + my $type = $attrib eq 'Query_length' ? 'num' : $ea->type_for($attrib) || 'string'; + if ( $type eq 'string' ) { + $metrics{$attrib} = { value => $metrics{$attrib}{max} }; } - if ( my $pct = $metrics{$attrib}->{pct} ) { - $metrics{$attrib}->{pct} = sprintf('%.2f', $pct); + elsif ( $type eq 'num' ) { + foreach my $value ( values %{$metrics{$attrib}} ) { + next unless defined $value; + if ( $attrib =~ m/_(?:time|wait)$/ ) { + $value = sprintf('%.6f', $value); + } + else { + $value = sprintf('%d', $value); + } + } + } + elsif ( $type eq 'bool' ) { + $metrics{$attrib} = { + yes => sprintf('%d', $metrics{$attrib}->{sum}), + }; } } } - push @queries, { - class => \%class, - attributes => \%metrics, - }; + + my @tables; + if ( $groupby eq 'fingerprint' ) { + my $default_db = $sample->{db} ? $sample->{db} + : $stats->{db}->{unq} ? 
keys %{$stats->{db}->{unq}} + : undef; + my @table_names = $self->QueryParser->extract_tables( + query => $sample->{arg} || '', + default_db => $default_db, + Quoter => $q, + ); + foreach my $db_tbl ( @table_names ) { + my ( $db, $tbl ) = @$db_tbl; + my $status + = 'SHOW TABLE STATUS' + . ($db ? " FROM `$db`" : '') + . " LIKE '$tbl'\\G"; + my $create + = "SHOW CREATE TABLE " + . $q->quote(grep { $_ } @$db_tbl) + . "\\G"; + push @tables, { status => $status, create => $create }; + } + + if ( $item =~ m/^(?:[\(\s]*select|insert|replace)/ ) { + if ( $item =~ m/^(?:insert|replace)/ ) { + } + else { + + } + } + else { + my $converted = $qr->convert_to_select( + $sample->{arg} || '', + ); + if ( $converted && $converted =~ m/^[\(\s]*select/i ) { + $class->{example}->{as_select} = $converted; + } + } + } + + $class->{metrics} = \%metrics; + if ( @tables ) { + $class->{tables} = \@tables; + } + push @classes, $class; } - my $json = $self->encode_json(\@queries); - $json .= "\n" if $json !~ /\n\Z/; - return $json . "\n"; + my $data = { + global => $global_data, + classes => \@classes, + }; + my $json = $self->encode_json($data); + $json .= "\n" unless $json =~ /\n\Z/; + return $json; }; no Lmo; @@ -12523,7 +12721,9 @@ my $ps_dbh; # For Processlist my $aux_dbh; # For --aux-dsn (--since/--until "MySQL expression") my $resume_file; +my $resume = {}; my $offset; +my $exit_status = 0; (my $tool = __PACKAGE__) =~ tr/_/-/; @@ -12531,8 +12731,9 @@ sub main { # Reset global vars, else tests will fail. local @ARGV = @_; $oktorun = 1; - $resume_file = undef; + $resume = {}; $offset = undef; + $exit_status = 0; # ########################################################################## # Get configuration information. 
@@ -12816,30 +13017,79 @@ sub main { if ( $fh ) { PTDEBUG && _d('Reading', $filename); PTDEBUG && _d('File size:', $filesize); - push @read_files, $filename || "STDIN"; + push @read_files, { name => ($filename || "STDIN"), size => $filesize }; # Read the file offset for --resume. if ( ($resume_file = $o->get('resume')) && $filename ) { if ( -s $resume_file ) { - open my $resume_fh, '<', $resume_file - or die "Error opening $resume_file: $OS_ERROR"; - chomp(my $resume_offset = <$resume_fh>); + open my $resume_fh, "<", $resume_file + or die "Cannot open $resume_file: $OS_ERROR"; + my $resume_offset = do { local $/; <$resume_fh> }; close $resume_fh or die "Error close $resume_file: $OS_ERROR"; - if ( !looks_like_number($resume_offset) ) { - die "Offset $resume_offset in $resume_file " - . "does not look like a number.\n"; + chomp($resume_offset) if $resume_offset; + if ( looks_like_number($resume_offset) ) { + PTDEBUG && _d('Resuming at offset', $resume_offset); + $resume->{simple} = 1; + seek $fh, $resume_offset, 0 + or die "Error seeking to $resume_offset in " + . "$resume_file: $OS_ERROR"; + warn "# Resuming $filename from offset " + . "$resume_offset (file size: $filesize)...\n"; + } + else { + $resume->{simple} = 0; # enhanced resume file + map { + my $line = $_; + chomp $line; + my ($key, $value) = split('=', $line); + if ( !$key + || !defined $value + || !looks_like_number($value) + || $value < 0 ) + { + $exit_status = 1; + warn "Invalid line in --resume $resume_file: $line\n"; + $oktorun = 0; + return; + } + $resume->{$key} = $value; + } split("\n", $resume_offset); + if ( $resume->{end_offset} && + $resume->{end_offset} <= + ($resume->{stop_offset} || 0) ) + { + close $args->{input_fh} if $args->{input_fh}; + $args->{input_fh} = undef; + $args->{more_events} = 0; + $oktorun = 0; + $resume_file = ''; + warn "# Not resuming $filename because " + . "end_offset $resume->{end_offset} is " + . "less than or equal to stop_offset " + . ($resume->{stop_offset} || 0) . 
"\n"; + } + else { + $resume_offset = $resume->{stop_offset} + || $resume->{start_offset} + || 0; + seek $fh, $resume_offset, 0 + or die "Error seeking to $resume_offset in " + . "$resume_file: $OS_ERROR"; + warn "# Resuming $filename from offset " + . "$resume_offset to " + . ($resume->{end_offset} ? $resume->{end_offset} + : "end of file") + . " (file size: $filesize)...\n"; + } } - PTDEBUG && _d('Resuming at offset', $resume_offset); - seek $fh, $resume_offset, 0 - or die "Error seeking to $resume_offset in " - . "$resume_file: $OS_ERROR"; - warn "Resuming $filename from offset $resume_offset " - . "(file size: $filesize)...\n"; } else { - PTDEBUG && _d('Not resuming', $filename, 'because', - $resume_file, 'does not exist'); + warn "# Resuming $filename from offset 0 because " + . "resume file $filename does not exist " + . "(file size: $filesize)...\n"; + $resume->{simple} = 0; + $resume->{start_offset} = 0; } } @@ -12883,13 +13133,24 @@ sub main { $args->{more_events} = 0; } } - $pr->update($args->{tell}) if $pr; + elsif ( $resume->{end_offset} + && $offset >= $resume->{end_offset} ) { + PTDEBUG && _d('Offset', $offset, 'at end_offset', + $resume->{end_offset}); + close $args->{input_fh} if $args->{input_fh}; + $args->{input_fh} = undef; + $args->{more_events} = 0; + } + else { + $pr->update($args->{tell}) if $pr; + } return $args; }, ); } # input my $ps_dsn; + my @parsers; { # event my $misc; if ( $ps_dsn = $o->get('processlist') ) { @@ -12991,7 +13252,8 @@ sub main { } die "Failed to load $module module: $EVAL_ERROR"; } - + push @parsers, $parser; + $pipeline->add( name => ref $parser, process => sub { @@ -13244,6 +13506,10 @@ sub main { if ( $report ) { PTDEBUG && _d("Iteration", $args->{iter}, "stopped at",ts(time)); + save_resume_offset( + last_event_offset => $parsers[0]->{last_event_offset}, + ); + # Get this before calling print_reports() because that sub # resets each ea and we may need this later for stats. 
my $n_events_aggregated = $ea[0]->events_processed(); @@ -13262,7 +13528,9 @@ sub main { ); } else { - print "\n# No events processed.\n"; + if ( $o->get('output') eq 'report' ) { + print "\n# No events processed.\n"; + } } if ( PTDEBUG ) { @@ -13596,6 +13864,9 @@ sub main { Last_errno => 'string', Thread_id => 'string', InnoDB_trx_id => 'string', + host => 'string', + ip => 'string', + port => 'string', Killed => 'bool', }; @@ -13686,7 +13957,9 @@ sub main { } PTDEBUG && _d("Pipeline data:", Dumper($pipeline_data)); - save_resume_offset(); + save_resume_offset( + last_event_offset => $parsers[0]->{last_event_offset}, + ); # Disconnect all open $dbh's map { @@ -13696,7 +13969,7 @@ sub main { grep { $_ } ($qv_dbh, $qh_dbh, $ps_dbh, $ep_dbh, $aux_dbh); - return 0; + return $exit_status; } # End main() # ############################################################################ @@ -13858,7 +14131,8 @@ sub print_reports { files => $args{files}, log_type => $o->get('type')->[0], variations => $o->get('variations'), - group => { map { $_=>1 } qw(rusage date hostname files header) } + group => { map { $_=>1 } qw(rusage date hostname files header) }, + resume => $resume, ); } @@ -14213,6 +14487,9 @@ sub verify_run_time { } sub save_resume_offset { + my (%args) = @_; + my $last_event_offset = $args{last_event_offset}; + if ( !$resume_file || !$offset ) { PTDEBUG && _d('Not saving resume offset because there is no ' . 'resume file or offset:', $resume_file, $offset); @@ -14222,10 +14499,26 @@ sub save_resume_offset { PTDEBUG && _d('Saving resume at offset', $offset, 'to', $resume_file); open my $resume_fh, '>', $resume_file or die "Error opening $resume_file: $OS_ERROR"; - print { $resume_fh } $offset, "\n"; + + if ( $resume->{simple} ) { + print { $resume_fh } $offset, "\n"; + warn "\n# Saved resume file offset $offset to $resume_file\n"; + } + else { + # 2.2.3+ enhanced resume file + $resume->{stop_offset} = defined $last_event_offset ? 
$last_event_offset + : $offset; + foreach my $key ( sort keys %$resume ) { + next if $key eq 'simple'; + print { $resume_fh } "$key=$resume->{$key}\n"; + } + warn "\n# Saved resume file stop_offset $resume->{stop_offset} to " + . "$resume_file\n"; + } + close $resume_fh or die "Error close $resume_file: $OS_ERROR"; - warn "\n# Saved resume file offset $offset to $resume_file\n"; + return; } diff --git a/lib/Cxn.pm b/lib/Cxn.pm index 266f0131..b59bdd30 100644 --- a/lib/Cxn.pm +++ b/lib/Cxn.pm @@ -49,7 +49,6 @@ use constant { # # Required Arguments: # DSNParser - object -# OptionParser - object # dsn - DSN hashref, or... # dsn_string - ... DSN string like "h=127.1,P=12345" # @@ -109,7 +108,7 @@ sub new { set => $args{set}, NAME_lc => defined($args{NAME_lc}) ? $args{NAME_lc} : 1, dbh_set => 0, - ask_pass => $o->get('ask-pass'), + ask_pass => $args{ask_pass}, DSNParser => $dp, is_cluster_node => undef, parent => $args{parent}, diff --git a/lib/Daemon.pm b/lib/Daemon.pm index 2e53cccd..8d756a64 100644 --- a/lib/Daemon.pm +++ b/lib/Daemon.pm @@ -1,4 +1,4 @@ -# This program is copyright 2008-2011 Percona Ireland Ltd. +# This program is copyright 2008-2013 Percona Ireland Ltd. # Feedback and improvements are welcome. # # THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED @@ -17,191 +17,255 @@ # ########################################################################### # Daemon package # ########################################################################### -{ -# Package: Daemon -# Daemon daemonizes the caller and handles daemon-related tasks like PID files. package Daemon; use strict; use warnings FATAL => 'all'; use English qw(-no_match_vars); + use constant PTDEBUG => $ENV{PTDEBUG} || 0; use POSIX qw(setsid); +use Fcntl qw(:DEFAULT); -# The required o arg is an OptionParser object. 
sub new { - my ( $class, %args ) = @_; - foreach my $arg ( qw(o) ) { - die "I need a $arg argument" unless $args{$arg}; - } - my $o = $args{o}; + my ($class, %args) = @_; my $self = { - o => $o, - log_file => $o->has('log') ? $o->get('log') : undef, - PID_file => $o->has('pid') ? $o->get('pid') : undef, + log_file => $args{log_file}, + pid_file => $args{pid_file}, + daemonize => $args{daemonize}, + force_log_file => $args{force_log_file}, + parent_exit => $args{parent_exit}, + pid_file_owner => 0, }; - - # undef because we can't call like $self->check_PID_file() yet. - check_PID_file(undef, $self->{PID_file}); - - PTDEBUG && _d('Daemonized child will log to', $self->{log_file}); return bless $self, $class; } -sub daemonize { - my ( $self ) = @_; +sub run { + my ($self) = @_; - PTDEBUG && _d('About to fork and daemonize'); - defined (my $pid = fork()) or die "Cannot fork: $OS_ERROR"; - if ( $pid ) { - PTDEBUG && _d('Parent PID', $PID, 'exiting after forking child PID',$pid); - exit; - } + # Just for brevity: + my $daemonize = $self->{daemonize}; + my $pid_file = $self->{pid_file}; + my $log_file = $self->{log_file}; + my $force_log_file = $self->{force_log_file}; + my $parent_exit = $self->{parent_exit}; - # I'm daemonized now. - PTDEBUG && _d('Daemonizing child PID', $PID); - $self->{PID_owner} = $PID; - $self->{child} = 1; + PTDEBUG && _d('Starting daemon'); - POSIX::setsid() or die "Cannot start a new session: $OS_ERROR"; - chdir '/' or die "Cannot chdir to /: $OS_ERROR"; - - $self->_make_PID_file(); - - $OUTPUT_AUTOFLUSH = 1; - - # We used to only reopen STDIN to /dev/null if it's a tty because - # otherwise it may be a pipe, in which case we didn't want to break - # it. However, Perl -t is not reliable. This is true and false on - # various boxes even when the same code is ran, or it depends on if - # the code is ran via cron, Jenkins, etc. 
Since there should be no - # sane reason to `foo | pt-tool --daemonize` for a tool that reads - # STDIN, we now just always close STDIN. - PTDEBUG && _d('Redirecting STDIN to /dev/null'); - close STDIN; - open STDIN, '/dev/null' - or die "Cannot reopen STDIN to /dev/null: $OS_ERROR"; - - if ( $self->{log_file} ) { - PTDEBUG && _d('Redirecting STDOUT and STDERR to', $self->{log_file}); - close STDOUT; - open STDOUT, '>>', $self->{log_file} - or die "Cannot open log file $self->{log_file}: $OS_ERROR"; - - # If we don't close STDERR explicitly, then prove Daemon.t fails - # because STDERR gets written before STDOUT even though we print - # to STDOUT first in the tests. I don't know why, but it's probably - # best that we just explicitly close all fds before reopening them. - close STDERR; - open STDERR, ">&STDOUT" - or die "Cannot dupe STDERR to STDOUT: $OS_ERROR"; - } - else { - if ( -t STDOUT ) { - PTDEBUG && _d('No log file and STDOUT is a terminal;', - 'redirecting to /dev/null'); - close STDOUT; - open STDOUT, '>', '/dev/null' - or die "Cannot reopen STDOUT to /dev/null: $OS_ERROR"; - } - if ( -t STDERR ) { - PTDEBUG && _d('No log file and STDERR is a terminal;', - 'redirecting to /dev/null'); - close STDERR; - open STDERR, '>', '/dev/null' - or die "Cannot reopen STDERR to /dev/null: $OS_ERROR"; - } - } - - return; -} - -# The file arg is optional. It's used when new() calls this sub -# because $self hasn't been created yet. -sub check_PID_file { - my ( $self, $file ) = @_; - my $PID_file = $self ? $self->{PID_file} : $file; - PTDEBUG && _d('Checking PID file', $PID_file); - if ( $PID_file && -f $PID_file ) { - my $pid; + # First obtain the pid file or die trying. NOTE: we're still the parent + # so the pid file will contain the parent's pid at first. This is done + # to avoid a race condition between the parent checking for the pid file, + # forking, and the child actually obtaining the pid file. 
This way, if + # the parent obtains the pid file, the child is guaranteed to be the only + # process running. + if ( $pid_file ) { eval { - chomp($pid = (slurp_file($PID_file) || '')); + $self->_make_pid_file( + pid => $PID, # parent's pid + pid_file => $pid_file, + ); }; - if ( $EVAL_ERROR ) { - # Be safe and die if we can't check that a process is - # or is not already running. - die "The PID file $PID_file already exists but it cannot be read: " - . $EVAL_ERROR; + die "$EVAL_ERROR\n" if $EVAL_ERROR; + if ( !$daemonize ) { + # We're not going to daemonize, so mark the pid file as owned + # by the parent. Otherwise, daemonize/fork and the child will + # take ownership. + $self->{pid_file_owner} = $PID; # parent's pid } - PTDEBUG && _d('PID file exists; it contains PID', $pid); - if ( $pid ) { - my $pid_is_alive = kill 0, $pid; - if ( $pid_is_alive ) { - die "The PID file $PID_file already exists " - . " and the PID that it contains, $pid, is running"; - } - else { - warn "Overwriting PID file $PID_file because the PID that it " - . "contains, $pid, is not running"; - } + } + + # Fork, exit parent, continue as child process. + if ( $daemonize ) { + defined (my $child_pid = fork()) or die "Cannot fork: $OS_ERROR"; + if ( $child_pid ) { + # I'm the parent. + PTDEBUG && _d('Forked child', $child_pid); + $parent_exit->($child_pid) if $parent_exit; + exit 0; + } + + # I'm the child. + POSIX::setsid() or die "Cannot start a new session: $OS_ERROR"; + chdir '/' or die "Cannot chdir to /: $OS_ERROR"; + + # Now update the pid file to contain the child's pid. + if ( $pid_file ) { + $self->_update_pid_file( + pid => $PID, # child's pid + pid_file => $pid_file, + ); + $self->{pid_file_owner} = $PID; + } + } + + if ( $daemonize || $force_log_file ) { + # We used to only reopen STDIN to /dev/null if it's a tty because + # otherwise it may be a pipe, in which case we didn't want to break + # it. However, Perl -t is not reliable. 
This is true and false on + # various boxes even when the same code is ran, or it depends on if + # the code is ran via cron, Jenkins, etc. Since there should be no + # sane reason to `foo | pt-tool --daemonize` for a tool that reads + # STDIN, we now just always close STDIN. + PTDEBUG && _d('Redirecting STDIN to /dev/null'); + close STDIN; + open STDIN, '/dev/null' + or die "Cannot reopen STDIN to /dev/null: $OS_ERROR"; + if ( $log_file ) { + PTDEBUG && _d('Redirecting STDOUT and STDERR to', $log_file); + close STDOUT; + open STDOUT, '>>', $log_file + or die "Cannot open log file $log_file: $OS_ERROR"; + + # If we don't close STDERR explicitly, then prove Daemon.t fails + # because STDERR gets written before STDOUT even though we print + # to STDOUT first in the tests. I don't know why, but it's probably + # best that we just explicitly close all fds before reopening them. + close STDERR; + open STDERR, ">&STDOUT" + or die "Cannot dupe STDERR to STDOUT: $OS_ERROR"; } else { - # Be safe and die if we can't check that a process is - # or is not already running. - die "The PID file $PID_file already exists but it does not " - . "contain a PID"; + if ( -t STDOUT ) { + PTDEBUG && _d('No log file and STDOUT is a terminal;', + 'redirecting to /dev/null'); + close STDOUT; + open STDOUT, '>', '/dev/null' + or die "Cannot reopen STDOUT to /dev/null: $OS_ERROR"; + } + if ( -t STDERR ) { + PTDEBUG && _d('No log file and STDERR is a terminal;', + 'redirecting to /dev/null'); + close STDERR; + open STDERR, '>', '/dev/null' + or die "Cannot reopen STDERR to /dev/null: $OS_ERROR"; + } } + + $OUTPUT_AUTOFLUSH = 1; } - else { - PTDEBUG && _d('No PID file'); - } + + PTDEBUG && _d('Daemon running'); return; } # Call this for non-daemonized scripts to make a PID file. 
-sub make_PID_file { - my ( $self ) = @_; - if ( exists $self->{child} ) { - die "Do not call Daemon::make_PID_file() for daemonized scripts"; +sub _make_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid pid_file); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid = $args{pid}; + my $pid_file = $args{pid_file}; + + # "If O_CREAT and O_EXCL are set, open() shall fail if the file exists. + # The check for the existence of the file and the creation of the file + # if it does not exist shall be atomic with respect to other threads + # executing open() naming the same filename in the same directory with + # O_EXCL and O_CREAT set. + eval { + sysopen(PID_FH, $pid_file, O_RDWR|O_CREAT|O_EXCL) or die $OS_ERROR; + print PID_FH $PID, "\n"; + close PID_FH; + }; + if ( my $e = $EVAL_ERROR ) { + if ( $e =~ m/file exists/i ) { + # Check if the existing pid is running. If yes, then die, + # else this returns and we overwrite the pid file. + my $old_pid = $self->_check_pid_file( + pid_file => $pid_file, + pid => $PID, + ); + if ( $old_pid ) { + warn "Overwriting PID file $pid_file because PID $old_pid " + . "is not running.\n"; + } + $self->_update_pid_file( + pid => $PID, + pid_file => $pid_file + ); + } + else { + die "Error creating PID file $pid_file: $e\n"; + } } - $self->_make_PID_file(); - # This causes the PID file to be auto-removed when this obj is destroyed. - $self->{PID_owner} = $PID; + return; } -# Do not call this sub directly. For daemonized scripts, it's called -# automatically from daemonize() if there's a --pid opt. For non-daemonized -# scripts, call make_PID_file(). 
-sub _make_PID_file { - my ( $self ) = @_; +sub _check_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid_file pid); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid_file = $args{pid_file}; + my $pid = $args{pid}; - my $PID_file = $self->{PID_file}; - if ( !$PID_file ) { - PTDEBUG && _d('No PID file to create'); + PTDEBUG && _d('Checking if PID in', $pid_file, 'is running'); + + if ( ! -f $pid_file ) { + PTDEBUG && _d('PID file', $pid_file, 'does not exist'); return; } - # We checked this in new() but we'll double check here. - $self->check_PID_file(); + open my $fh, '<', $pid_file + or die "Error opening $pid_file: $OS_ERROR"; + my $existing_pid = do { local $/; <$fh> }; + chomp($existing_pid) if $existing_pid; + close $fh + or die "Error closing $pid_file: $OS_ERROR"; - open my $PID_FH, '>', $PID_file - or die "Cannot open PID file $PID_file: $OS_ERROR"; - print $PID_FH $PID - or die "Cannot print to PID file $PID_file: $OS_ERROR"; - close $PID_FH - or die "Cannot close PID file $PID_file: $OS_ERROR"; + if ( $existing_pid ) { + if ( $existing_pid == $pid ) { + # This happens when pt-agent "re-daemonizes". + warn "The current PID $pid already holds the PID file $pid_file\n"; + return; + } + else { + PTDEBUG && _d('Checking if PID', $existing_pid, 'is running'); + my $pid_is_alive = kill 0, $existing_pid; + if ( $pid_is_alive ) { + die "PID file $pid_file exists and PID $existing_pid is running\n"; + } + } + } + else { + # PID file but no PID: not sure what to do, so be safe and die; + # let the user figure it out (i.e. rm the pid file). + die "PID file $pid_file exists but it is empty. Remove the file " + . 
"if the process is no longer running.\n"; + } + + return $existing_pid; +} + +sub _update_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid pid_file); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid = $args{pid}; + my $pid_file = $args{pid_file}; + + open my $fh, '>', $pid_file + or die "Cannot open $pid_file: $OS_ERROR"; + print { $fh } $pid, "\n" + or die "Cannot print to $pid_file: $OS_ERROR"; + close $fh + or warn "Cannot close $pid_file: $OS_ERROR"; - PTDEBUG && _d('Created PID file:', $self->{PID_file}); return; } -sub _remove_PID_file { - my ( $self ) = @_; - if ( $self->{PID_file} && -f $self->{PID_file} ) { - unlink $self->{PID_file} - or warn "Cannot remove PID file $self->{PID_file}: $OS_ERROR"; +sub remove_pid_file { + my ($self, $pid_file) = @_; + $pid_file ||= $self->{pid_file}; + if ( $pid_file && -f $pid_file ) { + unlink $self->{pid_file} + or warn "Cannot remove PID file $pid_file: $OS_ERROR"; PTDEBUG && _d('Removed PID file'); } else { @@ -211,7 +275,7 @@ sub _remove_PID_file { } sub DESTROY { - my ( $self ) = @_; + my ($self) = @_; # Remove the PID file only if we created it. There's two cases where # it might be removed wrongly. 1) When the obj first daemonizes itself, @@ -220,21 +284,16 @@ sub DESTROY { # have it. 2) When daemonized code forks its children get copies of # the Daemon obj which will also call this sub when they exit. We # don't remove it then because the daemonized parent code won't have it. - # This trick works because $self->{PID_owner}=$PID is set once to the + # This trick works because $self->{pid_file_owner}=$PID is set once to the # owner's $PID then this value is copied on fork. But the "== $PID" # here is the forked copy's PID which won't match the owner's PID. 
- $self->_remove_PID_file() if ($self->{PID_owner} || 0) == $PID; + if ( $self->{pid_file_owner} == $PID ) { + $self->remove_pid_file(); + } return; } -sub slurp_file { - my ($file) = @_; - return unless $file; - open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; - return do { local $/; <$fh> }; -} - sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } @@ -244,7 +303,6 @@ sub _d { } 1; -} # ########################################################################### # End Daemon package # ########################################################################### diff --git a/lib/HTTPMicro.pm b/lib/HTTP/Micro.pm similarity index 56% rename from lib/HTTPMicro.pm rename to lib/HTTP/Micro.pm index cc9d36cd..127894c4 100644 --- a/lib/HTTPMicro.pm +++ b/lib/HTTP/Micro.pm @@ -15,23 +15,21 @@ # this program; if not, write to the Free Software Foundation, Inc., 59 Temple # Place, Suite 330, Boston, MA 02111-1307 USA. # ########################################################################### -# HTTPMicro package +# HTTP::Micro package # ########################################################################### { -# Package: HTTPMicro +# Package: HTTP::Micro # A stripped down version of HTTP::Tiny; but not a correct HTTP/1.1 -# implementation +# implementation. 
+package HTTP::Micro; + +our $VERSION = '0.01'; -package HTTPMicro; -BEGIN { - $HTTPMicro::VERSION = '0.001'; -} use strict; -use warnings; - +use warnings FATAL => 'all'; +use English qw(-no_match_vars); use Carp (); - my @attributes; BEGIN { @attributes = qw(agent timeout); @@ -103,7 +101,7 @@ sub _request { headers => {}, }; - my $handle = HTTPMicro::Handle->new(timeout => $self->{timeout}); + my $handle = HTTP::Micro::Handle->new(timeout => $self->{timeout}); $handle->connect($scheme, $host, $port); @@ -169,322 +167,327 @@ sub _split_url { return ($scheme, $host, $port, $path_query); } -package - HTTPMicro::Handle; # hide from PAUSE/indexers -use strict; -use warnings; +} # HTTP::Micro -use Carp qw[croak]; -use Errno qw[EINTR EPIPE]; -use IO::Socket qw[SOCK_STREAM]; +{ + package HTTP::Micro::Handle; -sub BUFSIZE () { 32768 } + use strict; + use warnings FATAL => 'all'; + use English qw(-no_match_vars); -my $Printable = sub { - local $_ = shift; - s/\r/\\r/g; - s/\n/\\n/g; - s/\t/\\t/g; - s/([^\x20-\x7E])/sprintf('\\x%.2X', ord($1))/ge; - $_; -}; + use Carp qw(croak); + use Errno qw(EINTR EPIPE); + use IO::Socket qw(SOCK_STREAM); -sub new { - my ($class, %args) = @_; - return bless { - rbuf => '', - timeout => 60, - max_line_size => 16384, - %args - }, $class; -} + sub BUFSIZE () { 32768 } -my $ssl_verify_args = { - check_cn => "when_only", - wildcards_in_alt => "anywhere", - wildcards_in_cn => "anywhere" -}; + my $Printable = sub { + local $_ = shift; + s/\r/\\r/g; + s/\n/\\n/g; + s/\t/\\t/g; + s/([^\x20-\x7E])/sprintf('\\x%.2X', ord($1))/ge; + $_; + }; -sub connect { - @_ == 4 || croak(q/Usage: $handle->connect(scheme, host, port)/); - my ($self, $scheme, $host, $port) = @_; + sub new { + my ($class, %args) = @_; + return bless { + rbuf => '', + timeout => 60, + max_line_size => 16384, + %args + }, $class; + } - if ( $scheme eq 'https' ) { - eval "require IO::Socket::SSL" - unless exists $INC{'IO/Socket/SSL.pm'}; - croak(qq/IO::Socket::SSL must be installed 
for https support\n/) - unless $INC{'IO/Socket/SSL.pm'}; - } - elsif ( $scheme ne 'http' ) { - croak(qq/Unsupported URL scheme '$scheme'\n/); - } + my $ssl_verify_args = { + check_cn => "when_only", + wildcards_in_alt => "anywhere", + wildcards_in_cn => "anywhere" + }; - $self->{fh} = 'IO::Socket::INET'->new( - PeerHost => $host, - PeerPort => $port, - Proto => 'tcp', - Type => SOCK_STREAM, - Timeout => $self->{timeout} - ) or croak(qq/Could not connect to '$host:$port': $@/); + sub connect { + @_ == 4 || croak(q/Usage: $handle->connect(scheme, host, port)/); + my ($self, $scheme, $host, $port) = @_; - binmode($self->{fh}) - or croak(qq/Could not binmode() socket: '$!'/); + if ( $scheme eq 'https' ) { + eval "require IO::Socket::SSL" + unless exists $INC{'IO/Socket/SSL.pm'}; + croak(qq/IO::Socket::SSL must be installed for https support\n/) + unless $INC{'IO/Socket/SSL.pm'}; + } + elsif ( $scheme ne 'http' ) { + croak(qq/Unsupported URL scheme '$scheme'\n/); + } - if ( $scheme eq 'https') { - IO::Socket::SSL->start_SSL($self->{fh}); - ref($self->{fh}) eq 'IO::Socket::SSL' - or die(qq/SSL connection failed for $host\n/); - if ( $self->{fh}->can("verify_hostname") ) { - $self->{fh}->verify_hostname( $host, $ssl_verify_args ); - } - else { - # Can't use $self->{fh}->verify_hostname because the IO::Socket::SSL - # that comes from yum doesn't have it, so use our inlined version. 
- my $fh = $self->{fh}; - _verify_hostname_of_cert($host, _peer_certificate($fh), $ssl_verify_args) - or die(qq/SSL certificate not valid for $host\n/); - } - } - - $self->{host} = $host; - $self->{port} = $port; + $self->{fh} = IO::Socket::INET->new( + PeerHost => $host, + PeerPort => $port, + Proto => 'tcp', + Type => SOCK_STREAM, + Timeout => $self->{timeout} + ) or croak(qq/Could not connect to '$host:$port': $@/); - return $self; -} + binmode($self->{fh}) + or croak(qq/Could not binmode() socket: '$!'/); -sub close { - @_ == 1 || croak(q/Usage: $handle->close()/); - my ($self) = @_; - CORE::close($self->{fh}) - or croak(qq/Could not close socket: '$!'/); -} + if ( $scheme eq 'https') { + IO::Socket::SSL->start_SSL($self->{fh}); + ref($self->{fh}) eq 'IO::Socket::SSL' + or die(qq/SSL connection failed for $host\n/); + if ( $self->{fh}->can("verify_hostname") ) { + $self->{fh}->verify_hostname( $host, $ssl_verify_args ); + } + else { + # Can't use $self->{fh}->verify_hostname because the IO::Socket::SSL + # that comes from yum doesn't have it, so use our inlined version. + my $fh = $self->{fh}; + _verify_hostname_of_cert($host, _peer_certificate($fh), $ssl_verify_args) + or die(qq/SSL certificate not valid for $host\n/); + } + } + + $self->{host} = $host; + $self->{port} = $port; -sub write { - @_ == 2 || croak(q/Usage: $handle->write(buf)/); - my ($self, $buf) = @_; + return $self; + } - my $len = length $buf; - my $off = 0; + sub close { + @_ == 1 || croak(q/Usage: $handle->close()/); + my ($self) = @_; + CORE::close($self->{fh}) + or croak(qq/Could not close socket: '$!'/); + } - local $SIG{PIPE} = 'IGNORE'; + sub write { + @_ == 2 || croak(q/Usage: $handle->write(buf)/); + my ($self, $buf) = @_; - while () { - $self->can_write - or croak(q/Timed out while waiting for socket to become ready for writing/); - my $r = syswrite($self->{fh}, $buf, $len, $off); - if (defined $r) { - $len -= $r; - $off += $r; - last unless $len > 0; - } - elsif ($! 
== EPIPE) { - croak(qq/Socket closed by remote server: $!/); - } - elsif ($! != EINTR) { - croak(qq/Could not write to socket: '$!'/); - } - } - return $off; -} + my $len = length $buf; + my $off = 0; -sub read { - @_ == 2 || @_ == 3 || croak(q/Usage: $handle->read(len)/); - my ($self, $len) = @_; + local $SIG{PIPE} = 'IGNORE'; - my $buf = ''; - my $got = length $self->{rbuf}; + while () { + $self->can_write + or croak(q/Timed out while waiting for socket to become ready for writing/); + my $r = syswrite($self->{fh}, $buf, $len, $off); + if (defined $r) { + $len -= $r; + $off += $r; + last unless $len > 0; + } + elsif ($! == EPIPE) { + croak(qq/Socket closed by remote server: $!/); + } + elsif ($! != EINTR) { + croak(qq/Could not write to socket: '$!'/); + } + } + return $off; + } - if ($got) { - my $take = ($got < $len) ? $got : $len; - $buf = substr($self->{rbuf}, 0, $take, ''); - $len -= $take; - } + sub read { + @_ == 2 || @_ == 3 || croak(q/Usage: $handle->read(len)/); + my ($self, $len) = @_; - while ($len > 0) { - $self->can_read - or croak(q/Timed out while waiting for socket to become ready for reading/); - my $r = sysread($self->{fh}, $buf, $len, length $buf); - if (defined $r) { - last unless $r; - $len -= $r; - } - elsif ($! != EINTR) { - croak(qq/Could not read from socket: '$!'/); - } - } - if ($len) { - croak(q/Unexpected end of stream/); - } - return $buf; -} + my $buf = ''; + my $got = length $self->{rbuf}; -sub readline { - @_ == 1 || croak(q/Usage: $handle->readline()/); - my ($self) = @_; + if ($got) { + my $take = ($got < $len) ? $got : $len; + $buf = substr($self->{rbuf}, 0, $take, ''); + $len -= $take; + } - while () { - if ($self->{rbuf} =~ s/\A ([^\x0D\x0A]* \x0D?\x0A)//x) { - return $1; - } - $self->can_read - or croak(q/Timed out while waiting for socket to become ready for reading/); - my $r = sysread($self->{fh}, $self->{rbuf}, BUFSIZE, length $self->{rbuf}); - if (defined $r) { - last unless $r; - } - elsif ($! 
!= EINTR) { - croak(qq/Could not read from socket: '$!'/); - } - } - croak(q/Unexpected end of stream while looking for line/); -} + while ($len > 0) { + $self->can_read + or croak(q/Timed out while waiting for socket to become ready for reading/); + my $r = sysread($self->{fh}, $buf, $len, length $buf); + if (defined $r) { + last unless $r; + $len -= $r; + } + elsif ($! != EINTR) { + croak(qq/Could not read from socket: '$!'/); + } + } + if ($len) { + croak(q/Unexpected end of stream/); + } + return $buf; + } -sub read_header_lines { - @_ == 1 || @_ == 2 || croak(q/Usage: $handle->read_header_lines([headers])/); - my ($self, $headers) = @_; - $headers ||= {}; - my $lines = 0; - my $val; + sub readline { + @_ == 1 || croak(q/Usage: $handle->readline()/); + my ($self) = @_; - while () { - my $line = $self->readline; + while () { + if ($self->{rbuf} =~ s/\A ([^\x0D\x0A]* \x0D?\x0A)//x) { + return $1; + } + $self->can_read + or croak(q/Timed out while waiting for socket to become ready for reading/); + my $r = sysread($self->{fh}, $self->{rbuf}, BUFSIZE, length $self->{rbuf}); + if (defined $r) { + last unless $r; + } + elsif ($! != EINTR) { + croak(qq/Could not read from socket: '$!'/); + } + } + croak(q/Unexpected end of stream while looking for line/); + } - if ($line =~ /\A ([^\x00-\x1F\x7F:]+) : [\x09\x20]* ([^\x0D\x0A]*)/x) { - my ($field_name) = lc $1; - $val = \($headers->{$field_name} = $2); - } - elsif ($line =~ /\A [\x09\x20]+ ([^\x0D\x0A]*)/x) { - $val - or croak(q/Unexpected header continuation line/); - next unless length $1; - $$val .= ' ' if length $$val; - $$val .= $1; - } - elsif ($line =~ /\A \x0D?\x0A \z/x) { - last; - } - else { - croak(q/Malformed header line: / . 
$Printable->($line)); - } - } - return $headers; -} + sub read_header_lines { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->read_header_lines([headers])/); + my ($self, $headers) = @_; + $headers ||= {}; + my $lines = 0; + my $val; -sub write_header_lines { - (@_ == 2 && ref $_[1] eq 'HASH') || croak(q/Usage: $handle->write_header_lines(headers)/); - my($self, $headers) = @_; + while () { + my $line = $self->readline; - my $buf = ''; - while (my ($k, $v) = each %$headers) { - my $field_name = lc $k; - $field_name =~ /\A [\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5A\x5E-\x7A\x7C\x7E]+ \z/x - or croak(q/Invalid HTTP header field name: / . $Printable->($field_name)); - $field_name =~ s/\b(\w)/\u$1/g; - $buf .= "$field_name: $v\x0D\x0A"; - } - $buf .= "\x0D\x0A"; - return $self->write($buf); -} + if ($line =~ /\A ([^\x00-\x1F\x7F:]+) : [\x09\x20]* ([^\x0D\x0A]*)/x) { + my ($field_name) = lc $1; + $val = \($headers->{$field_name} = $2); + } + elsif ($line =~ /\A [\x09\x20]+ ([^\x0D\x0A]*)/x) { + $val + or croak(q/Unexpected header continuation line/); + next unless length $1; + $$val .= ' ' if length $$val; + $$val .= $1; + } + elsif ($line =~ /\A \x0D?\x0A \z/x) { + last; + } + else { + croak(q/Malformed header line: / . $Printable->($line)); + } + } + return $headers; + } -sub read_content_body { - @_ == 3 || @_ == 4 || croak(q/Usage: $handle->read_content_body(callback, response, [read_length])/); - my ($self, $cb, $response, $len) = @_; - $len ||= $response->{headers}{'content-length'}; + sub write_header_lines { + (@_ == 2 && ref $_[1] eq 'HASH') || croak(q/Usage: $handle->write_header_lines(headers)/); + my($self, $headers) = @_; - croak("No content-length in the returned response, and this " - . 
"UA doesn't implement chunking") unless defined $len; + my $buf = ''; + while (my ($k, $v) = each %$headers) { + my $field_name = lc $k; + $field_name =~ /\A [\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5A\x5E-\x7A\x7C\x7E]+ \z/x + or croak(q/Invalid HTTP header field name: / . $Printable->($field_name)); + $field_name =~ s/\b(\w)/\u$1/g; + $buf .= "$field_name: $v\x0D\x0A"; + } + $buf .= "\x0D\x0A"; + return $self->write($buf); + } - while ($len > 0) { - my $read = ($len > BUFSIZE) ? BUFSIZE : $len; - $cb->($self->read($read), $response); - $len -= $read; - } + sub read_content_body { + @_ == 3 || @_ == 4 || croak(q/Usage: $handle->read_content_body(callback, response, [read_length])/); + my ($self, $cb, $response, $len) = @_; + $len ||= $response->{headers}{'content-length'}; - return; -} + croak("No content-length in the returned response, and this " + . "UA doesn't implement chunking") unless defined $len; -sub write_content_body { - @_ == 2 || croak(q/Usage: $handle->write_content_body(request)/); - my ($self, $request) = @_; - my ($len, $content_length) = (0, $request->{headers}{'content-length'}); + while ($len > 0) { + my $read = ($len > BUFSIZE) ? 
BUFSIZE : $len; + $cb->($self->read($read), $response); + $len -= $read; + } - $len += $self->write($request->{content}); + return; + } - $len == $content_length - or croak(qq/Content-Length missmatch (got: $len expected: $content_length)/); + sub write_content_body { + @_ == 2 || croak(q/Usage: $handle->write_content_body(request)/); + my ($self, $request) = @_; + my ($len, $content_length) = (0, $request->{headers}{'content-length'}); - return $len; -} + $len += $self->write($request->{content}); -sub read_response_header { - @_ == 1 || croak(q/Usage: $handle->read_response_header()/); - my ($self) = @_; + $len == $content_length + or croak(qq/Content-Length missmatch (got: $len expected: $content_length)/); - my $line = $self->readline; + return $len; + } - $line =~ /\A (HTTP\/(0*\d+\.0*\d+)) [\x09\x20]+ ([0-9]{3}) [\x09\x20]+ ([^\x0D\x0A]*) \x0D?\x0A/x - or croak(q/Malformed Status-Line: / . $Printable->($line)); + sub read_response_header { + @_ == 1 || croak(q/Usage: $handle->read_response_header()/); + my ($self) = @_; - my ($protocol, $version, $status, $reason) = ($1, $2, $3, $4); + my $line = $self->readline; - return { - status => $status, - reason => $reason, - headers => $self->read_header_lines, - protocol => $protocol, - }; -} + $line =~ /\A (HTTP\/(0*\d+\.0*\d+)) [\x09\x20]+ ([0-9]{3}) [\x09\x20]+ ([^\x0D\x0A]*) \x0D?\x0A/x + or croak(q/Malformed Status-Line: / . 
$Printable->($line)); -sub write_request_header { - @_ == 4 || croak(q/Usage: $handle->write_request_header(method, request_uri, headers)/); - my ($self, $method, $request_uri, $headers) = @_; + my ($protocol, $version, $status, $reason) = ($1, $2, $3, $4); - return $self->write("$method $request_uri HTTP/1.1\x0D\x0A") - + $self->write_header_lines($headers); -} + return { + status => $status, + reason => $reason, + headers => $self->read_header_lines, + protocol => $protocol, + }; + } -sub _do_timeout { - my ($self, $type, $timeout) = @_; - $timeout = $self->{timeout} - unless defined $timeout && $timeout >= 0; + sub write_request_header { + @_ == 4 || croak(q/Usage: $handle->write_request_header(method, request_uri, headers)/); + my ($self, $method, $request_uri, $headers) = @_; - my $fd = fileno $self->{fh}; - defined $fd && $fd >= 0 - or croak(q/select(2): 'Bad file descriptor'/); + return $self->write("$method $request_uri HTTP/1.1\x0D\x0A") + + $self->write_header_lines($headers); + } - my $initial = time; - my $pending = $timeout; - my $nfound; + sub _do_timeout { + my ($self, $type, $timeout) = @_; + $timeout = $self->{timeout} + unless defined $timeout && $timeout >= 0; - vec(my $fdset = '', $fd, 1) = 1; + my $fd = fileno $self->{fh}; + defined $fd && $fd >= 0 + or croak(q/select(2): 'Bad file descriptor'/); - while () { - $nfound = ($type eq 'read') - ? select($fdset, undef, undef, $pending) - : select(undef, $fdset, undef, $pending) ; - if ($nfound == -1) { - $! == EINTR - or croak(qq/select(2): '$!'/); - redo if !$timeout || ($pending = $timeout - (time - $initial)) > 0; - $nfound = 0; - } - last; - } - $! 
= 0; - return $nfound; -} + my $initial = time; + my $pending = $timeout; + my $nfound; -sub can_read { - @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_read([timeout])/); - my $self = shift; - return $self->_do_timeout('read', @_) -} + vec(my $fdset = '', $fd, 1) = 1; -sub can_write { - @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_write([timeout])/); - my $self = shift; - return $self->_do_timeout('write', @_) -} + while () { + $nfound = ($type eq 'read') + ? select($fdset, undef, undef, $pending) + : select(undef, $fdset, undef, $pending) ; + if ($nfound == -1) { + $! == EINTR + or croak(qq/select(2): '$!'/); + redo if !$timeout || ($pending = $timeout - (time - $initial)) > 0; + $nfound = 0; + } + last; + } + $! = 0; + return $nfound; + } + + sub can_read { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_read([timeout])/); + my $self = shift; + return $self->_do_timeout('read', @_) + } + + sub can_write { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_write([timeout])/); + my $self = shift; + return $self->_do_timeout('write', @_) + } +} # HTTP::Micro::Handle # Partially copy-pasted from IO::Socket::SSL 1.76, with some changes because # we're forced to use IO::Socket::SSL version 1.01 in yum-based distros @@ -507,6 +510,7 @@ BEGIN { } } { + use Carp qw(croak); my %dispatcher = ( issuer => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_issuer_name( shift )) }, subject => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_subject_name( shift )) }, @@ -703,7 +707,6 @@ if ( $INC{"IO/Socket/SSL.pm"} ) { } 1; -} # ########################################################################### # End HTTPMicro package # ########################################################################### diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm index ab981ade..5cfdb850 100644 --- a/lib/JSONReportFormatter.pm +++ b/lib/JSONReportFormatter.pm @@ -1,3 +1,22 @@ +# This program is copyright 2013 Percona Ireland Ltd. 
+# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# JSONReportFormatter package +# ########################################################################### { package JSONReportFormatter; use Lmo; @@ -9,17 +28,50 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0; my $have_json = eval { require JSON }; -our $pretty_json = 0; -our $sorted_json = 0; +our $pretty_json = $ENV{PTTEST_PRETTY_JSON} || 0; +our $sorted_json = $ENV{PTTEST_PRETTY_JSON} || 0; + extends qw(QueryReportFormatter); +has 'QueryRewriter' => ( + is => 'ro', + isa => 'Object', + required => 1, +); + +has 'QueryParser' => ( + is => 'ro', + isa => 'Object', + required => 1, +); + +has 'Quoter' => ( + is => 'ro', + isa => 'Object', + required => 1, +); + has _json => ( is => 'ro', init_arg => undef, builder => '_build_json', ); +has 'max_query_length' => ( + is => 'rw', + isa => 'Int', + required => 0, + default => sub { return 10_000; }, # characters, not bytes +); + +has 'max_fingerprint_length' => ( + is => 'rw', + isa => 'Int', + required => 0, + default => sub { return 5_000; }, # characters, not bytes +); + sub _build_json { return unless $have_json; return JSON->new->utf8 @@ -51,15 +103,112 @@ 
override query_report => sub { foreach my $arg ( qw(ea worst orderby groupby) ) { die "I need a $arg argument" unless defined $arg; } + my $ea = $args{ea}; + my $worst = $args{worst}; + my $orderby = $args{orderby}; + my $groupby = $args{groupby}; - my $ea = $args{ea}; - my $worst = $args{worst}; - + my $results = $ea->results(); my @attribs = @{$ea->get_attributes()}; - my %string_args = map { $_ => 1 } qw( db host arg user bytes pos_in_log ); - - my @queries; + my $q = $self->Quoter; + my $qr = $self->QueryRewriter; + + # ######################################################################## + # Global data + # ######################################################################## + my $global_data = { + metrics => {}, + files => $args{files}, + ($args{resume} && scalar keys %{$args{resume}} ? (resume => $args{resume}) : ()), + }; + + # Get global count + my $global_cnt = $results->{globals}->{$orderby}->{cnt} || 0; + my $global_unq = scalar keys %{$results->{classes}}; + + # Calculate QPS (queries per second) by looking at the min/max timestamp. 
+ my ($qps, $conc) = (0, 0); + if ( $global_cnt && $results->{globals}->{ts} + && ($results->{globals}->{ts}->{max} || '') + gt ($results->{globals}->{ts}->{min} || '') ) + { + eval { + my $min = parse_timestamp($results->{globals}->{ts}->{min}); + my $max = parse_timestamp($results->{globals}->{ts}->{max}); + my $diff = unix_timestamp($max) - unix_timestamp($min); + $qps = $global_cnt / ($diff || 1); + $conc = $results->{globals}->{$orderby}->{sum} / $diff; + }; + } + + $global_data->{query_count} = $global_cnt; + $global_data->{unique_query_count} = $global_unq; + $global_data->{queries_per_second} = $qps if $qps; + $global_data->{concurrency} = $conc if $conc; + + my %hidden_attrib = ( + arg => 1, + fingerprint => 1, + pos_in_log => 1, + ts => 1, + ); + + foreach my $attrib ( grep { !$hidden_attrib{$_} } @attribs ) { + my $type = $ea->type_for($attrib) || 'string'; + next if $type eq 'string'; + next unless exists $results->{globals}->{$attrib}; + + my $store = $results->{globals}->{$attrib}; + my $metrics = $ea->stats()->{globals}->{$attrib}; + my $int = $attrib =~ m/(?:time|wait)$/ ? 0 : 1; + + # Be careful of Perl references: changing $attrib really changes it; + # it's not a local copy or copy-on-write. + my $real_attrib = $attrib eq 'bytes' ? 
'Query_length' : $attrib; + + if ( $type eq 'num' ) { + foreach my $m ( qw(sum min max) ) { + if ( $int ) { + $global_data->{metrics}->{$real_attrib}->{$m} + = sprintf('%d', $store->{$m} || 0); + } + else { # microsecond + $global_data->{metrics}->{$real_attrib}->{$m} + = sprintf('%.6f', $store->{$m} || 0); + } + } + foreach my $m ( qw(pct_95 stddev median) ) { + if ( $int ) { + $global_data->{metrics}->{$real_attrib}->{$m} + = sprintf('%d', $metrics->{$m} || 0); + } + else { # microsecond + $global_data->{metrics}->{$real_attrib}->{$m} + = sprintf('%.6f', $metrics->{$m} || 0); + } + } + if ( $int ) { + $global_data->{metrics}->{$real_attrib}->{avg} + = sprintf('%d', $store->{sum} / $store->{cnt}); + } + else { + $global_data->{metrics}->{$real_attrib}->{avg} + = sprintf('%.6f', $store->{sum} / $store->{cnt}); + } + } + elsif ( $type eq 'bool' ) { + my $store = $results->{globals}->{$real_attrib}; + $global_data->{metrics}->{$real_attrib}->{cnt} + = sprintf('%d', $store->{sum}); + } + } + + # ######################################################################## + # Query class data + # ######################################################################## + + my @classes; foreach my $worst_info ( @$worst ) { my $item = $worst_info->[0]; my $stats = $ea->results->{classes}->{$item}; @@ -67,17 +216,30 @@ override query_report => sub { my $all_log_pos = $ea->{result_classes}->{$item}->{pos_in_log}->{all}; my $times_seen = sum values %$all_log_pos; - - my %class = ( - sample => $sample->{arg}, - fingerprint => $item, - checksum => make_checksum($item), - cnt => $times_seen, - ); - + + # Distill the query. + my $distill = $groupby eq 'fingerprint' ? 
$qr->distill($sample->{arg}) + : undef; + my $fingerprint = substr($item, 0, $self->max_fingerprint_length); + my $checksum = make_checksum($item); + my $class = { + checksum => $checksum, + fingerprint => $fingerprint, + distillate => $distill, + attribute => $groupby, + query_count => $times_seen, + example => { + query => substr($sample->{arg}, 0, $self->max_query_length), + ts => $sample->{ts} ? parse_timestamp($sample->{ts}) : undef, + }, + }; + my %metrics; foreach my $attrib ( @attribs ) { - $metrics{$attrib} = $ea->metrics( + my $real_attrib = $attrib eq 'bytes' ? 'Query_length' : $attrib; + next if $real_attrib eq 'Rows_affected' + && $distill && $distill =~ m/^(?:SELECT|SHOW|SET|ADMIN)/; + $metrics{$real_attrib} = $ea->metrics( attrib => $attrib, where => $item, ); @@ -88,6 +250,8 @@ override query_report => sub { delete $metrics{$attrib}; next; } + delete $metrics{pos_in_log}; + delete $metrics{$attrib}->{cnt}; if ($attrib eq 'ts') { my $ts = delete $metrics{ts}; @@ -95,36 +259,133 @@ override query_report => sub { next unless defined $ts && defined $ts->{$thing}; $ts->{$thing} = parse_timestamp($ts->{$thing}); } - $class{ts_min} = $ts->{min}; - $class{ts_max} = $ts->{max}; + $class->{ts_min} = $ts->{min}; + $class->{ts_max} = $ts->{max}; } - elsif ( $string_args{$attrib} ) { - $metrics{$attrib} = { value => $metrics{$attrib}{max} }; - } - elsif ( ($ea->{type_for}->{$attrib} || '') eq 'num' ) { - # Avoid scientific notation in the metrics by forcing it to use - # six decimal places. - for my $value ( values %{$metrics{$attrib}} ) { - next unless $value; - $value = sprintf '%.6f', $value; + else { + my $type = $attrib eq 'Query_length' ? 
'num' : $ea->type_for($attrib) || 'string'; + if ( $type eq 'string' ) { + $metrics{$attrib} = { value => $metrics{$attrib}{max} }; } - # ..except for the percentage, which only needs two - if ( my $pct = $metrics{$attrib}->{pct} ) { - $metrics{$attrib}->{pct} = sprintf('%.2f', $pct); + elsif ( $type eq 'num' ) { + # Avoid scientific notation in the metrics by forcing it to use + # six decimal places. + foreach my $value ( values %{$metrics{$attrib}} ) { + next unless defined $value; + if ( $attrib =~ m/_(?:time|wait)$/ ) { + $value = sprintf('%.6f', $value); + } + else { + $value = sprintf('%d', $value); + } + } + } + elsif ( $type eq 'bool' ) { + $metrics{$attrib} = { + yes => sprintf('%d', $metrics{$attrib}->{sum}), + }; } } } - push @queries, { - class => \%class, - attributes => \%metrics, - }; + + # Add "copy-paste" info, i.e. this stuff from the regular report: + # + # Tables + # SHOW TABLE STATUS FROM `db2` LIKE 'tuningdetail_21_265507'\G + # SHOW CREATE TABLE `db2`.`tuningdetail_21_265507`\G + # SHOW TABLE STATUS FROM `db1` LIKE 'gonzo'\G + # SHOW CREATE TABLE `db1`.`gonzo`\G + # update db2.tuningdetail_21_265507 n + # inner join db1.gonzo a using(gonzo) + # set n.column1 = a.column1, n.word3 = a.word3\G + # Converted for EXPLAIN + # EXPLAIN /*!50100 PARTITIONS*/ + # select n.column1 = a.column1, n.word3 = a.word3 + # from db2.tuningdetail_21_265507 n + # inner join db1.gonzo a using(gonzo) \G + # + # The formatting isn't included, just the useful data, like: + # + # $tables = [ + # { + # create => "SHOW CREATE TABLE db.foo", + # status => "SHOW TABLE STATUS FROM db LIKE foo", + # }, + # explain => "select ..." + # ] + # + # This is called "copy-paste" because users can copy-paste these + # ready-made lines into MySQL. + my @tables; + if ( $groupby eq 'fingerprint' ) { + # Get SHOW CREATE TABLE and SHOW TABLE STATUS. + my $default_db = $sample->{db} ? $sample->{db} + : $stats->{db}->{unq} ? 
keys %{$stats->{db}->{unq}} + : undef; + my @table_names = $self->QueryParser->extract_tables( + query => $sample->{arg} || '', + default_db => $default_db, + Quoter => $q, + ); + foreach my $db_tbl ( @table_names ) { + my ( $db, $tbl ) = @$db_tbl; + my $status + = 'SHOW TABLE STATUS' + . ($db ? " FROM `$db`" : '') + . " LIKE '$tbl'\\G"; + my $create + = "SHOW CREATE TABLE " + . $q->quote(grep { $_ } @$db_tbl) + . "\\G"; + push @tables, { status => $status, create => $create }; + } + + # Convert possible non-SELECTs for EXPLAIN. + if ( $item =~ m/^(?:[\(\s]*select|insert|replace)/ ) { + if ( $item =~ m/^(?:insert|replace)/ ) { + # Cannot convert or EXPLAIN INSERT or REPLACE queries. + } + else { + # SELECT queries don't need to converted for EXPLAIN. + + # TODO: return the actual EXPLAIN plan + # $self->explain_report($query, $vals->{default_db}); + } + } + else { + # Query is not SELECT, INSERT, or REPLACE, so we can convert + # it for EXPLAIN. + my $converted = $qr->convert_to_select( + $sample->{arg} || '', + ); + if ( $converted && $converted =~ m/^[\(\s]*select/i ) { + $class->{example}->{as_select} = $converted; + } + } + } + + $class->{metrics} = \%metrics; + if ( @tables ) { + $class->{tables} = \@tables; + } + push @classes, $class; } - my $json = $self->encode_json(\@queries); - $json .= "\n" if $json !~ /\n\Z/; - return $json . 
"\n"; + # ######################################################################## + # Done, combine, encode, and return global and query class data + # ######################################################################## + my $data = { + global => $global_data, + classes => \@classes, + }; + my $json = $self->encode_json($data); + $json .= "\n" unless $json =~ /\n\Z/; + return $json; }; no Lmo; 1; } +# ########################################################################### +# End JSONReportFormatter package +# ########################################################################### diff --git a/lib/MockSth.pm b/lib/MockSth.pm index 9e6d6880..0b04b7c7 100644 --- a/lib/MockSth.pm +++ b/lib/MockSth.pm @@ -1,4 +1,4 @@ -# This program is copyright 2007-2011 Baron Schwartz, 2011 Percona Ireland Ltd. +# This program is copyright 2007-2011 Baron Schwartz, 2011 Percona Inc. # Feedback and improvements are welcome. # # THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED diff --git a/lib/MockSync.pm b/lib/MockSync.pm index 7e9ac297..41051caa 100644 --- a/lib/MockSync.pm +++ b/lib/MockSync.pm @@ -1,4 +1,4 @@ -# This program is copyright 2007-2011 Baron Schwartz, 2011 Percona Ireland Ltd. +# This program is copyright 2007-2011 Baron Schwartz, 2011 Percona Inc. # Feedback and improvements are welcome. # # THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED diff --git a/lib/MockSyncStream.pm b/lib/MockSyncStream.pm index f06e6458..a9ba9435 100644 --- a/lib/MockSyncStream.pm +++ b/lib/MockSyncStream.pm @@ -1,4 +1,4 @@ -# This program is copyright 2009-2011 Percona Ireland Ltd. +# This program is copyright 2009-2011 Percona Inc. # Feedback and improvements are welcome. 
# # THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED diff --git a/lib/Percona/Agent/Logger.pm b/lib/Percona/Agent/Logger.pm new file mode 100644 index 00000000..1fccf5ec --- /dev/null +++ b/lib/Percona/Agent/Logger.pm @@ -0,0 +1,343 @@ +# This program is copyright 2013 Percona Ireland Ltd. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. 
+# ########################################################################### +# Percona::Agent::Logger package +# ########################################################################### +package Percona::Agent::Logger; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use POSIX qw(SIGALRM); + +use Lmo; +use Transformers; +use Percona::WebAPI::Resource::LogEntry; + +Transformers->import(qw(ts)); + +has 'exit_status' => ( + is => 'rw', + isa => 'ScalarRef', + required => 1, +); + +has 'pid' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'service' => ( + is => 'rw', + isa => 'Maybe[Str]', + required => 0, + default => sub { return; }, +); + +has 'data_ts' => ( + is => 'rw', + isa => 'Maybe[Int]', + required => 0, + default => sub { return; }, +); + +has 'online_logging' => ( + is => 'ro', + isa => 'Bool', + required => 0, + default => sub { return 1 }, +); + +has 'online_logging_enabled' => ( + is => 'rw', + isa => 'Bool', + required => 0, + default => sub { return 0 }, +); + +has 'quiet' => ( + is => 'rw', + isa => 'Int', + required => 0, + default => sub { return 0 }, +); + +has '_buffer' => ( + is => 'rw', + isa => 'ArrayRef', + required => 0, + default => sub { return []; }, +); + +has '_pipe_write' => ( + is => 'rw', + isa => 'Maybe[FileHandle]', + required => 0, +); + +sub read_stdin { + my ( $t ) = @_; + + # Set the SIGALRM handler. 
+ POSIX::sigaction(
+ SIGALRM,
+ POSIX::SigAction->new(sub { die 'read timeout'; }),
+ ) or die "Error setting SIGALRM handler: $OS_ERROR";
+
+ my $timeout = 0;
+ my @lines;
+ eval {
+ alarm $t;
+ while(defined(my $line = <STDIN>)) {
+ push @lines, $line;
+ }
+ alarm 0;
+ };
+ if ( $EVAL_ERROR ) {
+ PTDEBUG && _d('Read error:', $EVAL_ERROR);
+ die $EVAL_ERROR unless $EVAL_ERROR =~ m/read timeout/;
+ $timeout = 1;
+ }
+ return unless scalar @lines || $timeout;
+ return \@lines;
+}
+
+sub start_online_logging {
+ my ($self, %args) = @_;
+ my $client = $args{client};
+ my $log_link = $args{log_link};
+ my $read_timeout = $args{read_timeout} || 3;
+
+ return unless $self->online_logging;
+
+ $self->info("Starting online logging. No more log entries will be printed here. " + . "Agent logs are accessible through the web interface.");
+
+ my $pid = open(my $pipe_write, "|-");
+
+ if ($pid) {
+ # parent
+ select $pipe_write;
+ $OUTPUT_AUTOFLUSH = 1;
+ $self->_pipe_write($pipe_write);
+ $self->online_logging_enabled(1);
+ }
+ else {
+ # child
+ my @log_entries;
+ my $n_errors = 0;
+ my $oktorun = 1;
+ QUEUE:
+ while ($oktorun) {
+ my $lines = read_stdin($read_timeout);
+ last QUEUE unless $lines;
+ LINE:
+ while ( defined(my $line = shift @$lines) ) {
+ # $line = ts,level,n_lines,message
+ my ($ts, $level, $n_lines, $msg) = $line =~ m/^([^,]+),([^,]+),([^,]+),(.+)/s;
+ if ( !$ts || !$level || !$n_lines || !$msg ) {
+ warn "$line\n";
+ next LINE;
+ }
+ if ( $n_lines > 1 ) {
+ $n_lines--; # first line
+ for ( 1..$n_lines ) {
+ $msg .= shift @$lines;
+ }
+ }
+
+ push @log_entries, Percona::WebAPI::Resource::LogEntry->new(
+ pid => $self->pid,
+ entry_ts => $ts,
+ log_level => $level,
+ message => $msg,
+ ($self->service ? (service => $self->service) : ()),
+ ($self->data_ts ? 
(data_ts => $self->data_ts) : ()),
+ );
+ } # LINE
+
+ if ( scalar @log_entries ) {
+ eval {
+ $client->post(
+ link => $log_link,
+ resources => \@log_entries,
+ );
+ };
+ if ( my $e = $EVAL_ERROR ) {
+ # Safeguard: don't spam the agent log file with errors.
+ if ( ++$n_errors <= 10 ) {
+ warn "Error sending log entry to API: $e";
+ if ( $n_errors == 10 ) {
+ my $ts = ts(time, 1); # 1=UTC
+ warn "$ts WARNING $n_errors consecutive errors, no more "
+ . "error messages will be printed until log entries "
+ . "are sent successfully again.\n";
+ }
+ }
+ }
+ else {
+ @log_entries = ();
+ $n_errors = 0;
+ }
+ } # have log entries
+
+ # Safeguard: don't use too much memory if we lose connection
+ # to the API for a long time.
+ my $n_log_entries = scalar @log_entries;
+ if ( $n_log_entries > 1_000 ) {
+ warn "$n_log_entries log entries in send buffer, "
+ . "removing first 100 to avoid excessive usage.\n";
+ @log_entries = @log_entries[100..($n_log_entries-1)];
+ }
+ } # QUEUE
+
+ if ( scalar @log_entries ) {
+ my $ts = ts(time, 1); # 1=UTC
+ warn "$ts WARNING Failed to send these log entries "
+ . "(timestamps are UTC):\n";
+ foreach my $log ( @log_entries ) {
+ warn sprintf("%s %s %s\n",
+ $log->entry_ts,
+ level_name($log->log_level),
+ $log->message,
+ );
+ }
+ }
+
+ exit 0;
+ } # child
+
+ return;
+}
+
+sub level_number {
+ my $name = shift;
+ die "No log level name given" unless $name;
+ my $number = $name eq 'DEBUG' ? 1
+ : $name eq 'INFO' ? 2
+ : $name eq 'WARNING' ? 3
+ : $name eq 'ERROR' ? 4
+ : $name eq 'FATAL' ? 5
+ : die "Invalid log level name: $name";
+}
+
+sub level_name {
+ my $number = shift;
+ die "No log level name given" unless $number;
+ my $name = $number == 1 ? 'DEBUG'
+ : $number == 2 ? 'INFO'
+ : $number == 3 ? 'WARNING'
+ : $number == 4 ? 'ERROR'
+ : $number == 5 ? 
'FATAL' + : die "Invalid log level number: $number"; +} + +sub debug { + my $self = shift; + return $self->_log('DEBUG', @_); +} + +sub info { + my $self = shift; + return $self->_log('INFO', @_); +} + +sub warning { + my $self = shift; + $self->_set_exit_status(); + return $self->_log('WARNING', @_); +} + +sub error { + my $self = shift; + $self->_set_exit_status(); + return $self->_log('ERROR', @_); +} + +sub fatal { + my $self = shift; + $self->_set_exit_status(); + $self->_log('FATAL', @_); + exit $self->exit_status; +} + +sub _set_exit_status { + my $self = shift; + # exit_status is a scalar ref + my $exit_status = $self->exit_status; # get ref + $$exit_status |= 1; # deref to set + $self->exit_status($exit_status); # save back ref + return; +} + +sub _log { + my ($self, $level, $msg) = @_; + + my $ts = ts(time, 1); # 1=UTC + my $level_number = level_number($level); + + return if $self->quiet && $level_number < $self->quiet; + + chomp($msg); + my $n_lines = 1; + $n_lines++ while $msg =~ m/\n/g; + + if ( $self->online_logging_enabled ) { + while ( defined(my $log_entry = shift @{$self->_buffer}) ) { + $self->_queue_log_entry(@$log_entry); + } + $self->_queue_log_entry($ts, $level_number, $n_lines, $msg); + } + else { + if ( $self->online_logging ) { + push @{$self->_buffer}, [$ts, $level_number, $n_lines, $msg]; + } + + if ( $level_number >= 3 ) { # warning + print STDERR "$ts $level $msg\n"; + } + else { + print STDOUT "$ts $level $msg\n"; + } + } + + return; +} + +sub _queue_log_entry { + my ($self, $ts, $log_level, $n_lines, $msg) = @_; + print "$ts,$log_level,$n_lines,$msg\n"; + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +no Lmo; +1; +# ########################################################################### +# End Percona::Agent::Logger package +# ########################################################################### diff --git a/lib/Percona/Test.pm b/lib/Percona/Test.pm new file mode 100644 index 00000000..2d2a952d --- /dev/null +++ b/lib/Percona/Test.pm @@ -0,0 +1,806 @@ +# This program is copyright 2009-2013 Percona Inc. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::Test package +# ########################################################################### +{ +# Package: Percona::Test +# PerconaTest is a collection of helper-subs for Percona Toolkit tests. +# Any file arguments (like no_diff() $expected_output) are relative to +# PERCONA_TOOLKIT_BRANCH. So passing "commont/t/samples/foo" means +# "PERCONA_TOOLKIT_BRANCH/common/t/samples/foo". Do not BAIL_OUT() because +# this terminates the *entire* test process; die instead. All +# subs are exported by default, so is the variable $trunk, so there's +# no need to import() in the test scripts. 
+package Percona::Test; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEVDEBUG => $ENV{PTDEVDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +use Test::More; +use Time::HiRes qw(sleep time); +use File::Temp qw(tempfile); +use POSIX qw(signal_h); + +require Exporter; +our @ISA = qw(Exporter); +our %EXPORT_TAGS = (); +our @EXPORT_OK = qw(); +our @EXPORT = qw( + output + full_output + load_data + load_file + slurp_file + parse_file + wait_until + wait_for + wait_until_slave_running + wait_until_no_lag + test_log_parser + test_protocol_parser + test_packet_parser + no_diff + throws_ok + remove_traces + test_bash_tool + verify_test_data_integrity + $trunk + $dsn_opts + $sandbox_version + $can_load_data +); + +our $trunk = $ENV{PERCONA_TOOLKIT_BRANCH}; + +our $sandbox_version = ''; +eval { + chomp(my $v = `$trunk/sandbox/test-env version 2>/dev/null`); + $sandbox_version = $v if $v; +}; + +our $can_load_data = can_load_data(); + +our $dsn_opts = [ + { + key => 'A', + desc => 'Default character set', + dsn => 'charset', + copy => 1, + }, + { + key => 'D', + desc => 'Database to use', + dsn => 'database', + copy => 1, + }, + { + key => 'F', + desc => 'Only read default options from the given file', + dsn => 'mysql_read_default_file', + copy => 1, + }, + { + key => 'h', + desc => 'Connect to host', + dsn => 'host', + copy => 1, + }, + { + key => 'p', + desc => 'Password to use when connecting', + dsn => 'password', + copy => 1, + }, + { + key => 'P', + desc => 'Port number to use for connection', + dsn => 'port', + copy => 1, + }, + { + key => 'S', + desc => 'Socket file to use for connection', + dsn => 'mysql_socket', + copy => 1, + }, + { + key => 't', + desc => 'Table', + dsn => undef, + copy => 1, + }, + { + key => 'u', + desc => 'User for login if not current user', + dsn => 'user', + copy => 1, + }, +]; + +# Runs code, captures and returns its 
output. +# Optional arguments: +# * file scalar: capture output to this file (default none) +# * stderr scalar: capture STDERR (default no) +# * die scalar: die if code dies (default no) +# * trf coderef: pass output to this coderef (default none) +sub output { + my ( $code, %args ) = @_; + die "I need a code argument" unless $code; + my ($file, $stderr, $die, $trf) = @args{qw(file stderr die trf)}; + + if ( $args{debug} ) { + my $retval = eval { $code->() }; + warn $EVAL_ERROR if $EVAL_ERROR; + return $retval; + } + + my $output = ''; + { + if ( $file ) { + open *output_fh, '>', $file + or die "Cannot open file $file: $OS_ERROR"; + } + else { + open *output_fh, '>', \$output + or die "Cannot capture output to variable: $OS_ERROR"; + } + local *STDOUT = *output_fh; + + # If capturing STDERR we must dynamically scope (local) STDERR + # in the outer scope of the sub. If we did, + # if ( $args{stderr} ) { local *STDERR; ... } + # then STDERR would revert to its original value outside the if + # block. + local *STDERR if $args{stderr}; # do in outer scope of this sub + *STDERR = *STDOUT if $args{stderr}; + + eval { $code->() }; + if ( $EVAL_ERROR ) { + die $EVAL_ERROR if $die; + warn $EVAL_ERROR; + } + + close *output_fh; + } + + select STDOUT; + + # Possible transform output before returning it. This doesn't work + # if output was captured to a file. + $output = $trf->($output) if $trf; + + return $output; +} + +# Load data from file and removes spaces. Used to load tcpdump dumps. +sub load_data { + my ( $file ) = @_; + $file = "$trunk/$file"; + open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + (my $data = join('', $contents =~ m/(.*)/g)) =~ s/\s+//g; + return $data; +} + +# Slurp file and return its entire contents. 
+sub load_file { + my ( $file, %args ) = @_; + $file = "$trunk/$file"; + my $contents = slurp_file($file); + chomp $contents if $args{chomp_contents}; + return $contents; +} + +sub slurp_file { + my ($file) = @_; + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + return $contents; +} + +sub parse_file { + my ( $file, $p, $ea ) = @_; + $file = "$trunk/$file"; + my @e; + eval { + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my %args = ( + next_event => sub { return <$fh>; }, + tell => sub { return tell $fh; }, + fh => $fh, + ); + while ( my $e = $p->parse_event(%args) ) { + push @e, $e; + $ea->aggregate($e) if $ea; + } + close $fh; + }; + die $EVAL_ERROR if $EVAL_ERROR; + return \@e; +} + +# Wait until code returns true. +sub wait_until { + my ( $code, $t, $max_t ) = @_; + $t ||= .20; + $max_t ||= 30; + + my $slept = 0; + while ( $slept <= $max_t ) { + return 1 if $code->(); + PTDEVDEBUG && _d('wait_until sleeping', $t); + sleep $t; + $slept += $t; + PTDEVDEBUG && _d('wait_until slept', $slept, 'of', $max_t); + } + return 0; +} + +# Wait t seconds for code to return. +sub wait_for { + my ( $code, $t ) = @_; + $t ||= 0; + my $mask = POSIX::SigSet->new(&POSIX::SIGALRM); + my $action = POSIX::SigAction->new( + sub { die }, + $mask, + ); + my $oldaction = POSIX::SigAction->new(); + sigaction(&POSIX::SIGALRM, $action, $oldaction); + eval { + alarm $t; + $code->(); + alarm 0; + }; + if ( $EVAL_ERROR ) { + # alarm was raised + return 1; + } + return 0; +} + +sub wait_for_table { + my ($dbh, $tbl, $where) = @_; + my $sql = "SELECT 1 FROM $tbl" . ($where ? 
" WHERE $where LIMIT 1" : ""); + return wait_until( + sub { + my $r; + eval { $r = $dbh->selectrow_arrayref($sql); }; + if ( $EVAL_ERROR ) { + PTDEVDEBUG && _d('Waiting on', $dbh, 'for table', $tbl, + 'error:', $EVAL_ERROR); + return 0; + } + if ( $where && (!$r || !scalar @$r) ) { + PTDEVDEBUG && _d('Waiting on', $dbh, 'for table', $tbl, + 'WHERE', $where); + return 0; + } + return 1; + }, + ); +} + +sub wait_for_files { + my (@files) = @_; + return wait_until( + sub { + foreach my $file (@files) { + if ( ! -f $file ) { + PTDEVDEBUG && _d('Waiting for file', $file); + return 0; + } + } + return 1; + }, + ); +} + +sub wait_for_sh { + my ($cmd) = @_; + return wait_until( + sub { + my $retval = system("$cmd 2>/dev/null"); + return $retval >> 8 == 0 ? 1 : 0; + } + ); +}; + +sub not_running { + my ($cmd) = @_; + PTDEVDEBUG && _d('Wait until not running:', $cmd); + return wait_until( + sub { + my $output = `ps x | grep -v grep | grep "$cmd"`; + PTDEVDEBUG && _d($output); + return 1 unless $output; + return 0; + } + ); +} + +sub _read { + my ( $fh ) = @_; + return <$fh>; +} + +sub test_log_parser { + my ( %args ) = @_; + foreach my $arg ( qw(parser file) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $p = $args{parser}; + + # Make sure caller isn't giving us something we don't understand. + # We could ignore it, but then caller might not get the results + # they expected. 
+ map { die "What is $_ for?"; } + grep { $_ !~ m/^(?:parser|misc|file|result|num_events|oktorun)$/ } + keys %args; + + my $file = "$trunk/$args{file}"; + my @e; + eval { + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my %parser_args = ( + next_event => sub { return _read($fh); }, + tell => sub { return tell($fh); }, + fh => $fh, + misc => $args{misc}, + oktorun => $args{oktorun}, + ); + while ( my $e = $p->parse_event(%parser_args) ) { + push @e, $e; + } + close $fh; + }; + + my ($base_file_name) = $args{file} =~ m/([^\/]+)$/; + is( + $EVAL_ERROR, + '', + "$base_file_name: no errors" + ); + + if ( defined $args{result} ) { + is_deeply( + \@e, + $args{result}, + "$base_file_name: results" + ) or diag(Dumper(\@e)); + } + + if ( defined $args{num_events} ) { + is( + scalar @e, + $args{num_events}, + "$base_file_name: $args{num_events} events" + ); + } + + return \@e; +} + +sub test_protocol_parser { + my ( %args ) = @_; + foreach my $arg ( qw(parser protocol file) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $parser = $args{parser}; + my $protocol = $args{protocol}; + + # Make sure caller isn't giving us something we don't understand. + # We could ignore it, but then caller might not get the results + # they expected. 
+ map { die "What is $_ for?"; } + grep { $_ !~ m/^(?:parser|protocol|misc|file|result|num_events|desc)$/ } + keys %args; + + my $file = "$trunk/$args{file}"; + my @e; + eval { + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my %parser_args = ( + next_event => sub { return _read($fh); }, + tell => sub { return tell($fh); }, + misc => $args{misc}, + ); + while ( my $p = $parser->parse_event(%parser_args) ) { + my $e = $protocol->parse_event(%parser_args, event => $p); + push @e, $e if $e; + } + close $fh; + }; + + my ($base_file_name) = $args{file} =~ m/([^\/]+)$/; + is( + $EVAL_ERROR, + '', + "$base_file_name: no errors" + ); + + if ( defined $args{result} ) { + is_deeply( + \@e, + $args{result}, + "$base_file_name: " . ($args{desc} || "results") + ) or diag(Dumper(\@e)); + } + + if ( defined $args{num_events} ) { + is( + scalar @e, + $args{num_events}, + "$base_file_name: $args{num_events} events" + ); + } + + return \@e; +} + +sub test_packet_parser { + my ( %args ) = @_; + foreach my $arg ( qw(parser file) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $parser = $args{parser}; + + # Make sure caller isn't giving us something we don't understand. + # We could ignore it, but then caller might not get the results + # they expected. + map { die "What is $_ for?"; } + grep { $_ !~ m/^(?:parser|misc|file|result|desc|oktorun)$/ } + keys %args; + + my $file = "$trunk/$args{file}"; + my @packets; + open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR"; + my %parser_args = ( + next_event => sub { return _read($fh); }, + tell => sub { return tell($fh); }, + misc => $args{misc}, + oktorun => $args{oktorun}, + ); + while ( my $packet = $parser->parse_event(%parser_args) ) { + push @packets, $packet; + } + + # raw_packet is the actual dump text from the file. It's used + # in MySQLProtocolParser but I don't think we need to double-check + # it here. It will make the results very long. 
+ foreach my $packet ( @packets ) { + delete $packet->{raw_packet}; + } + + if ( !is_deeply( + \@packets, + $args{result}, + "$args{file}" . ($args{desc} ? ": $args{desc}" : '') + ) ) { + diag(Dumper(\@packets)); + } + + return; +} + +# no_diff() compares the STDOUT output of a cmd or code to expected output. +# Returns true if there are no differences between the two outputs, +# else returns false. Dies if the cmd/code dies. Does not capture STDERR. +# Args: +# * cmd scalar or coderef: if cmd is a scalar then the +# cmd is ran via the shell. if it's a coderef then +# the code is ran. the latter is preferred because +# it generates test coverage. +# * expected_output scalar: file name relative to PERCONA_TOOLKIT_BRANCH +# * args hash: (optional) may include +# update_sample overwrite expected_output with cmd/code output +# keep_output keep last cmd/code output file +# transform_result transform the code to be compared but do not +# reflect these changes on the original file +# if update_sample is passed in +# transform_sample similar to the above, but with the sample +# file +# * trf transform cmd/code output before diff +# The sub dies if cmd or code dies. STDERR is not captured. +sub no_diff { + my ( $cmd, $expected_output, %args ) = @_; + die "I need a cmd argument" unless $cmd; + die "I need an expected_output argument" unless $expected_output; + + die "$expected_output does not exist" unless -f "$trunk/$expected_output"; + $expected_output = "$trunk/$expected_output"; + + my $tmp_file = '/tmp/percona-toolkit-test-output.txt'; + my $tmp_file_orig = '/tmp/percona-toolkit-test-output-original.txt'; + + if ( my $sed_args = $args{sed_out} ) { + `cat $expected_output | sed $sed_args > /tmp/pt-test-outfile-trf`; + $expected_output = "/tmp/pt-test-outfile-trf"; + } + + # Determine cmd type and run it. + if ( ref $cmd eq 'CODE' ) { + output($cmd, file => $tmp_file); + } + elsif ( $args{cmd_output} ) { + # Copy cmd output to tmp file so we don't with the original. 
+ open my $tmp_fh, '>', $tmp_file or die "Cannot open $tmp_file: $OS_ERROR"; + print $tmp_fh $cmd; + close $tmp_fh; + } + else { + `$cmd > $tmp_file`; + } + + # Do optional arg stuff. + `cp $tmp_file $tmp_file_orig`; + if ( my $trf = $args{trf} ) { + `$trf $tmp_file_orig > $tmp_file`; + } + if ( my $post_pipe = $args{post_pipe} ) { + `cat $tmp_file | $post_pipe > $tmp_file-2`; + `mv $tmp_file-2 $tmp_file`; + } + if ( my $sed_args = $args{sed} ) { + foreach my $sed_args ( @{$args{sed}} ) { + `cat $tmp_file | sed $sed_args > $tmp_file-2`; + `mv $tmp_file-2 $tmp_file`; + } + } + if ( defined(my $sort_args = $args{sort}) ) { + `cat $tmp_file | sort $sort_args > $tmp_file-2`; + `mv $tmp_file-2 $tmp_file`; + } + + my $res_file = $tmp_file; + if ( $args{transform_result} ) { + (undef, $res_file) = tempfile(); + output( + sub { $args{transform_result}->($tmp_file) }, + file => $res_file, + ); + } + + my $cmp_file = $expected_output; + if ( $args{transform_sample} ) { + (undef, $cmp_file) = tempfile(); + output( + sub { $args{transform_sample}->($expected_output) }, + file => $cmp_file, + ); + } + + # diff the outputs. + my $out = `diff $res_file $cmp_file`; + my $retval = $?; + + # diff returns 0 if there were no differences, + # so !0 = 1 = no diff in our testing parlance. + $retval = $retval >> 8; + + if ( $retval ) { + diag($out); + if ( $ENV{UPDATE_SAMPLES} || $args{update_sample} ) { + `cat $tmp_file > $expected_output`; + diag("Updated $expected_output"); + } + } + + # Remove our tmp files. 
+ `rm -f $tmp_file $tmp_file_orig /tmp/pt-test-outfile-trf >/dev/null 2>&1` + unless $ENV{KEEP_OUTPUT} || $args{keep_output}; + + if ( $res_file ne $tmp_file ) { + 1 while unlink $res_file; + } + + if ( $cmp_file ne $expected_output ) { + 1 while unlink $cmp_file; + } + + return !$retval; +} + +sub throws_ok { + my ( $code, $pat, $msg ) = @_; + eval { $code->(); }; + like ( $EVAL_ERROR, $pat, $msg ); +} + +# Remove /*percona-toolkit ...*/ trace comments from the given SQL statement(s). +# Traces are added in ChangeHandler::process_rows(). +sub remove_traces { + my ( $sql ) = @_; + my $trace_pat = qr/ \/\*percona-toolkit .+?\*\//; + if ( ref $sql && ref $sql eq 'ARRAY' ) { + map { $_ =~ s/$trace_pat//gm } @$sql; + } + else { + $sql =~ s/$trace_pat//gm; + } + return $sql; +} + +sub test_bash_tool { + my ( $tool ) = @_; + die "I need a tool argument" unless $tool; + my $outfile = "/tmp/$tool-test-results.txt"; + `rm -rf $outfile >/dev/null`; + `$trunk/util/test-bash-tool $tool > $outfile`; + print `cat $outfile`; + return; +} + +my %checksum_result_col = ( + ts => 0, + errors => 1, + diffs => 2, + rows => 3, + chunks => 4, + skipped => 5, + time => 6, + table => 7, +); +sub count_checksum_results { + my ($output, $column, $table) = @_; + + my (@res) = map { + my $line = $_; + my (@cols) = $line =~ m/(\S+)/g; + \@cols; + } + grep { + my $line = $_; + if ( !$table ) { + $line; + } + else { + $line =~ m/$table$/m ? 
$line : ''; + } + } + grep { m/^\d+\-\d+T\d\d:\d\d:\d\d\s+\d+/ } split /\n/, $output; + my $colno = $checksum_result_col{lc $column}; + die "Invalid checksum result column: $column" unless defined $colno; + my $total = 0; + map { $total += $_->[$colno] } @res; + return $total; +} + +sub normalize_checksum_results { + my ($output) = @_; + my $tmp_file = "/tmp/test-checksum-results-output"; + open my $fh, ">", $tmp_file or die "Cannot open $tmp_file: $OS_ERROR"; + printf $fh $output; + close $fh; + my $normal_output = `cat $tmp_file | awk '/^[0-9 ]/ {print \$2 " " \$3 " " \$4 " " \$5 " " \$6 " " \$8} /^[A-Z]/ {print \$0}'`; + `rm $tmp_file >/dev/null`; + return $normal_output; +} + +sub get_master_binlog_pos { + my ($dbh) = @_; + my $sql = "SHOW MASTER STATUS"; + my $ms = $dbh->selectrow_hashref($sql); + return $ms->{position}; +} + +sub get_slave_pos_relative_to_master { + my ($dbh) = @_; + my $sql = "SHOW SLAVE STATUS"; + my $ss = $dbh->selectrow_hashref($sql); + return $ss->{exec_master_log_pos}; +} + +# Like output(), but forks a process to execute the coderef. +# This is because otherwise, errors thrown during cleanup +# would be skipped. +sub full_output { + my ( $code, %args ) = @_; + die "I need a code argument" unless $code; + + local (*STDOUT, *STDERR); + require IO::File; + + my (undef, $file) = tempfile(); + open *STDOUT, '>', $file + or die "Cannot open file $file: $OS_ERROR"; + *STDOUT->autoflush(1); + + my (undef, $file2) = tempfile(); + open *STDERR, '>', $file2 + or die "Cannot open file $file2: $OS_ERROR"; + *STDERR->autoflush(1); + + my $status; + if (my $pid = fork) { + if ( my $t = $args{wait_for} ) { + # Wait for t seconds then kill the child. + sleep $t; + my $tries = 3; + # Most tools require 2 interrupts to make them stop. + while ( kill(0, $pid) && $tries-- ) { + kill SIGTERM, $pid; + sleep 0.10; + } + # Child didn't respond to SIGTERM? Then kill -9 it. 
+ kill SIGKILL, $pid if kill(0, $pid); + sleep 0.25; + } + waitpid($pid, 0); + $status = $? >> 8; + } + else { + exit $code->(); + } + close $_ or die "Cannot close $_: $OS_ERROR" for qw(STDOUT STDERR); + my $output = slurp_file($file) . slurp_file($file2); + + unlink $file; + unlink $file2; + + return ($output, $status); +} + +sub tables_used { + my ($file) = @_; + local $INPUT_RECORD_SEPARATOR = ''; + open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR"; + my %tables; + while ( defined(my $chunk = <$fh>) ) { + map { + my $db_tbl = $_; + $db_tbl =~ s/^\s*`?//; # strip leading space and ` + $db_tbl =~ s/\s*`?$//; # strip trailing space and ` + $db_tbl =~ s/`\.`/./; # strip inner `.` + $tables{$db_tbl} = 1; + } + grep { + m/(?:\w\.\w|`\.`)/ # only db.tbl, not just db + } + $chunk =~ m/(?:FROM|INTO|UPDATE)\s+(\S+)/gi; + } + return [ sort keys %tables ]; +} + +sub can_load_data { + my $output = `/tmp/12345/use -e "SELECT * FROM percona_test.load_data" 2>/dev/null`; + return ($output || '') =~ /1/; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End PerconaTest package +# ########################################################################### diff --git a/lib/Percona/Test/Mock/AgentLogger.pm b/lib/Percona/Test/Mock/AgentLogger.pm new file mode 100644 index 00000000..e3ece818 --- /dev/null +++ b/lib/Percona/Test/Mock/AgentLogger.pm @@ -0,0 +1,129 @@ +# This program is copyright 2013 Percona Ireland Ltd. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::Agent::Logger package +# ########################################################################### +package Percona::Test::Mock::AgentLogger; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ($class, %args) = @_; + my $self = { + log => $args{log}, + + exit_status => $args{exit_status}, + pid => $args{pid}, + online_logging => $args{online_logging}, + + service => undef, + data_ts => undef, + quiet => 0, + + }; + return bless $self, $class; +} + +sub service { + my $self = shift; + my $_service = shift; + $self->{service} = $_service if $_service; + return $self->{service}; +} + +sub data_ts { + my $self = shift; + my $_data_ts = shift; + $self->{data_ts} = $_data_ts if $_data_ts; + return $self->{data_ts}; +} + +sub quiet { + my $self = shift; + my $_quiet = shift; + $self->{quiet} = $_quiet if $_quiet; + return $self->{quiet}; +} + +sub start_online_logging { + my ($self, %args) = @_; + $self->_log('-', 'Called start_online_logging()'); + return; +} + +sub level_number { + my $name = shift; + die "No log level name given" unless $name; + my $number = $name eq 'DEBUG' ? 1 + : $name eq 'INFO' ? 2 + : $name eq 'WARNING' ? 3 + : $name eq 'ERROR' ? 4 + : $name eq 'FATAL' ? 
+ 5 + : die "Invalid log level name: $name"; +} + +sub level_name { + my $number = shift; + die "No log level number given" unless $number; + my $name = $number == 1 ? 'DEBUG' + : $number == 2 ? 'INFO' + : $number == 3 ? 'WARNING' + : $number == 4 ? 'ERROR' + : $number == 5 ? 'FATAL' + : die "Invalid log level number: $number"; +} + +sub debug { + my $self = shift; + return $self->_log('DEBUG', @_); +} + +sub info { + my $self = shift; + return $self->_log('INFO', @_); +} + +sub warning { + my $self = shift; + return $self->_log('WARNING', @_); +} + +sub error { + my $self = shift; + return $self->_log('ERROR', @_); +} + +sub fatal { + my $self = shift; + $self->_log('FATAL', @_); + return 255; +} + +sub _log { + my ($self, $level, $msg) = @_; + push @{$self->{log}}, "$level $msg"; + return; +} + +1; +# ########################################################################### +# End Percona::Test::Mock::AgentLogger package +# ########################################################################### diff --git a/lib/Percona/Test/Mock/UserAgent.pm b/lib/Percona/Test/Mock/UserAgent.pm new file mode 100644 index 00000000..cf201775 --- /dev/null +++ b/lib/Percona/Test/Mock/UserAgent.pm @@ -0,0 +1,71 @@ +# This program is copyright 2012-2013 Percona Inc. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. 
+# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::Test::Mock::UserAgent package +# ########################################################################### +{ +package Percona::Test::Mock::UserAgent; + +sub new { + my ($class, %args) = @_; + my $self = { + encode => $args{encode} || sub { return $_[0] }, + decode => $args{decode} || sub { return $_[0] }, + requests => [], + request_objs => [], + responses => { + get => [], + post => [], + put => [], + }, + content => { + post => [], + put => [], + }, + }; + return bless $self, $class; +} + +sub request { + my ($self, $req) = @_; + if ( scalar @{$self->{request_objs}} > 10 ) { + $self->{request_objs} = []; + } + push @{$self->{request_objs}}, $req; + my $type = lc($req->method); + push @{$self->{requests}}, uc($type) . ' ' . $req->uri; + if ( $type eq 'post' || $type eq 'put' ) { + push @{$self->{content}->{$type}}, $req->content; + } + my $r = shift @{$self->{responses}->{$type}}; + my $c = $r->{content} ? 
$self->{encode}->($r->{content}) : ''; + my $h = HTTP::Headers->new; + $h->header(%{$r->{headers}}) if exists $r->{headers}; + my $res = HTTP::Response->new( + $r->{code} || 200, + '', + $h, + $c, + ); + return $res; +} + +1; +} +# ########################################################################### +# End Percona::Test::Mock::UserAgent package +# ########################################################################### diff --git a/lib/Percona/Toolkit.pm b/lib/Percona/Toolkit.pm index fbeadccc..da72ec09 100644 --- a/lib/Percona/Toolkit.pm +++ b/lib/Percona/Toolkit.pm @@ -17,12 +17,55 @@ # ########################################################################### # Percona::Toolkit package # ########################################################################### -{ package Percona::Toolkit; -our $VERSION = '2.2.2'; + +our $VERSION = '2.2.3'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Carp qw(carp cluck); +use Data::Dumper qw(); + +require Exporter; +our @ISA = qw(Exporter); +our @EXPORT_OK = qw( + have_required_args + Dumper + _d +); + +sub have_required_args { + my ($args, @required_args) = @_; + my $have_required_args = 1; + foreach my $arg ( @required_args ) { + if ( !defined $args->{$arg} ) { + $have_required_args = 0; + carp "Argument $arg is not defined"; + } + } + cluck unless $have_required_args; # print backtrace + return $have_required_args; +} + +sub Dumper { + local $Data::Dumper::Indent = 1; + local $Data::Dumper::Sortkeys = 1; + local $Data::Dumper::Quotekeys = 0; + Data::Dumper::Dumper(@_); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} 1; -} # ########################################################################### # End Percona::Toolkit package # ########################################################################### diff --git a/lib/Percona/WebAPI/Client.pm b/lib/Percona/WebAPI/Client.pm new file mode 100644 index 00000000..286b19b3 --- /dev/null +++ b/lib/Percona/WebAPI/Client.pm @@ -0,0 +1,318 @@ +# This program is copyright 2012 codenode LLC, 2012-2013 Percona Ireland Ltd. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. 
+# ########################################################################### +# Percona::WebAPI::Client package +# ########################################################################### +{ +package Percona::WebAPI::Client; + +our $VERSION = '0.01'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +eval { + require LWP; + require JSON; +}; + +use Scalar::Util qw(blessed); + +use Lmo; +use Percona::Toolkit; +use Percona::WebAPI::Representation; +use Percona::WebAPI::Exception::Request; +use Percona::WebAPI::Exception::Resource; + +Percona::WebAPI::Representation->import(qw(as_json)); +Percona::Toolkit->import(qw(_d Dumper have_required_args)); + +has 'api_key' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'entry_link' => ( + is => 'rw', + isa => 'Str', + required => 0, + default => sub { return 'https://cloud-api.percona.com' }, +); + +has 'ua' => ( + is => 'rw', + isa => 'Object', + lazy => 1, + required => 0, + builder => '_build_ua', +); + +has 'response' => ( + is => 'rw', + isa => 'Object', + required => 0, + default => undef, +); + +sub _build_ua { + my $self = shift; + my $ua = LWP::UserAgent->new; + $ua->agent("Percona::WebAPI::Client/$Percona::WebAPI::Client::VERSION"); + $ua->default_header('Content-Type', 'application/json'); + $ua->default_header('X-Percona-API-Key', $self->api_key); + return $ua; +} + +sub get { + my ($self, %args) = @_; + + have_required_args(\%args, qw( + link + )) or die; + my ($link) = $args{link}; + + # Get the resources at the link. + eval { + $self->_request( + method => 'GET', + link => $link, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if (blessed($e) && $e->isa('Percona::WebAPI::Exception::Request')) { + die $e; + } + else { + die "Unknown error: $e"; + } + } + + # The resource should be represented as JSON, decode it. 
+ my $resource = eval { + JSON::decode_json($self->response->content); + }; + if ( $EVAL_ERROR ) { + warn sprintf "Error decoding resource: %s: %s", + $self->response->content, + $EVAL_ERROR; + return; + } + + # If the server tells us the resource's type, create a new object + # of that type. Else, if there's no type, there's no resource, so + # we should have received links. This usually only happens for the + # entry link. The returned resource objects ref may be scalar or + # an arrayref; the caller should know. + my $resource_objects; + if ( my $type = $self->response->headers->header('X-Percona-Resource-Type') ) { + eval { + $type = "Percona::WebAPI::Resource::$type"; + if ( ref $resource eq 'ARRAY' ) { + PTDEBUG && _d('Got a list of', $type, 'resources'); + $resource_objects = []; + foreach my $attribs ( @$resource ) { + my $obj = $type->new(%$attribs); + push @$resource_objects, $obj; + } + } + else { + PTDEBUG && _d('Got a', $type, 'resource', Dumper($resource)); + $resource_objects = $type->new(%$resource); + } + }; + if ( my $e = $EVAL_ERROR ) { + die Percona::WebAPI::Exception::Resource->new( + type => $type, + link => $link, + data => (ref $resource eq 'ARRAY' ? $resource : [ $resource ]), + error => $e, + ); + } + } + elsif ( exists $resource->{links} ) { + # Lie to the caller: this isn't an object, but the caller can + # treat it like one, e.g. my $links = $api->get(); + # then access $links->{self}. A Links object couldn't have + # dynamic attribs anyway, so no use having a real Links obj. + $resource_objects = $resource->{links}; + } + else { + warn "Did not get X-Percona-Resource-Type or links from $link\n"; + } + + return $resource_objects; +} + +# For a successful POST, the server sets the Location header with +# the URI of the newly created resource. 
+sub post { + my $self = shift; + $self->_set( + @_, + method => 'POST', + ); + return $self->response->header('Location'); +} + +sub put { + my $self = shift; + $self->_set( + @_, + method => 'PUT', + ); + return $self->response->header('Location'); +} + +sub delete { + my ($self, %args) = @_; + have_required_args(\%args, qw( + link + )) or die; + my ($link) = $args{link}; + + eval { + $self->_request( + method => 'DELETE', + link => $link, + headers => { 'Content-Length' => 0 }, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if (blessed($e) && $e->isa('Percona::WebAPI::Exception::Request')) { + die $e; + } + else { + die "Unknown error: $e"; + } + } + + return; +} + +# Low-level POST and PUT handler. +sub _set { + my ($self, %args) = @_; + have_required_args(\%args, qw( + method + resources + link + )) or die; + my $method = $args{method}; + my $res = $args{resources}; + my $link = $args{link}; + + # Optional args + my $headers = $args{headers}; + + my $content = ''; + if ( ref($res) eq 'ARRAY' ) { + PTDEBUG && _d('List of resources'); + $content = '[' . join(",\n", map { as_json($_) } @$res) . ']'; + } + elsif ( ref($res) ) { + PTDEBUG && _d('Resource object'); + $content = as_json($res); + } + elsif ( $res !~ m/\n/ && -f $res ) { + PTDEBUG && _d('List of resources in file', $res); + $content = '['; + my $data = do { + local $INPUT_RECORD_SEPARATOR = undef; + open my $fh, '<', $res + or die "Error opening $res: $OS_ERROR"; + <$fh>; + }; + $data =~ s/,?\s*$/]/; + $content .= $data; + } + else { + PTDEBUG && _d('Resource text'); + $content = $res; + } + + eval { + $self->_request( + method => $method, + link => $link, + content => $content, + headers => $headers, + ); + }; + if ( my $e = $EVAL_ERROR ) { + if (blessed($e) && $e->isa('Percona::WebAPI::Exception::Request')) { + die $e; + } + else { + die "Unknown error: $e"; + } + } + + return; +} + +# Low-level HTTP request handler for all methods. Sets $self->response +# from the request. 
Returns nothing on success (HTTP status 2xx-3xx), +# else throws an Percona::WebAPI::Exception::Request. +sub _request { + my ($self, %args) = @_; + + have_required_args(\%args, qw( + method + link + )) or die; + my $method = $args{method}; + my $link = $args{link}; + + # Optional args + my $content = $args{content}; + my $headers = $args{headers}; + + my $req = HTTP::Request->new($method => $link); + if ( $content ) { + $req->content($content); + } + if ( $headers ) { + map { $req->header($_ => $headers->{$_}) } keys %$headers; + } + PTDEBUG && _d('Request', $method, $link, Dumper($req)); + + my $response = $self->ua->request($req); + PTDEBUG && _d('Response', Dumper($response)); + + $self->response($response); + + if ( !($response->code >= 200 && $response->code < 400) ) { + die Percona::WebAPI::Exception::Request->new( + method => $method, + url => $link, + content => $content, + status => $response->code, + error => "Failed to $method $link", + ); + } + + return; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Client package +# ########################################################################### diff --git a/lib/Percona/WebAPI/Exception/Request.pm b/lib/Percona/WebAPI/Exception/Request.pm new file mode 100644 index 00000000..5958ecb8 --- /dev/null +++ b/lib/Percona/WebAPI/Exception/Request.pm @@ -0,0 +1,69 @@ +# This program is copyright 2012-2013 Percona Inc. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. 
On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::WebAPI::Exception::Request package +# ########################################################################### +{ +package Percona::WebAPI::Exception::Request; + +use Lmo; +use overload '""' => \&as_string; + +has 'method' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'url' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'content' => ( + is => 'ro', + isa => 'Maybe[Str]', + required => 0, +); + +has 'status' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'error' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +sub as_string { + my $self = shift; + chomp(my $error = $self->error); + $error =~ s/\n/ /g; + return sprintf "%s\nRequest: %s %s %s\nStatus: %d\n", + $error, $self->method, $self->url, $self->content || '', $self->status; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Exception::Request package +# ########################################################################### diff --git a/lib/Percona/WebAPI/Exception/Resource.pm b/lib/Percona/WebAPI/Exception/Resource.pm new file mode 100644 index 00000000..3aa8d3d3 --- /dev/null +++ b/lib/Percona/WebAPI/Exception/Resource.pm @@ -0,0 +1,66 @@ +# This program is copyright 2012-2013 Percona Inc. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::WebAPI::Exception::Resource package +# ########################################################################### +{ +package Percona::WebAPI::Exception::Resource; + +use Lmo; +use overload '""' => \&as_string; +use Data::Dumper; + +has 'type' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'link' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'data' => ( + is => 'ro', + isa => 'ArrayRef', + required => 1, +); + +has 'error' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +sub as_string { + my $self = shift; + chomp(my $error = $self->error); + local $Data::Dumper::Indent = 1; + local $Data::Dumper::Sortkeys = 1; + local $Data::Dumper::Quotekeys = 0; + return sprintf "Invalid %s resource from %s:\n\n%s\nError: %s\n\n", + $self->type, $self->link, Dumper($self->data), $error; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Exception::Resource package +# ########################################################################### diff --git a/lib/Percona/WebAPI/Representation.pm b/lib/Percona/WebAPI/Representation.pm new file mode 100644 index 00000000..deedbf22 --- /dev/null +++ b/lib/Percona/WebAPI/Representation.pm @@ -0,0 +1,86 @@ +# This program is copyright 2012-2013 Percona Inc. +# Feedback and improvements are welcome. 
+# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::WebAPI::Representation package +# ########################################################################### +{ +package Percona::WebAPI::Representation; + +eval { + require JSON; +}; + +require Exporter; +our @ISA = qw(Exporter); +our @EXPORT_OK = qw( + as_hashref + as_json + as_config +); + +sub as_hashref { + my ($resource, %args) = @_; + + # Copy the object into a new hashref. + my $as_hashref = { %$resource }; + + # Delete the links because they're just for client-side use + # and the caller should be sending this object, not getting it. + # But sometimes for testing we want to keep the links. + if ( !defined $args{with_links} || !$args{with_links} ) { + delete $as_hashref->{links}; + } + + return $as_hashref; +} + +sub as_json { + my ($resource, %args) = @_; + + my $json = $args{json} || JSON->new; + $json->allow_blessed([]); + $json->convert_blessed([]); + + my $text = $json->encode( + ref $resource eq 'ARRAY' ? 
$resource : as_hashref($resource, %args) + ); + if ( $args{json} && $text ) { # for testing + chomp($text); + $text .= "\n"; + } + return $text; +} + +sub as_config { + my $resource = shift; + if ( !$resource->isa('Percona::WebAPI::Resource::Config') ) { + die "Only Config resources can be represented as config.\n"; + } + my $as_hashref = as_hashref($resource); + my $options = $as_hashref->{options}; + my $config = join("\n", + map { defined $options->{$_} ? "$_=$options->{$_}" : "$_" } + sort keys %$options + ) . "\n"; + return $config; +} + +1; +} +# ########################################################################### +# End Percona::WebAPI::Representation package +# ########################################################################### diff --git a/lib/Percona/WebAPI/Resource/Agent.pm b/lib/Percona/WebAPI/Resource/Agent.pm new file mode 100644 index 00000000..8c6b8c8c --- /dev/null +++ b/lib/Percona/WebAPI/Resource/Agent.pm @@ -0,0 +1,77 @@ +# This program is copyright 2012-2013 Percona Inc. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. 
+# ########################################################################### +# Percona::WebAPI::Resource::Agent package +# ########################################################################### +{ +package Percona::WebAPI::Resource::Agent; + +use Lmo; + +has 'uuid' => ( + is => 'ro', + isa => 'Str', + required => 0, +); + +has 'username' => ( + is => 'rw', + isa => 'Str', + required => 0, + default => sub { return $ENV{USER} || $ENV{LOGNAME} }, +); + +has 'hostname' => ( + is => 'rw', + isa => 'Str', + required => 0, + default => sub { + chomp(my $hostname = `hostname`); + return $hostname; + }, +); + +has 'alias' => ( + is => 'rw', + isa => 'Str', + required => 0, +); + +has 'versions' => ( + is => 'rw', + isa => 'Maybe[HashRef]', + required => 0, +); + +has 'links' => ( + is => 'rw', + isa => 'Maybe[HashRef]', + required => 0, + default => sub { return {} }, +); + +sub name { + my ($self) = @_; + return $self->alias || $self->hostname || $self->uuid || 'Unknown'; +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Resource::Agent package +# ########################################################################### diff --git a/lib/Percona/WebAPI/Resource/Config.pm b/lib/Percona/WebAPI/Resource/Config.pm new file mode 100644 index 00000000..11d06de1 --- /dev/null +++ b/lib/Percona/WebAPI/Resource/Config.pm @@ -0,0 +1,55 @@ +# This program is copyright 2012-2013 Percona Inc. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. 
On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::WebAPI::Resource::Config package +# ########################################################################### +{ +package Percona::WebAPI::Resource::Config; + +use Lmo; + +has 'ts' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'name' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'options' => ( + is => 'ro', + isa => 'HashRef', + required => 1, +); + +has 'links' => ( + is => 'rw', + isa => 'Maybe[HashRef]', + required => 0, + default => sub { return {} }, +); + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Resource::Config package +# ########################################################################### diff --git a/lib/Percona/WebAPI/Resource/LogEntry.pm b/lib/Percona/WebAPI/Resource/LogEntry.pm new file mode 100644 index 00000000..b686fcbc --- /dev/null +++ b/lib/Percona/WebAPI/Resource/LogEntry.pm @@ -0,0 +1,66 @@ +# This program is copyright 2013 Percona Inc. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. 
+# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::WebAPI::Resource::LogEntry package +# ########################################################################### +{ +package Percona::WebAPI::Resource::LogEntry; + +use Lmo; + +has 'pid' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'service' => ( + is => 'ro', + isa => 'Str', + required => 0, +); + +has 'data_ts' => ( + is => 'ro', + isa => 'Int', + required => 0, +); + +has 'entry_ts' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'log_level' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'message' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Resource::LogEntry package +# ########################################################################### diff --git a/lib/Percona/WebAPI/Resource/Service.pm b/lib/Percona/WebAPI/Resource/Service.pm new file mode 100644 index 00000000..1bac2fec --- /dev/null +++ b/lib/Percona/WebAPI/Resource/Service.pm @@ -0,0 +1,94 @@ +# This program is copyright 2012-2013 Percona Inc. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. 
+# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::WebAPI::Resource::Service package +# ########################################################################### +{ +package Percona::WebAPI::Resource::Service; + +use Lmo; + +has 'ts' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'name' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'tasks' => ( + is => 'ro', + isa => 'ArrayRef[Percona::WebAPI::Resource::Task]', + required => 1, +); + +has 'run_schedule' => ( + is => 'ro', + isa => 'Str', + required => 0, +); + +has 'spool_schedule' => ( + is => 'ro', + isa => 'Str', + required => 0, +); + +has 'meta' => ( + is => 'ro', + isa => 'Bool', + required => 0, + default => sub { return 0 }, +); + +has 'run_once' => ( + is => 'ro', + isa => 'Bool', + required => 0, + default => sub { return 0 }, +); + +has 'links' => ( + is => 'rw', + isa => 'Maybe[HashRef]', + required => 0, + default => sub { return {} }, +); + +sub BUILDARGS { + my ($class, %args) = @_; + if ( ref $args{tasks} eq 'ARRAY' ) { + my @tasks; + foreach my $run_hashref ( @{$args{tasks}} ) { + my $task = Percona::WebAPI::Resource::Task->new(%$run_hashref); + push @tasks, $task; + } + $args{tasks} = \@tasks; + } + return $class->SUPER::BUILDARGS(%args); +} + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Resource::Service package +# ########################################################################### diff --git a/lib/Percona/WebAPI/Resource/Task.pm b/lib/Percona/WebAPI/Resource/Task.pm new file mode 100644 index 00000000..782ca40d --- /dev/null +++ b/lib/Percona/WebAPI/Resource/Task.pm @@ -0,0 +1,62 @@ +# This program is copyright 2012-2013 Percona Inc. 
+# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# Percona::WebAPI::Resource::Task package +# ########################################################################### +{ +package Percona::WebAPI::Resource::Task; + +use Lmo; + +has 'name' => ( + is => 'ro', + isa => 'Str', + required => 1, +); + +has 'number' => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has 'program' => ( + is => 'ro', + isa => 'Maybe[Str]', + required => 0, +); + +has 'query' => ( + is => 'ro', + isa => 'Maybe[Str]', + required => 0, +); + +has 'output' => ( + is => 'ro', + isa => 'Maybe[Str]', + required => 0, +); + +sub TO_JSON { return { %{ shift() } }; } + +no Lmo; +1; +} +# ########################################################################### +# End Percona::WebAPI::Resource::Task package +# ########################################################################### diff --git a/lib/PerconaTest.pm b/lib/PerconaTest.pm index e573ca7c..8d96196b 100644 --- a/lib/PerconaTest.pm +++ b/lib/PerconaTest.pm @@ -33,18 +33,12 @@ use warnings FATAL => 'all'; use English qw(-no_match_vars); use constant PTDEVDEBUG => $ENV{PTDEVDEBUG} || 0; -use Percona::Toolkit; - use Carp 
qw(croak); use Test::More; use Time::HiRes qw(sleep time); use File::Temp qw(tempfile); use POSIX qw(signal_h); -use Data::Dumper; -$Data::Dumper::Indent = 1; -$Data::Dumper::Sortkeys = 1; -$Data::Dumper::Quotekeys = 0; require Exporter; our @ISA = qw(Exporter); @@ -754,15 +748,6 @@ sub get_slave_pos_relative_to_master { return $ss->{exec_master_log_pos}; } -sub _d { - my ($package, undef, $line) = caller 0; - @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } - map { defined $_ ? $_ : 'undef' } - @_; - my $t = sprintf '%.3f', time; - print STDERR "# $package:$line $PID $t ", join(' ', @_), "\n"; -} - # Like output(), but forks a process to execute the coderef. # This is because otherwise, errors thrown during cleanup # would be skipped. diff --git a/lib/QueryReportFormatter.pm b/lib/QueryReportFormatter.pm index 3b332830..0c3b64b2 100644 --- a/lib/QueryReportFormatter.pm +++ b/lib/QueryReportFormatter.pm @@ -214,7 +214,7 @@ sub hostname { sub files { my ( $self, %args ) = @_; if ( $args{files} ) { - return "# Files: " . join(', ', @{$args{files}}) . "\n"; + return "# Files: " . join(', ', map { $_->{name} } @{$args{files}}) . "\n"; } return; } diff --git a/lib/Safeguards.pm b/lib/Safeguards.pm new file mode 100644 index 00000000..1cee49af --- /dev/null +++ b/lib/Safeguards.pm @@ -0,0 +1,94 @@ +# This program is copyright 2013 Percona Ireland Ltd. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. 
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place, Suite 330, Boston, MA 02111-1307 USA.
+# ###########################################################################
+# Safeguards package
+# ###########################################################################
+package Safeguards;
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+sub new {
+   my ($class, %args) = @_;
+   my $self = {
+      disk_bytes_free => $args{disk_bytes_free} || 104857600, # 100 MiB
+      disk_pct_free   => $args{disk_pct_free}   || 5,
+   };
+   return bless $self, $class;
+}
+
+sub get_disk_space {
+   my ($self, %args) = @_;
+   my $filesystem = $args{filesystem} || $ENV{PWD};
+
+   # Filesystem 1024-blocks Used     Available Capacity Mounted on
+   # /dev/disk0s2 118153176 94409664 23487512  81%      /
+   my $disk_space = `df -P -k "$filesystem"`;
+   chomp($disk_space) if $disk_space;
+   PTDEBUG && _d('Disk space on', $filesystem, $disk_space);
+
+   return $disk_space;
+}
+
+sub check_disk_space {
+   my ($self, %args) = @_;
+   my $disk_space = $args{disk_space};
+   PTDEBUG && _d("Checking disk space:\n", $disk_space);
+
+   # There may be other info, so extract just the partition line,
+   # i.e. the first line starting with /, as in:
+   # Filesystem 1024-blocks Used     Available Capacity Mounted on
+   # /dev/disk0s2 118153176 94409664 23487512  81%      /
+   my ($partition) = $disk_space =~ m/^\s*(\/.+)/m;
+   PTDEBUG && _d('Partition:', $partition);
+   die "Failed to parse partition from disk space:\n$disk_space"
+      unless $partition;
+
+   # Parse the partition line.
+   my (undef, undef, $bytes_used, $bytes_free, $pct_used, undef)
+      = $partition =~ m/(\S+)/g;
+   PTDEBUG && _d('Bytes used:', $bytes_used, 'free:', $bytes_free,
+      'Percentage used:', $pct_used);
+
+   # Convert 1024-blocks blocks to bytes.
+ $bytes_used = ($bytes_used || 0) * 1024; + $bytes_free = ($bytes_free || 0) * 1024; + + # Convert pct used to free. + $pct_used =~ s/%//; + my $pct_free = 100 - ($pct_used || 0); + + # Return true if both thresholds are ok. + return $bytes_free >= $self->{disk_bytes_free} + && $pct_free >= $self->{disk_pct_free}; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +# ########################################################################### +# End Safeguards package +# ########################################################################### diff --git a/lib/SlowLogParser.pm b/lib/SlowLogParser.pm index d1fc1b16..b645db01 100644 --- a/lib/SlowLogParser.pm +++ b/lib/SlowLogParser.pm @@ -36,6 +36,7 @@ sub new { my ( $class ) = @_; my $self = { pending => [], + last_event_offset => undef, }; return bless $self, $class; } @@ -103,6 +104,7 @@ sub parse_event { or defined($stmt = $next_event->()) ) { my @properties = ('cmd', 'Query', 'pos_in_log', $pos_in_log); + $self->{last_event_offset} = $pos_in_log; $pos_in_log = $tell->(); # If there were such lines in the file, we may have slurped > 1 event. @@ -299,9 +301,15 @@ sub parse_event { # it's been cast into a hash, duplicated keys will be gone. 
PTDEBUG && _d('Properties of event:', Dumper(\@properties)); my $event = { @properties }; - if ( $args{stats} ) { - $args{stats}->{events_read}++; - $args{stats}->{events_parsed}++; + if ( !$event->{arg} ) { + PTDEBUG && _d('Partial event, no arg'); + } + else { + $self->{last_event_offset} = undef; + if ( $args{stats} ) { + $args{stats}->{events_read}++; + $args{stats}->{events_parsed}++; + } } return $event; } # EVENT diff --git a/lib/VersionParser.pm b/lib/VersionParser.pm index cefc0e94..0a6dd496 100644 --- a/lib/VersionParser.pm +++ b/lib/VersionParser.pm @@ -37,8 +37,6 @@ use overload ( use Carp (); -our $VERSION = 0.01; - has major => ( is => 'ro', isa => 'Int', diff --git a/t/lib/Daemon.t b/t/lib/Daemon.t index 1240a4cd..f860d335 100644 --- a/t/lib/Daemon.t +++ b/t/lib/Daemon.t @@ -9,31 +9,28 @@ BEGIN { use strict; use warnings FATAL => 'all'; use English qw(-no_match_vars); + use Test::More; use Time::HiRes qw(sleep); -use File::Temp qw( tempfile ); +use File::Temp qw(tempfile); + use Daemon; -use OptionParser; use PerconaTest; -#plan skip_all => "Hm"; + use constant PTDEVDEBUG => $ENV{PTDEVDEBUG} || 0; -my $o = new OptionParser(file => "$trunk/t/lib/samples/daemonizes.pl"); -my $d = new Daemon(o=>$o); - -my $pid_file = '/tmp/daemonizes.pl.pid'; -my $log_file = '/tmp/daemonizes.output'; +my $cmd = "$trunk/t/lib/samples/daemonizes.pl"; +my $pid_file = "/tmp/pt-daemon-test.pid.$PID"; +my $log_file = "/tmp/pt-daemon-test.log.$PID"; sub rm_tmp_files() { - -e $pid_file && (unlink $pid_file || die "Error removing $pid_file"); - -e $log_file && (unlink $log_file || die "Error removing $log_file"); + -f $pid_file && (unlink $pid_file || die "Error removing $pid_file"); + -f $log_file && (unlink $log_file || die "Error removing $log_file"); } # ############################################################################ # Test that it daemonizes, creates a PID file, and removes that PID file. 
# ############################################################################ -rm_tmp_files(); -my $cmd = "$trunk/t/lib/samples/daemonizes.pl"; my $ret_val = system("$cmd 5 --daemonize --pid $pid_file >/dev/null 2>&1"); die 'Cannot test Daemon.pm because t/daemonizes.pl is not working' unless $ret_val == 0; @@ -41,16 +38,34 @@ die 'Cannot test Daemon.pm because t/daemonizes.pl is not working' PerconaTest::wait_for_files($pid_file); my $output = `ps wx | grep '$cmd 5' | grep -v grep`; -like($output, qr/$cmd/, 'Daemonizes'); -ok(-f $pid_file, 'Creates PID file'); -my ($pid) = $output =~ /\s*(\d+)\s+/; +like( + $output, + qr/$cmd/, + 'Daemonizes' +); + +ok( + -f $pid_file, + 'Creates PID file' +); + +my ($pid) = $output =~ /^\s*(\d+)\s+/; $output = slurp_file($pid_file); -is($output, $pid, 'PID file has correct PID'); +chomp($output) if $output; + +is( + $output, + $pid, + 'PID file has correct PID' +); # Wait until the process goes away PerconaTest::wait_until(sub { !kill(0, $pid) }); -ok(! -f $pid_file, 'Removes PID file upon exit'); +ok( + ! 
-f $pid_file, + 'Removes PID file upon exit' +); # ############################################################################ # Check that STDOUT can be redirected @@ -59,10 +74,19 @@ rm_tmp_files(); system("$cmd 0 --daemonize --log $log_file"); PerconaTest::wait_for_files($log_file); -ok(-f $log_file, 'Log file exists'); + +ok( + -f $log_file, + 'Log file exists' +); $output = slurp_file($log_file); -like($output, qr/STDOUT\nSTDERR\n/, 'STDOUT and STDERR went to log file'); + +like( + $output, + qr/STDOUT\nSTDERR\n/, + 'STDOUT and STDERR went to log file' +); my $log_size = -s $log_file; PTDEVDEBUG && PerconaTest::_d('log size', $log_size); @@ -71,6 +95,7 @@ PTDEVDEBUG && PerconaTest::_d('log size', $log_size); system("$cmd 0 --daemonize --log $log_file"); PerconaTest::wait_until(sub { -s $log_file > $log_size }); $output = slurp_file($log_file); + like( $output, qr/STDOUT\nSTDERR\nSTDOUT\nSTDERR\n/, @@ -82,6 +107,7 @@ like( # ########################################################################## rm_tmp_files(); diag(`touch $pid_file`); + ok( -f $pid_file, 'PID file already exists' @@ -90,7 +116,7 @@ ok( $output = `$cmd 2 --daemonize --pid $pid_file 2>&1`; like( $output, - qr{The PID file $pid_file already exists}, + qr{PID file $pid_file exists}, 'Dies if PID file already exists' ); @@ -182,7 +208,7 @@ like( like( slurp_file($tempfile), - qr/$pid, is not running/, + qr/Overwriting PID file $pid_file because PID $pid is not running/, 'Says that old PID is not running (issue 419)' ); @@ -209,54 +235,55 @@ chomp($pid = slurp_file($pid_file)); $output = `$cmd 0 --daemonize --pid $pid_file 2>&1`; like( $output, - qr/$pid, is running/, + qr/PID file $pid_file exists and PID $pid is running/, 'Says that PID is running (issue 419)' ); -kill SIGKILL => $pid - if $pid; +if ( $pid ) { + kill 9, $pid; +} -sleep 1; +sleep 0.25; rm_tmp_files(); # ############################################################################# # Test auto-PID file removal without having to 
daemonize (for issue 391). # ############################################################################# +my $pid_file2 = "/tmp/pt-daemon-test.pid2.$PID"; { - @ARGV = qw(--pid /tmp/d2.pid); - $o->get_specs("$trunk/t/lib/samples/daemonizes.pl"); - $o->get_opts(); - my $d2 = new Daemon(o=>$o); - $d2->make_PID_file(); + my $d2 = Daemon->new( + pid_file => $pid_file2, + ); + $d2->run(); ok( - -f '/tmp/d2.pid', + -f $pid_file2, 'PID file for non-daemon exists' ); } # Since $d2 was locally scoped, it should have been destoryed by now. # This should have caused the PID file to be automatically removed. ok( - !-f '/tmpo/d2.pid', + !-f $pid_file2, 'PID file auto-removed for non-daemon' ); # We should still die if the PID file already exists, # even if we're not a daemon. { - `touch /tmp/d2.pid`; - @ARGV = qw(--pid /tmp/d2.pid); - $o->get_opts(); + diag(`touch $pid_file2`); eval { - my $d2 = new Daemon(o=>$o); # should die here actually - $d2->make_PID_file(); + my $d2 = Daemon->new( + pid_file => $pid_file2, + ); + $d2->run(); }; like( $EVAL_ERROR, - qr{PID file /tmp/d2.pid already exists}, + qr/PID file $pid_file2 exists/, 'Dies if PID file already exists for non-daemon' ); - diag(`rm -rf /tmp/d2.pid >/dev/null`); + unlink $pid_file2 if -f $pid_file2; } # ############################################################################# diff --git a/t/lib/HTTPMicro.t b/t/lib/HTTP/Micro.t similarity index 85% rename from t/lib/HTTPMicro.t rename to t/lib/HTTP/Micro.t index bf32e40c..703aff53 100644 --- a/t/lib/HTTPMicro.t +++ b/t/lib/HTTP/Micro.t @@ -11,7 +11,7 @@ use warnings FATAL => 'all'; use English qw(-no_match_vars); use Test::More; -use HTTPMicro; +use HTTP::Micro; local $EVAL_ERROR; eval { require HTTP::Tiny }; @@ -22,12 +22,12 @@ if ( $EVAL_ERROR ) { # Need a simple URL that won't try to do chunking. 
for my $test_url ( "http://www.percona.com/robots.txt", "https://v.percona.com" ) { my $tiny = HTTP::Tiny->new(max_redirect => 0)->request('GET', $test_url); - my $micro = HTTPMicro->new->request('GET', $test_url); + my $micro = HTTP::Micro->new->request('GET', $test_url); like( $micro->{content}, qr/^\Q$tiny->{content}/, - "HTTPMicro == HTTP::Tiny for $test_url" + "HTTP::Micro == HTTP::Tiny for $test_url" ); } diff --git a/t/lib/Percona/Toolkit.t b/t/lib/Percona/Toolkit.t index 74e2a562..7d72307a 100644 --- a/t/lib/Percona/Toolkit.t +++ b/t/lib/Percona/Toolkit.t @@ -8,10 +8,9 @@ BEGIN { use strict; use warnings FATAL => 'all'; +use English qw(-no_match_vars); use Test::More; -use IPC::Cmd qw(run can_run); - use PerconaTest; use Percona::Toolkit; diff --git a/t/lib/Percona/WebAPI/Client.t b/t/lib/Percona/WebAPI/Client.t new file mode 100644 index 00000000..5e365913 --- /dev/null +++ b/t/lib/Percona/WebAPI/Client.t @@ -0,0 +1,235 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; +use JSON; +use File::Temp qw(tempdir); + +use Percona::Test; +use Percona::Test::Mock::UserAgent; +use Percona::WebAPI::Client; +use Percona::WebAPI::Resource::Agent; +use Percona::WebAPI::Resource::Config; +use Percona::WebAPI::Resource::Service; +use Percona::WebAPI::Resource::Task; + +Percona::Toolkit->import(qw(Dumper have_required_args)); +Percona::WebAPI::Representation->import(qw(as_json as_hashref)); + +# ############################################################################# +# Create a client with a mock user-agent. 
+# ############################################################################# + +my $json = JSON->new; +$json->allow_blessed([]); +$json->convert_blessed([]); + +my $ua = Percona::Test::Mock::UserAgent->new( + encode => sub { my $c = shift; return $json->encode($c || {}) }, +); + +my $client = eval { + Percona::WebAPI::Client->new( + api_key => '123', + ua => $ua, + ); +}; + +is( + $EVAL_ERROR, + '', + 'Create client' +) or die; + +# ############################################################################# +# First thing a client should do is get the entry links. +# ############################################################################# + +my $return_links = { # what the server returns + agents => '/agents', +}; + +$ua->{responses}->{get} = [ + { + content => { + links => $return_links, + } + }, +]; + +my $links = $client->get(link => $client->entry_link); + +is_deeply( + $links, + $return_links, + "Get entry links" +) or diag(Dumper($links)); + +is_deeply( + $ua->{requests}, + [ + 'GET https://api.tools.percona.com', + ], + "1 request, 1 GET" +) or diag(Dumper($ua->{requests})); + + +# ############################################################################# +# Second, a new client will POST an Agent for itself. The entry links +# should have an "agents" link. The server response is empty but the +# URI for the new Agent resource is given by the Location header. 
+# ############################################################################# + +my $agent = Percona::WebAPI::Resource::Agent->new( + id => '123', + hostname => 'host', +); + +$ua->{responses}->{post} = [ + { + headers => { 'Location' => 'agents/5' }, + content => '', + }, +]; + +my $uri = $client->post(resources => $agent, link => $links->{agents}); + +is( + $uri, + "agents/5", + "POST Agent, got Location URI" +); + +# ############################################################################# +# After successfully creating the new Agent, the client should fetch +# the new Agent resoruce which will have links to the next step: the +# agent's config. +# ############################################################################# + +$return_links = { + self => 'agents/5', + config => 'agents/5/config', +}; + +my $content = { + %{ as_hashref($agent) }, + links => $return_links, +}; + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => $content, + }, +]; + +# Re-using $agent, i.e. updating it with the actual, newly created +# Agent resource as returned by the server with links. +$agent = $client->get(link => $uri); + +# Need to use with_links=>1 here because by as_hashref() removes +# links by default because it's usually used to encode and send +# resources, and clients never send links; but here we're using +# it for testing. +is_deeply( + as_hashref($agent, with_links => 1), + $content, + "GET Agent with links" +) or diag(Dumper(as_hashref($agent, with_links => 1))); + +# ############################################################################# +# Now the agent can get its Config. 
+# ############################################################################# + +$return_links = { + self => 'agents/5/config', + services => 'agents/5/services', +}; + +my $return_config = Percona::WebAPI::Resource::Config->new( + ts => '100', + name => 'Default', + options => {}, + links => $return_links, +); + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Config' }, + content => as_hashref($return_config, with_links => 1), + }, +]; + +my $config = $client->get(link => $agent->links->{config}); + +is_deeply( + as_hashref($config, with_links => 1), + as_hashref($return_config, with_links => 1), + "GET Config" +) or diag(Dumper(as_hashref($config, with_links => 1))); + +# ############################################################################# +# Once an agent is configured, i.e. successfully gets a Config resource, +# its Config should have a services link which returns a list of Service +# resources, each with their own links. +# ############################################################################# + +$return_links = { + 'send_data' => '/query-monitor', +}; + +my $run0 = Percona::WebAPI::Resource::Task->new( + name => 'run-pqd', + number => '0', + program => 'pt-query-digest', + options => '--output json', + output => 'spool', +); + +my $svc0 = Percona::WebAPI::Resource::Service->new( + name => 'query-monitor', + run_schedule => '1 * * * *', + spool_schedule => '2 * * * *', + tasks => [ $run0 ], + links => $return_links, +); + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Service' }, + content => [ as_hashref($svc0, with_links => 1) ], + }, +]; + +my $services = $client->get(link => $config->links->{services}); + +is( + scalar @$services, + 1, + "Got 1 service" +); + +is_deeply( + as_hashref($services->[0], with_links => 1), + as_hashref($svc0, with_links => 1), + "GET Services" +) or diag(Dumper(as_hashref($services, with_links => 1))); + +is( + $services->[0]->links->{send_data}, + 
"/query-monitor", + "send_data link for Service" +); + +# ############################################################################# +# Done. +# ############################################################################# +done_testing; diff --git a/t/lib/Percona/WebAPI/Representation.t b/t/lib/Percona/WebAPI/Representation.t new file mode 100644 index 00000000..5e6e3c25 --- /dev/null +++ b/t/lib/Percona/WebAPI/Representation.t @@ -0,0 +1,51 @@ +#!/usr/bin/perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; + +use PerconaTest; +use Percona::Toolkit; +use Percona::WebAPI::Resource::Agent; +use Percona::WebAPI::Resource::Config; +use Percona::WebAPI::Representation; + +my $agent = Percona::WebAPI::Resource::Agent->new( + id => '123', + hostname => 'pt', + versions => { + Perl => '5.10.1', + }, +); + +is( + Percona::WebAPI::Representation::as_json($agent), + q/{"versions":{"Perl":"5.10.1"},"id":"123","hostname":"pt"}/, + "as_json" +); + +my $config = Percona::WebAPI::Resource::Config->new( + ts => '100', + name => 'Default', + options => { + 'check-interval' => 60, + }, +); + +is( + Percona::WebAPI::Representation::as_config($config), + "check-interval=60\n", + "as_config" +); + +# ############################################################################# +# Done. 
+# ############################################################################# +done_testing; diff --git a/t/lib/Safeguards.t b/t/lib/Safeguards.t new file mode 100644 index 00000000..43d6db5c --- /dev/null +++ b/t/lib/Safeguards.t @@ -0,0 +1,69 @@ +#!/usr/bin/perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; + +use Safeguards; +use Percona::Test; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +my $sample = "t/lib/samples/bash/"; + +my $safeguards = Safeguards->new( + disk_bytes_free => 104857600, + disk_pct_free => 10, +); + +# Filesystem 1024-blocks Used Available Capacity Mounted on +# /dev/disk0s2 118153176 94409664 23487512 81% / +# +# Those values are in Kb, so: +# used = 94409664 (94.4G) = 96_675_495_936 bytes +# free = 23487512 (23.4G) = 24_051_212_288 bytes +# pct free = 100 - 81 = 19 % +my $df = slurp_file("$trunk/$sample/diskspace001.txt"); + +ok( + $safeguards->check_disk_space( + disk_space => $df, + ), + "diskspace001: Enough bytes and pct free" +); + +$safeguards = Safeguards->new( + disk_bytes_free => 104857600, + disk_pct_free => 20, +); + +ok( + !$safeguards->check_disk_space( + disk_space => $df, + ), + "diskspace001: Not enough pct free" +); + +$safeguards = Safeguards->new( + disk_bytes_free => 24_051_212_289, + disk_pct_free => 5, +); + +ok( + !$safeguards->check_disk_space( + disk_space => $df, + ), + "diskspace001: Not enough bytes free" +); + +done_testing; diff --git a/t/lib/samples/daemonizes.pl b/t/lib/samples/daemonizes.pl index b9319487..2f6c705a 100755 --- a/t/lib/samples/daemonizes.pl +++ b/t/lib/samples/daemonizes.pl @@ -12,7 +12,9 @@ BEGIN { use strict; use warnings FATAL => 'all'; use English 
qw(-no_match_vars); -use constant PTDEVDEBUG => $ENV{PTDEVDEBUG}; + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; +use constant PTDEVDEBUG => $ENV{PTDEVDEBUG} || 0; use Time::HiRes qw(sleep); @@ -31,22 +33,20 @@ if ( !defined $sleep_time ) { $o->usage_or_errors(); -my $daemon; -if ( $o->get('daemonize') ) { - PTDEVDEBUG && PerconaTest::_d('daemonizing'); +my $daemon = Daemon->new( + daemonize => $o->get('daemonize'), + pid_file => $o->get('pid'), + log_file => $o->get('log'), +); - $OUTPUT_AUTOFLUSH = 1; +$daemon->run(); +PTDEVDEBUG && PerconaTest::_d('daemonized'); - $daemon = new Daemon(o=>$o); - $daemon->daemonize(); - PTDEVDEBUG && PerconaTest::_d('daemonized'); +print "STDOUT\n"; +print STDERR "STDERR\n"; - print "STDOUT\n"; - print STDERR "STDERR\n"; - - PTDEVDEBUG && PerconaTest::_d('daemon sleep', $sleep_time); - sleep $sleep_time; -} +PTDEVDEBUG && PerconaTest::_d('daemon sleep', $sleep_time); +sleep $sleep_time; PTDEVDEBUG && PerconaTest::_d('daemon done'); exit; diff --git a/t/lib/samples/slowlogs/slow057.txt b/t/lib/samples/slowlogs/slow057.txt new file mode 100644 index 00000000..bf44a6be --- /dev/null +++ b/t/lib/samples/slowlogs/slow057.txt @@ -0,0 +1,8835 @@ +# Time: 130111 08:00:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.376000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5138; +# Time: 130111 08:00:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.838000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1982; +# Time: 130111 08:00:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.904000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4909; +# Time: 130111 08:00:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.048000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4580; +# Time: 130111 08:00:05 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.642000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2037; +# Time: 130111 08:00:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.389000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1210; +# Time: 130111 08:00:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.214000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5858; +# Time: 130111 08:00:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.493000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9572; +# Time: 130111 08:00:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.885000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1824; +# Time: 130111 08:00:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.086000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9207; +# Time: 130111 08:00:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.477000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2429; +# Time: 130111 08:00:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.323000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4937); +# Time: 130111 08:00:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.849000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3441); +# Time: 130111 08:00:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.858000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 681; +# Time: 130111 08:00:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.294000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete 
from t where id < 1837; +# Time: 130111 08:00:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.506000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1674; +# Time: 130111 08:00:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.627000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7071; +# Time: 130111 08:00:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.893000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5450; +# Time: 130111 08:00:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.465000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5706; +# Time: 130111 08:00:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.807000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9413); +# Time: 130111 08:00:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.198000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8278; +# Time: 130111 08:00:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.371000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1408; +# Time: 130111 08:00:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.095000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4149; +# Time: 130111 08:00:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.106000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7874; +# Time: 130111 08:00:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.598000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2946; +# Time: 130111 08:00:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.291000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4895; +# Time: 130111 08:00:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.776000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6629; +# Time: 130111 08:00:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.109000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6633; +# Time: 130111 08:01:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.769000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6127; +# Time: 130111 08:01:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.254000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4672; +# Time: 130111 08:01:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.772000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4382; +# Time: 130111 08:01:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.220000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 918; +# Time: 130111 08:01:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.251000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8372; +# Time: 130111 08:01:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.156000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2438); +# Time: 130111 08:01:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.616000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4545); +# Time: 130111 08:01:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.630000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 
7456; +# Time: 130111 08:01:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.652000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6927); +# Time: 130111 08:01:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.336000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1238; +# Time: 130111 08:01:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.699000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6383); +# Time: 130111 08:01:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.960000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 490; +# Time: 130111 08:01:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.346000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1424; +# Time: 130111 08:01:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.877000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6984; +# Time: 130111 08:01:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.725000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4081; +# Time: 130111 08:01:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.848000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 338); +# Time: 130111 08:01:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.943000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2317; +# Time: 130111 08:01:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.342000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 213); +# Time: 130111 08:01:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.932000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6020; +# Time: 130111 08:01:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.157000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9114); +# Time: 130111 08:01:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.680000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9475; +# Time: 130111 08:01:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.297000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9130; +# Time: 130111 08:01:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.282000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3922; +# Time: 130111 08:01:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.543000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4388); +# Time: 130111 08:01:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.593000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7474; +# Time: 130111 08:01:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.359000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3103; +# Time: 130111 08:01:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.834000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6055; +# Time: 130111 08:01:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.924000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8297); +# Time: 130111 08:01:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.916000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where 
id = 944; +# Time: 130111 08:02:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.265000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4638; +# Time: 130111 08:02:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.610000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3617; +# Time: 130111 08:02:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.635000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6146; +# Time: 130111 08:02:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.892000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8353; +# Time: 130111 08:02:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.513000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7256); +# Time: 130111 08:02:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.607000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2675); +# Time: 130111 08:02:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.639000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3081; +# Time: 130111 08:02:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.996000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8666); +# Time: 130111 08:02:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.658000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4589; +# Time: 130111 08:02:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.204000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9243; +# Time: 130111 08:02:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.893000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2496; +# Time: 130111 08:02:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.004000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9846; +# Time: 130111 08:02:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.951000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8270); +# Time: 130111 08:02:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.168000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7109); +# Time: 130111 08:02:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.374000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2282; +# Time: 130111 08:02:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.267000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 529; +# Time: 130111 08:02:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.723000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2420; +# Time: 130111 08:02:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.594000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5751; +# Time: 130111 08:02:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.024000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9471; +# Time: 130111 08:02:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.549000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6052; +# Time: 130111 08:02:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.012000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 
8184; +# Time: 130111 08:02:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.962000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3483; +# Time: 130111 08:02:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.971000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2261); +# Time: 130111 08:02:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.411000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6860; +# Time: 130111 08:02:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.053000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7768; +# Time: 130111 08:02:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.729000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4353; +# Time: 130111 08:02:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.045000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2041; +# Time: 130111 08:02:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.977000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6770; +# Time: 130111 08:02:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.180000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 810; +# Time: 130111 08:02:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.476000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2815; +# Time: 130111 08:02:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.370000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2855; +# Time: 130111 08:02:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.091000 
Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5050; +# Time: 130111 08:02:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.045000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9791; +# Time: 130111 08:02:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.736000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8061; +# Time: 130111 08:02:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.026000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8195; +# Time: 130111 08:02:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.740000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9041; +# Time: 130111 08:02:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.794000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5191; +# Time: 130111 08:03:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.468000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4148; +# Time: 130111 08:03:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.673000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2592); +# Time: 130111 08:03:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.574000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8230; +# Time: 130111 08:03:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.026000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3995; +# Time: 130111 08:03:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.009000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6292; +# Time: 130111 08:03:10 +# User@Host: 
[user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.165000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2403; +# Time: 130111 08:03:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.562000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3221; +# Time: 130111 08:03:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.874000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6212; +# Time: 130111 08:03:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.094000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5653); +# Time: 130111 08:03:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.956000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5645; +# Time: 130111 08:03:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.668000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1964; +# Time: 130111 08:03:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.310000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9164; +# Time: 130111 08:03:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.321000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 96; +# Time: 130111 08:03:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.385000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 253; +# Time: 130111 08:03:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.045000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8891; +# Time: 130111 08:03:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.040000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 
+select * from t2 where id < 2522; +# Time: 130111 08:03:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.846000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3162); +# Time: 130111 08:03:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.924000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1754; +# Time: 130111 08:03:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.631000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6143); +# Time: 130111 08:03:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.100000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9337; +# Time: 130111 08:03:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.209000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7992; +# Time: 130111 08:03:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.200000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6595; +# Time: 130111 08:03:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.245000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9290; +# Time: 130111 08:03:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.986000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 655); +# Time: 130111 08:03:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.497000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4684; +# Time: 130111 08:03:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.290000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6379); +# Time: 130111 08:03:52 +# User@Host: 
[user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.700000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4648); +# Time: 130111 08:03:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.221000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 650; +# Time: 130111 08:03:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.195000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5744); +# Time: 130111 08:04:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.818000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3336; +# Time: 130111 08:04:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.066000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1794); +# Time: 130111 08:04:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.290000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6935); +# Time: 130111 08:04:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.287000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9600; +# Time: 130111 08:04:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.965000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1890; +# Time: 130111 08:04:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.118000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6295; +# Time: 130111 08:04:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.167000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2410; +# Time: 130111 08:04:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.331000 Lock_time: 
0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3837; +# Time: 130111 08:04:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.058000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9589; +# Time: 130111 08:04:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.288000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3209); +# Time: 130111 08:04:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.180000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6749; +# Time: 130111 08:04:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.285000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1401; +# Time: 130111 08:04:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.156000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8947; +# Time: 130111 08:04:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.688000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3894; +# Time: 130111 08:04:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.141000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4746); +# Time: 130111 08:04:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.098000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3368; +# Time: 130111 08:04:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.427000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1550; +# Time: 130111 08:04:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.875000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 868; +# Time: 130111 08:04:44 +# User@Host: 
[user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.289000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8171; +# Time: 130111 08:04:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.083000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 855; +# Time: 130111 08:04:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.996000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2491; +# Time: 130111 08:04:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.942000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8703; +# Time: 130111 08:04:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.829000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1628; +# Time: 130111 08:04:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.596000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1773; +# Time: 130111 08:04:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.362000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1069; +# Time: 130111 08:04:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.137000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9211; +# Time: 130111 08:04:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.606000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3166; +# Time: 130111 08:05:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.733000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3798; +# Time: 130111 08:05:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.510000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where 
id < 6308; +# Time: 130111 08:05:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.231000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2546; +# Time: 130111 08:05:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.752000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7910; +# Time: 130111 08:05:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.563000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 756; +# Time: 130111 08:05:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.862000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2786; +# Time: 130111 08:05:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.876000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4735); +# Time: 130111 08:05:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.141000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6062; +# Time: 130111 08:05:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.974000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3342); +# Time: 130111 08:05:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.559000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5567; +# Time: 130111 08:05:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.313000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5859; +# Time: 130111 08:05:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.604000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 346); +# Time: 130111 08:05:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.183000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1080; +# Time: 130111 08:05:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.298000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1431; +# Time: 130111 08:05:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.138000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2030; +# Time: 130111 08:05:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.066000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 760); +# Time: 130111 08:05:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.378000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2845; +# Time: 130111 08:05:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.158000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7253; +# Time: 130111 08:05:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.337000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5498; +# Time: 130111 08:05:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.257000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2975; +# Time: 130111 08:05:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.383000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7358; +# Time: 130111 08:05:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.441000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1477; +# Time: 130111 08:05:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.208000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5536; +# Time: 
130111 08:05:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.026000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1831; +# Time: 130111 08:05:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.019000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5453; +# Time: 130111 08:05:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.535000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8583; +# Time: 130111 08:05:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.050000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6754; +# Time: 130111 08:05:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.068000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 808; +# Time: 130111 08:05:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.831000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6218; +# Time: 130111 08:05:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.634000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1621); +# Time: 130111 08:05:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.980000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5856; +# Time: 130111 08:06:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.916000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6160; +# Time: 130111 08:06:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.293000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3328; +# Time: 130111 08:06:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.447000 Lock_time: 0.000000 
Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2584; +# Time: 130111 08:06:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.963000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7814; +# Time: 130111 08:06:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.525000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1586; +# Time: 130111 08:06:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.046000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9623; +# Time: 130111 08:06:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.699000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 43); +# Time: 130111 08:06:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.483000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4051); +# Time: 130111 08:06:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.528000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6008; +# Time: 130111 08:06:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.701000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9394); +# Time: 130111 08:06:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.670000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1176; +# Time: 130111 08:06:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.693000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6980); +# Time: 130111 08:06:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.292000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7087; +# Time: 130111 
08:06:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.213000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6835); +# Time: 130111 08:06:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.427000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3677; +# Time: 130111 08:06:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.977000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8252; +# Time: 130111 08:06:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.341000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2675; +# Time: 130111 08:06:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.995000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6950; +# Time: 130111 08:06:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.928000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1947; +# Time: 130111 08:06:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.081000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8638; +# Time: 130111 08:06:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.187000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4204; +# Time: 130111 08:06:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.016000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9113); +# Time: 130111 08:06:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.112000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4017; +# Time: 130111 08:06:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.525000 Lock_time: 0.000000 
Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 553); +# Time: 130111 08:06:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.074000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8776; +# Time: 130111 08:06:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.384000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1271); +# Time: 130111 08:06:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.461000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6001; +# Time: 130111 08:06:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.856000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3862; +# Time: 130111 08:06:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.915000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6268; +# Time: 130111 08:06:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.149000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2444; +# Time: 130111 08:07:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.753000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8531; +# Time: 130111 08:07:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.467000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1379); +# Time: 130111 08:07:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.556000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4142; +# Time: 130111 08:07:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.083000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7725; +# Time: 130111 08:07:06 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.542000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1928; +# Time: 130111 08:07:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.959000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4960; +# Time: 130111 08:07:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.669000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9848; +# Time: 130111 08:07:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.608000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8775; +# Time: 130111 08:07:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.498000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7025); +# Time: 130111 08:07:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.490000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2033; +# Time: 130111 08:07:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.257000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3638); +# Time: 130111 08:07:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.852000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1219); +# Time: 130111 08:07:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.711000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5522; +# Time: 130111 08:07:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.058000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 582; +# Time: 130111 08:07:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.783000 Lock_time: 
0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 599); +# Time: 130111 08:07:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.951000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 146); +# Time: 130111 08:07:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.882000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5134); +# Time: 130111 08:07:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.226000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 997; +# Time: 130111 08:07:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.762000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5101; +# Time: 130111 08:07:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.754000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8122); +# Time: 130111 08:07:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.020000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7062; +# Time: 130111 08:07:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.110000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7331; +# Time: 130111 08:07:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.692000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5447; +# Time: 130111 08:07:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.779000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5472); +# Time: 130111 08:07:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.096000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 325; 
+# Time: 130111 08:07:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.291000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3743); +# Time: 130111 08:07:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.235000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4472); +# Time: 130111 08:07:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.664000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 753; +# Time: 130111 08:08:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.102000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4195; +# Time: 130111 08:08:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.688000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6740; +# Time: 130111 08:08:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.511000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1024; +# Time: 130111 08:08:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.521000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9601); +# Time: 130111 08:08:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.022000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7756; +# Time: 130111 08:08:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.396000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3267); +# Time: 130111 08:08:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.304000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8333; +# Time: 130111 08:08:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.291000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8769; +# Time: 130111 08:08:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.528000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5086; +# Time: 130111 08:08:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.041000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9004; +# Time: 130111 08:08:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.906000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9773); +# Time: 130111 08:08:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.736000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7840; +# Time: 130111 08:08:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.502000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7714; +# Time: 130111 08:08:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.646000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 582; +# Time: 130111 08:08:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.847000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1528; +# Time: 130111 08:08:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.056000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3858); +# Time: 130111 08:08:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.508000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1768; +# Time: 130111 08:08:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.687000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4004; +# 
Time: 130111 08:08:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.702000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2741; +# Time: 130111 08:08:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.161000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 704); +# Time: 130111 08:08:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.076000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 558); +# Time: 130111 08:08:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.474000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5904; +# Time: 130111 08:08:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.093000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8456; +# Time: 130111 08:08:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.624000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6281; +# Time: 130111 08:08:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.700000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8660; +# Time: 130111 08:08:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.035000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4354; +# Time: 130111 08:08:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.616000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5026; +# Time: 130111 08:08:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.367000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8174; +# Time: 130111 08:09:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.718000 
Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6167; +# Time: 130111 08:09:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.967000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8819); +# Time: 130111 08:09:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.648000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1797); +# Time: 130111 08:09:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.189000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4106; +# Time: 130111 08:09:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.932000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5497; +# Time: 130111 08:09:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.117000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2586; +# Time: 130111 08:09:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.015000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3505; +# Time: 130111 08:09:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.930000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8292; +# Time: 130111 08:09:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.037000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9318; +# Time: 130111 08:09:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.862000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1649; +# Time: 130111 08:09:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.001000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8478; +# Time: 130111 
08:09:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.692000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4912; +# Time: 130111 08:09:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.488000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8448; +# Time: 130111 08:09:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.000000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6646; +# Time: 130111 08:09:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.551000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4411; +# Time: 130111 08:09:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.359000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 561; +# Time: 130111 08:09:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.667000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 141; +# Time: 130111 08:09:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.557000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2022; +# Time: 130111 08:09:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.651000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1559; +# Time: 130111 08:09:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.568000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9130; +# Time: 130111 08:09:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.211000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3967); +# Time: 130111 08:09:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.848000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +insert into t (id, val) values (null, 2307); +# Time: 130111 08:09:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.487000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8661; +# Time: 130111 08:09:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.662000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 896); +# Time: 130111 08:09:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.009000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2139); +# Time: 130111 08:09:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.191000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6802; +# Time: 130111 08:09:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.445000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 88; +# Time: 130111 08:09:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.044000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6293; +# Time: 130111 08:09:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.123000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9910; +# Time: 130111 08:09:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.346000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3539); +# Time: 130111 08:09:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.801000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5124); +# Time: 130111 08:09:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.837000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2582; +# Time: 130111 
08:09:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.629000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2475; +# Time: 130111 08:09:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.265000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8098; +# Time: 130111 08:10:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.874000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6317; +# Time: 130111 08:10:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.988000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6565; +# Time: 130111 08:10:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.887000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5385); +# Time: 130111 08:10:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.821000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 165; +# Time: 130111 08:10:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.224000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8766; +# Time: 130111 08:10:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.789000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7766; +# Time: 130111 08:10:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.947000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6790; +# Time: 130111 08:10:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.485000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4823; +# Time: 130111 08:10:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.861000 Lock_time: 0.000000 Rows_sent: 
0 Rows_examined: 0 +select c from t1 where id = 4993; +# Time: 130111 08:10:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.465000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4832; +# Time: 130111 08:10:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.343000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3989); +# Time: 130111 08:10:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.601000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3871; +# Time: 130111 08:10:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.087000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2732; +# Time: 130111 08:10:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.975000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3729; +# Time: 130111 08:10:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.293000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7768; +# Time: 130111 08:10:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.401000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7745; +# Time: 130111 08:10:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.143000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7425; +# Time: 130111 08:10:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.523000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7152; +# Time: 130111 08:10:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.624000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 118; +# Time: 130111 08:10:27 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.324000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7315; +# Time: 130111 08:10:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.451000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4661; +# Time: 130111 08:10:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.003000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3352; +# Time: 130111 08:10:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.760000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 688; +# Time: 130111 08:10:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.710000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8734; +# Time: 130111 08:10:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.256000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8815; +# Time: 130111 08:10:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.878000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 557; +# Time: 130111 08:10:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.738000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9853; +# Time: 130111 08:10:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.386000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5924; +# Time: 130111 08:10:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.035000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2519; +# Time: 130111 08:10:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.104000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values 
(null, 847); +# Time: 130111 08:10:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.826000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4363; +# Time: 130111 08:10:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.429000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8939; +# Time: 130111 08:10:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.837000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5209); +# Time: 130111 08:10:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.945000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1974; +# Time: 130111 08:10:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.542000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6175; +# Time: 130111 08:10:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.104000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9765; +# Time: 130111 08:10:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.384000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7577; +# Time: 130111 08:10:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.229000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2256); +# Time: 130111 08:10:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.784000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6748); +# Time: 130111 08:11:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.870000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4039; +# Time: 130111 08:11:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.580000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1758; +# Time: 130111 08:11:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.867000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 795; +# Time: 130111 08:11:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.952000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2419); +# Time: 130111 08:11:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.418000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2276; +# Time: 130111 08:11:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.934000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6285; +# Time: 130111 08:11:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.879000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7240; +# Time: 130111 08:11:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.741000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7967); +# Time: 130111 08:11:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.233000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3884; +# Time: 130111 08:11:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.366000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1559; +# Time: 130111 08:11:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.963000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9180; +# Time: 130111 08:11:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.978000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 
5403; +# Time: 130111 08:11:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.027000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7243; +# Time: 130111 08:11:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.951000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 920; +# Time: 130111 08:11:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.909000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1350); +# Time: 130111 08:11:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.883000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4592); +# Time: 130111 08:11:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.939000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2605; +# Time: 130111 08:11:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.712000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8624; +# Time: 130111 08:11:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.868000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1036; +# Time: 130111 08:11:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.056000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6731; +# Time: 130111 08:11:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.668000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3101; +# Time: 130111 08:11:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.641000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8162; +# Time: 130111 08:11:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.970000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7704; +# Time: 130111 08:11:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.563000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1674; +# Time: 130111 08:11:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.824000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8052); +# Time: 130111 08:11:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.752000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4762; +# Time: 130111 08:11:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.710000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6010; +# Time: 130111 08:11:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.786000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3717; +# Time: 130111 08:11:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.292000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4590; +# Time: 130111 08:12:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.470000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8972); +# Time: 130111 08:12:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.402000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8414; +# Time: 130111 08:12:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.615000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7552; +# Time: 130111 08:12:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.419000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values 
(null, 3115); +# Time: 130111 08:12:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.714000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6903; +# Time: 130111 08:12:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.644000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7563; +# Time: 130111 08:12:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.704000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7594; +# Time: 130111 08:12:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.391000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8736; +# Time: 130111 08:12:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.340000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6881; +# Time: 130111 08:12:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.769000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2213; +# Time: 130111 08:12:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.465000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 977; +# Time: 130111 08:12:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.325000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2377; +# Time: 130111 08:12:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.867000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4147; +# Time: 130111 08:12:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.480000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1329; +# Time: 130111 08:12:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.640000 Lock_time: 
0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 47; +# Time: 130111 08:12:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.611000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1825; +# Time: 130111 08:12:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.112000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8948; +# Time: 130111 08:12:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.635000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6041; +# Time: 130111 08:12:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.891000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 473; +# Time: 130111 08:12:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.861000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8569; +# Time: 130111 08:12:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.078000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 127; +# Time: 130111 08:12:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.051000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7387; +# Time: 130111 08:12:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.558000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9748; +# Time: 130111 08:12:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.252000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8658; +# Time: 130111 08:12:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.275000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1268; +# Time: 130111 08:12:43 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.506000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2974; +# Time: 130111 08:12:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.880000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7195; +# Time: 130111 08:12:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.569000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5767; +# Time: 130111 08:12:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.160000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1042; +# Time: 130111 08:12:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.236000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8721; +# Time: 130111 08:12:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.294000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6870); +# Time: 130111 08:12:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.032000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3544; +# Time: 130111 08:12:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.895000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 313; +# Time: 130111 08:13:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.794000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5140; +# Time: 130111 08:13:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.519000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8237); +# Time: 130111 08:13:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.163000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 
+select * from t2 where id < 7884; +# Time: 130111 08:13:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.416000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7614; +# Time: 130111 08:13:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.196000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5525; +# Time: 130111 08:13:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.106000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9561); +# Time: 130111 08:13:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.997000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7745; +# Time: 130111 08:13:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.985000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1467; +# Time: 130111 08:13:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.853000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9205; +# Time: 130111 08:13:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.697000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4287); +# Time: 130111 08:13:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.839000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 843; +# Time: 130111 08:13:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.172000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5733; +# Time: 130111 08:13:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.666000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4129; +# Time: 130111 08:13:27 +# User@Host: [user] @ [] +# Thread_id: 1 
Schema: db1 +# Query_time: 0.927000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2693; +# Time: 130111 08:13:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.407000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 448; +# Time: 130111 08:13:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.114000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3013); +# Time: 130111 08:13:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.805000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1732; +# Time: 130111 08:13:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.394000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6367); +# Time: 130111 08:13:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.112000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3256); +# Time: 130111 08:13:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.956000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8237; +# Time: 130111 08:13:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.395000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2975; +# Time: 130111 08:13:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.205000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1924; +# Time: 130111 08:13:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.577000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4196; +# Time: 130111 08:13:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.619000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from 
t where id < 6541; +# Time: 130111 08:13:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.122000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 323; +# Time: 130111 08:13:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.102000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4220; +# Time: 130111 08:13:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.265000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5039; +# Time: 130111 08:13:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.662000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2912); +# Time: 130111 08:14:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.945000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 335; +# Time: 130111 08:14:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.893000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3650); +# Time: 130111 08:14:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.610000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 970; +# Time: 130111 08:14:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.753000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7694); +# Time: 130111 08:14:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.943000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2693; +# Time: 130111 08:14:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.918000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1114; +# Time: 130111 08:14:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: 
db1 +# Query_time: 0.434000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3254; +# Time: 130111 08:14:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.895000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2033; +# Time: 130111 08:14:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.363000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8434; +# Time: 130111 08:14:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.327000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1904; +# Time: 130111 08:14:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.363000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 157; +# Time: 130111 08:14:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.183000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8513); +# Time: 130111 08:14:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.487000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9252; +# Time: 130111 08:14:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.086000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9282; +# Time: 130111 08:14:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.501000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5099; +# Time: 130111 08:14:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.704000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7548); +# Time: 130111 08:14:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.775000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 
9906; +# Time: 130111 08:14:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.492000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2354; +# Time: 130111 08:14:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.894000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 23); +# Time: 130111 08:14:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.088000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5609; +# Time: 130111 08:14:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.545000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1860; +# Time: 130111 08:14:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.399000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2052; +# Time: 130111 08:14:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.252000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5734; +# Time: 130111 08:14:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.987000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7422; +# Time: 130111 08:14:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.040000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1680; +# Time: 130111 08:14:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.487000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4610; +# Time: 130111 08:14:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.087000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3251); +# Time: 130111 08:14:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 
0.148000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9739; +# Time: 130111 08:14:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.306000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5661); +# Time: 130111 08:14:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.143000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6524; +# Time: 130111 08:15:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.690000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5932); +# Time: 130111 08:15:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.308000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2126; +# Time: 130111 08:15:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.662000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3035; +# Time: 130111 08:15:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.006000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1991; +# Time: 130111 08:15:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.318000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5447); +# Time: 130111 08:15:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.908000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4645; +# Time: 130111 08:15:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.196000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1398; +# Time: 130111 08:15:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.381000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1201; +# 
Time: 130111 08:15:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.207000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 225); +# Time: 130111 08:15:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.745000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7920; +# Time: 130111 08:15:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.987000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6305); +# Time: 130111 08:15:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.888000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7753; +# Time: 130111 08:15:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.336000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4584; +# Time: 130111 08:15:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.920000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1694; +# Time: 130111 08:15:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.701000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8142; +# Time: 130111 08:15:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.853000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7732; +# Time: 130111 08:15:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.043000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 268; +# Time: 130111 08:15:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.439000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5913; +# Time: 130111 08:15:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.852000 
Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9151; +# Time: 130111 08:15:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.382000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9623); +# Time: 130111 08:15:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.193000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 209); +# Time: 130111 08:15:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.777000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8288; +# Time: 130111 08:15:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.402000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1080); +# Time: 130111 08:15:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.446000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9002; +# Time: 130111 08:15:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.989000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1032); +# Time: 130111 08:15:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.362000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 388; +# Time: 130111 08:15:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.768000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5055); +# Time: 130111 08:16:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.373000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 22; +# Time: 130111 08:16:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.253000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where 
id = 8354; +# Time: 130111 08:16:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.459000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7004; +# Time: 130111 08:16:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.327000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5671; +# Time: 130111 08:16:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.051000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1526); +# Time: 130111 08:16:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.970000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7755); +# Time: 130111 08:16:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.037000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3634; +# Time: 130111 08:16:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.682000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2398; +# Time: 130111 08:16:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.148000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2422; +# Time: 130111 08:16:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.732000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4476; +# Time: 130111 08:16:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.289000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2587); +# Time: 130111 08:16:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.163000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1242; +# Time: 130111 08:16:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.181000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9055; +# Time: 130111 08:16:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.260000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8318); +# Time: 130111 08:16:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.031000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6274; +# Time: 130111 08:16:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.519000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5368; +# Time: 130111 08:16:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.484000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8340; +# Time: 130111 08:16:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.326000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6655; +# Time: 130111 08:16:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.081000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9728; +# Time: 130111 08:16:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.510000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 461; +# Time: 130111 08:16:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.291000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8360; +# Time: 130111 08:16:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.468000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3206; +# Time: 130111 08:16:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.719000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7279; +# Time: 130111 
08:16:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.790000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2813; +# Time: 130111 08:16:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.175000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7493; +# Time: 130111 08:16:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.332000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1454; +# Time: 130111 08:16:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.775000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5221; +# Time: 130111 08:16:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.476000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9175; +# Time: 130111 08:16:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.019000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4247; +# Time: 130111 08:16:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.035000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9087; +# Time: 130111 08:16:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.098000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6315; +# Time: 130111 08:16:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.944000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9410); +# Time: 130111 08:16:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.505000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2989; +# Time: 130111 08:16:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.843000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +select c from t1 where id = 8050; +# Time: 130111 08:16:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.885000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4357); +# Time: 130111 08:16:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.898000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3007; +# Time: 130111 08:17:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.502000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9031; +# Time: 130111 08:17:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.143000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3738; +# Time: 130111 08:17:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.792000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5199; +# Time: 130111 08:17:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.949000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9700; +# Time: 130111 08:17:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.491000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 915; +# Time: 130111 08:17:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.995000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6736; +# Time: 130111 08:17:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.701000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3410; +# Time: 130111 08:17:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.888000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7404; +# Time: 130111 08:17:14 +# User@Host: [user] @ [] +# Thread_id: 1 
Schema: db1 +# Query_time: 0.039000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7400; +# Time: 130111 08:17:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.864000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9334; +# Time: 130111 08:17:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.461000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4554; +# Time: 130111 08:17:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.548000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7354; +# Time: 130111 08:17:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.851000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1134; +# Time: 130111 08:17:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.480000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4848; +# Time: 130111 08:17:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.886000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9173); +# Time: 130111 08:17:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.811000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1281); +# Time: 130111 08:17:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.811000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9758); +# Time: 130111 08:17:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.440000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7442); +# Time: 130111 08:17:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.574000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +delete from t where id < 1336; +# Time: 130111 08:17:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.490000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7158; +# Time: 130111 08:17:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.422000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6720; +# Time: 130111 08:17:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.864000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9206; +# Time: 130111 08:17:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.523000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5606; +# Time: 130111 08:17:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.150000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7390); +# Time: 130111 08:17:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.865000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3816; +# Time: 130111 08:17:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.722000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 13; +# Time: 130111 08:17:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.466000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2509; +# Time: 130111 08:17:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.338000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5591; +# Time: 130111 08:17:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.956000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4692; +# Time: 130111 08:18:02 +# User@Host: [user] @ [] +# Thread_id: 1 
Schema: db1 +# Query_time: 0.014000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6224; +# Time: 130111 08:18:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.349000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9630; +# Time: 130111 08:18:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.666000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5317; +# Time: 130111 08:18:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.714000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7947); +# Time: 130111 08:18:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.157000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1067; +# Time: 130111 08:18:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.031000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2540; +# Time: 130111 08:18:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.026000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7083; +# Time: 130111 08:18:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.763000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1196; +# Time: 130111 08:18:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.871000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6590; +# Time: 130111 08:18:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.734000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8463); +# Time: 130111 08:18:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.067000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 
where id = 2167; +# Time: 130111 08:18:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.332000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 169; +# Time: 130111 08:18:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.380000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8078; +# Time: 130111 08:18:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.470000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1319; +# Time: 130111 08:18:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.536000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2940; +# Time: 130111 08:18:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.346000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6992; +# Time: 130111 08:18:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.561000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9400; +# Time: 130111 08:18:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.205000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5971); +# Time: 130111 08:18:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.251000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 586; +# Time: 130111 08:18:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.215000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9030; +# Time: 130111 08:18:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.457000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9155; +# Time: 130111 08:18:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 
0.630000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9840); +# Time: 130111 08:19:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.125000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 151); +# Time: 130111 08:19:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.847000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4006; +# Time: 130111 08:19:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.104000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9366); +# Time: 130111 08:19:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.196000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8870); +# Time: 130111 08:19:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.915000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7852; +# Time: 130111 08:19:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.720000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9090; +# Time: 130111 08:19:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.328000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4923); +# Time: 130111 08:19:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.120000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4431; +# Time: 130111 08:19:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.764000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7942; +# Time: 130111 08:19:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.137000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * 
from t2 where id < 3404; +# Time: 130111 08:19:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.018000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3826; +# Time: 130111 08:19:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.875000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6013; +# Time: 130111 08:19:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.183000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4824); +# Time: 130111 08:19:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.120000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3733; +# Time: 130111 08:19:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.148000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2395; +# Time: 130111 08:19:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.244000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2366; +# Time: 130111 08:19:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.511000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5609); +# Time: 130111 08:19:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.782000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6766; +# Time: 130111 08:19:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.645000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7281; +# Time: 130111 08:19:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.258000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 643; +# Time: 130111 08:19:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.978000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6833; +# Time: 130111 08:19:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.498000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8397; +# Time: 130111 08:19:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.945000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6613; +# Time: 130111 08:19:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.911000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 135; +# Time: 130111 08:19:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.610000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1874; +# Time: 130111 08:19:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.660000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6871); +# Time: 130111 08:19:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.943000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 959; +# Time: 130111 08:19:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.100000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2178; +# Time: 130111 08:19:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.642000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 446; +# Time: 130111 08:19:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.206000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9811); +# Time: 130111 08:19:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.069000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2433; 
+# Time: 130111 08:19:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.648000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5827; +# Time: 130111 08:19:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.318000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4770; +# Time: 130111 08:19:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.014000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 880); +# Time: 130111 08:20:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.349000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9628; +# Time: 130111 08:20:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.988000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8913); +# Time: 130111 08:20:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.997000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9969); +# Time: 130111 08:20:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.266000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9385; +# Time: 130111 08:20:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.565000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7614; +# Time: 130111 08:20:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.604000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2321; +# Time: 130111 08:20:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.792000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8247; +# Time: 130111 08:20:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.861000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1755); +# Time: 130111 08:20:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.081000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9346; +# Time: 130111 08:20:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.892000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7014; +# Time: 130111 08:20:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.248000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1696; +# Time: 130111 08:20:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.398000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9790; +# Time: 130111 08:20:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.800000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8390; +# Time: 130111 08:20:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.056000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4040; +# Time: 130111 08:20:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.433000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7679; +# Time: 130111 08:20:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.673000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4399; +# Time: 130111 08:20:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.160000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3755; +# Time: 130111 08:20:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.213000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 885; +# Time: 
130111 08:20:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.283000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3498; +# Time: 130111 08:20:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.884000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9259; +# Time: 130111 08:20:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.163000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3828); +# Time: 130111 08:20:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.073000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7571); +# Time: 130111 08:20:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.883000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2956; +# Time: 130111 08:20:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.255000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1164; +# Time: 130111 08:20:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.373000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9804; +# Time: 130111 08:20:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.790000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3614; +# Time: 130111 08:20:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.491000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1925; +# Time: 130111 08:20:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.707000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8989); +# Time: 130111 08:20:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.443000 
Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3987; +# Time: 130111 08:21:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.135000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6466; +# Time: 130111 08:21:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.377000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8994); +# Time: 130111 08:21:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.223000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4871); +# Time: 130111 08:21:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.332000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8552; +# Time: 130111 08:21:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.970000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2247); +# Time: 130111 08:21:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.198000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5800; +# Time: 130111 08:21:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.316000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3312; +# Time: 130111 08:21:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.695000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1997; +# Time: 130111 08:21:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.332000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6244; +# Time: 130111 08:21:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.930000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7064; +# 
Time: 130111 08:21:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.495000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1393; +# Time: 130111 08:21:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.620000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2413; +# Time: 130111 08:21:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.413000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5086; +# Time: 130111 08:21:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.008000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6827; +# Time: 130111 08:21:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.313000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5600; +# Time: 130111 08:21:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.777000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7768; +# Time: 130111 08:21:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.134000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6337; +# Time: 130111 08:21:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.051000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7397; +# Time: 130111 08:21:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.907000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3721; +# Time: 130111 08:21:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.742000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9343; +# Time: 130111 08:21:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.136000 Lock_time: 0.000000 
Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6656); +# Time: 130111 08:21:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.444000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4706; +# Time: 130111 08:21:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.374000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5447; +# Time: 130111 08:21:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.290000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5785; +# Time: 130111 08:21:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.964000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2909; +# Time: 130111 08:21:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.826000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9502; +# Time: 130111 08:21:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.237000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4654; +# Time: 130111 08:22:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.912000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4316); +# Time: 130111 08:22:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.286000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3824; +# Time: 130111 08:22:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.445000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 52); +# Time: 130111 08:22:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.365000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6568; +# Time: 130111 08:22:14 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.856000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9552; +# Time: 130111 08:22:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.217000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 655; +# Time: 130111 08:22:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.408000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6160; +# Time: 130111 08:22:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.772000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 606; +# Time: 130111 08:22:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.701000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7854; +# Time: 130111 08:22:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.775000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3956; +# Time: 130111 08:22:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.490000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7927; +# Time: 130111 08:22:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.223000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8345; +# Time: 130111 08:22:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.030000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1423; +# Time: 130111 08:22:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.788000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6991; +# Time: 130111 08:22:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.163000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t 
where id < 2270; +# Time: 130111 08:22:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.536000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4988; +# Time: 130111 08:22:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.294000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1217); +# Time: 130111 08:22:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.221000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3066; +# Time: 130111 08:22:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.332000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1591; +# Time: 130111 08:22:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.206000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9361; +# Time: 130111 08:22:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.544000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1255); +# Time: 130111 08:22:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.406000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8122; +# Time: 130111 08:23:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.903000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1282); +# Time: 130111 08:23:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.743000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2755; +# Time: 130111 08:23:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.666000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9654; +# Time: 130111 08:23:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.434000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4094; +# Time: 130111 08:23:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.851000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 496); +# Time: 130111 08:23:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.400000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8740; +# Time: 130111 08:23:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.094000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2879; +# Time: 130111 08:23:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.666000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1786); +# Time: 130111 08:23:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.351000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6847; +# Time: 130111 08:23:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.840000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2572; +# Time: 130111 08:23:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.513000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 394); +# Time: 130111 08:23:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.026000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8861; +# Time: 130111 08:23:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.353000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5697; +# Time: 130111 08:23:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.330000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where 
id < 48; +# Time: 130111 08:23:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.101000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 52); +# Time: 130111 08:23:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.220000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 819); +# Time: 130111 08:23:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.649000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3526; +# Time: 130111 08:23:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.672000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6866; +# Time: 130111 08:23:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.698000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6409; +# Time: 130111 08:23:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.014000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1879; +# Time: 130111 08:23:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.055000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7568; +# Time: 130111 08:23:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.269000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3075; +# Time: 130111 08:23:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.249000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1423; +# Time: 130111 08:23:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.819000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6189; +# Time: 130111 08:23:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.617000 
Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1935); +# Time: 130111 08:23:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.084000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6487; +# Time: 130111 08:23:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.498000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4162; +# Time: 130111 08:23:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.453000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8787; +# Time: 130111 08:23:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.250000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2654; +# Time: 130111 08:23:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.401000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8078; +# Time: 130111 08:23:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.335000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6609; +# Time: 130111 08:23:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.323000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9217; +# Time: 130111 08:23:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.695000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6054; +# Time: 130111 08:23:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.571000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2191; +# Time: 130111 08:24:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.469000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8484; +# Time: 130111 08:24:03 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.880000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9943; +# Time: 130111 08:24:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.183000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5938; +# Time: 130111 08:24:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.495000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8897; +# Time: 130111 08:24:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.449000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 447; +# Time: 130111 08:24:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.494000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 292); +# Time: 130111 08:24:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.846000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2091; +# Time: 130111 08:24:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.629000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7156; +# Time: 130111 08:24:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.499000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2991); +# Time: 130111 08:24:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.889000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4404; +# Time: 130111 08:24:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.336000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 384; +# Time: 130111 08:24:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.921000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +insert into t (id, val) values (null, 6829); +# Time: 130111 08:24:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.089000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1143); +# Time: 130111 08:24:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.464000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1164; +# Time: 130111 08:24:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.097000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 369; +# Time: 130111 08:24:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.564000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7044; +# Time: 130111 08:24:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.753000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3054; +# Time: 130111 08:24:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.726000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9417; +# Time: 130111 08:24:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.727000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2529); +# Time: 130111 08:24:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.507000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5366; +# Time: 130111 08:24:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.668000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5337; +# Time: 130111 08:24:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.335000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8978; +# Time: 130111 08:24:44 +# User@Host: 
[user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.820000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1765; +# Time: 130111 08:24:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.373000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2288); +# Time: 130111 08:24:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.329000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4396; +# Time: 130111 08:24:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.185000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3529; +# Time: 130111 08:24:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.949000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4013; +# Time: 130111 08:24:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.420000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3560); +# Time: 130111 08:24:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.891000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1893; +# Time: 130111 08:24:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.846000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6520; +# Time: 130111 08:24:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.175000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4489; +# Time: 130111 08:25:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.183000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3189; +# Time: 130111 08:25:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.975000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +insert into t (id, val) values (null, 9936); +# Time: 130111 08:25:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.786000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3211; +# Time: 130111 08:25:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.811000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5053; +# Time: 130111 08:25:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.711000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 40; +# Time: 130111 08:25:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.418000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8977); +# Time: 130111 08:25:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.828000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6948; +# Time: 130111 08:25:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.425000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5907; +# Time: 130111 08:25:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.035000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6556; +# Time: 130111 08:25:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.614000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3355; +# Time: 130111 08:25:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.356000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5357; +# Time: 130111 08:25:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.928000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5525; +# Time: 130111 08:25:26 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.447000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8551; +# Time: 130111 08:25:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.316000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 751; +# Time: 130111 08:25:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.544000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9614; +# Time: 130111 08:25:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.796000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1528); +# Time: 130111 08:25:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.166000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3289; +# Time: 130111 08:25:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.362000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2764); +# Time: 130111 08:25:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.710000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1870; +# Time: 130111 08:25:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.382000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8095; +# Time: 130111 08:25:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.372000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3091; +# Time: 130111 08:25:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.536000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2587); +# Time: 130111 08:25:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.210000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 
0 +select * from t2 where id < 6872; +# Time: 130111 08:25:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.955000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9285; +# Time: 130111 08:25:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.192000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 734; +# Time: 130111 08:26:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.703000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6041; +# Time: 130111 08:26:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.568000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4623; +# Time: 130111 08:26:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.265000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7927; +# Time: 130111 08:26:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.220000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4883; +# Time: 130111 08:26:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.936000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9771; +# Time: 130111 08:26:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.553000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4697; +# Time: 130111 08:26:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.929000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3879); +# Time: 130111 08:26:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.152000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1078; +# Time: 130111 08:26:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.959000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8143; +# Time: 130111 08:26:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.764000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1310); +# Time: 130111 08:26:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.690000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8544; +# Time: 130111 08:26:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.594000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5482; +# Time: 130111 08:26:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.197000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4816; +# Time: 130111 08:26:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.900000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 112; +# Time: 130111 08:26:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.228000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6223; +# Time: 130111 08:26:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.086000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2482; +# Time: 130111 08:26:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.488000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2457; +# Time: 130111 08:26:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.685000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3923; +# Time: 130111 08:26:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.286000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4400; +# Time: 130111 
08:26:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.525000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4951); +# Time: 130111 08:26:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.164000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9802; +# Time: 130111 08:26:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.675000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5112; +# Time: 130111 08:26:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.038000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9021); +# Time: 130111 08:26:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.212000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2110); +# Time: 130111 08:26:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.781000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2064; +# Time: 130111 08:26:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.478000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 618; +# Time: 130111 08:26:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.090000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4425; +# Time: 130111 08:26:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.714000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1965); +# Time: 130111 08:26:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.742000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5351; +# Time: 130111 08:27:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 
0.835000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6507; +# Time: 130111 08:27:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.239000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 593; +# Time: 130111 08:27:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.247000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2138); +# Time: 130111 08:27:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.870000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9225); +# Time: 130111 08:27:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.747000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4431; +# Time: 130111 08:27:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.394000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8257; +# Time: 130111 08:27:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.720000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7567); +# Time: 130111 08:27:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.852000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1646; +# Time: 130111 08:27:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.933000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5099; +# Time: 130111 08:27:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.105000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4200); +# Time: 130111 08:27:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.621000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 
where id = 6980; +# Time: 130111 08:27:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.630000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 395; +# Time: 130111 08:27:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.695000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6871); +# Time: 130111 08:27:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.301000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5012; +# Time: 130111 08:27:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.876000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2440; +# Time: 130111 08:27:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.493000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8377; +# Time: 130111 08:27:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.049000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9850; +# Time: 130111 08:27:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.214000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9239; +# Time: 130111 08:27:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.037000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7737; +# Time: 130111 08:27:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.917000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6122; +# Time: 130111 08:27:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.335000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9009; +# Time: 130111 08:27:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 
0.636000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6384; +# Time: 130111 08:27:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.983000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2062; +# Time: 130111 08:27:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.334000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3010); +# Time: 130111 08:27:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.708000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2789; +# Time: 130111 08:27:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.602000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3116); +# Time: 130111 08:27:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.374000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3569; +# Time: 130111 08:27:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.188000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3482); +# Time: 130111 08:27:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.990000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4170; +# Time: 130111 08:27:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.955000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1876); +# Time: 130111 08:27:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.601000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6804); +# Time: 130111 08:28:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.811000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * 
from t2 where id < 7330; +# Time: 130111 08:28:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.562000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8811; +# Time: 130111 08:28:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.954000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4286; +# Time: 130111 08:28:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.119000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5974); +# Time: 130111 08:28:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.729000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4423; +# Time: 130111 08:28:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.915000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4649); +# Time: 130111 08:28:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.486000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5047); +# Time: 130111 08:28:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.669000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7733; +# Time: 130111 08:28:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.713000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6238; +# Time: 130111 08:28:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.196000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9739); +# Time: 130111 08:28:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.808000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7856; +# Time: 130111 08:28:22 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.953000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1637; +# Time: 130111 08:28:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.668000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8483; +# Time: 130111 08:28:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.392000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5458; +# Time: 130111 08:28:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.243000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3246; +# Time: 130111 08:28:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.457000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6166; +# Time: 130111 08:28:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.083000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 657; +# Time: 130111 08:28:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.962000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3199; +# Time: 130111 08:28:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.675000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9993; +# Time: 130111 08:28:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.930000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8706; +# Time: 130111 08:28:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.424000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1180); +# Time: 130111 08:28:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.743000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where 
id < 2302; +# Time: 130111 08:28:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.893000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1213; +# Time: 130111 08:28:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.604000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2779); +# Time: 130111 08:28:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.064000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8244); +# Time: 130111 08:28:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.724000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7221; +# Time: 130111 08:28:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.431000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4645; +# Time: 130111 08:28:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.239000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1548; +# Time: 130111 08:28:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.401000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1869; +# Time: 130111 08:28:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.129000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4223; +# Time: 130111 08:28:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.424000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2286); +# Time: 130111 08:28:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.528000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4304; +# Time: 130111 08:28:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.551000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4864; +# Time: 130111 08:28:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.726000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5966); +# Time: 130111 08:29:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.490000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9563; +# Time: 130111 08:29:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.468000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7952; +# Time: 130111 08:29:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.289000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1125); +# Time: 130111 08:29:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.254000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8146; +# Time: 130111 08:29:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.329000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8941; +# Time: 130111 08:29:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.165000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1807; +# Time: 130111 08:29:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.099000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3935; +# Time: 130111 08:29:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.928000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6556; +# Time: 130111 08:29:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.817000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 264; 
+# Time: 130111 08:29:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.975000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9378; +# Time: 130111 08:29:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.896000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4785; +# Time: 130111 08:29:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.756000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 536; +# Time: 130111 08:29:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.127000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8109); +# Time: 130111 08:29:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.903000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9988); +# Time: 130111 08:29:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.325000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2784; +# Time: 130111 08:29:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.335000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8814; +# Time: 130111 08:29:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.409000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6892; +# Time: 130111 08:29:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.052000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1188; +# Time: 130111 08:29:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.508000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9331); +# Time: 130111 08:29:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.041000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9730); +# Time: 130111 08:29:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.750000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9498); +# Time: 130111 08:29:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.531000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7864); +# Time: 130111 08:29:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.893000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5915; +# Time: 130111 08:29:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.077000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3969; +# Time: 130111 08:29:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.841000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5630; +# Time: 130111 08:29:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.235000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7063; +# Time: 130111 08:29:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.391000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9097); +# Time: 130111 08:29:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.953000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8621; +# Time: 130111 08:29:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.414000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8616; +# Time: 130111 08:29:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.498000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select 
* from t2 where id < 1111; +# Time: 130111 08:29:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.337000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7907; +# Time: 130111 08:29:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.657000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5314; +# Time: 130111 08:30:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.005000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7094; +# Time: 130111 08:30:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.902000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 454); +# Time: 130111 08:30:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.646000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7729); +# Time: 130111 08:30:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.706000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5408; +# Time: 130111 08:30:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.790000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9637); +# Time: 130111 08:30:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.374000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2446); +# Time: 130111 08:30:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.107000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7412; +# Time: 130111 08:30:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.550000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1322; +# Time: 130111 08:30:14 +# User@Host: [user] @ [] 
+# Thread_id: 1 Schema: db1 +# Query_time: 0.786000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8146; +# Time: 130111 08:30:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.622000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1775; +# Time: 130111 08:30:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.120000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1069; +# Time: 130111 08:30:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.990000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1719); +# Time: 130111 08:30:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.447000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1775); +# Time: 130111 08:30:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.527000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1821); +# Time: 130111 08:30:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.065000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6845; +# Time: 130111 08:30:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.372000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8249; +# Time: 130111 08:30:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.567000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9907; +# Time: 130111 08:30:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.048000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2679; +# Time: 130111 08:30:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.226000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +delete from t where id < 3515; +# Time: 130111 08:30:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.520000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4735; +# Time: 130111 08:30:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.168000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3516); +# Time: 130111 08:30:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.386000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 111; +# Time: 130111 08:30:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.758000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1145; +# Time: 130111 08:30:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.396000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1429; +# Time: 130111 08:30:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.772000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7987); +# Time: 130111 08:30:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.141000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6563); +# Time: 130111 08:30:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.120000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5046; +# Time: 130111 08:30:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.227000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8182); +# Time: 130111 08:30:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.291000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 256; +# Time: 130111 08:31:02 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.959000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9868); +# Time: 130111 08:31:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.131000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6721; +# Time: 130111 08:31:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.506000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4204); +# Time: 130111 08:31:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.084000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3631; +# Time: 130111 08:31:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.357000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5554; +# Time: 130111 08:31:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.658000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5197; +# Time: 130111 08:31:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.842000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5265; +# Time: 130111 08:31:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.738000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9285; +# Time: 130111 08:31:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.089000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1218); +# Time: 130111 08:31:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.116000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6118; +# Time: 130111 08:31:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.332000 Lock_time: 
0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 324; +# Time: 130111 08:31:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.225000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6352; +# Time: 130111 08:31:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.834000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7464; +# Time: 130111 08:31:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.585000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9360; +# Time: 130111 08:31:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.851000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2074); +# Time: 130111 08:31:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.566000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5359; +# Time: 130111 08:31:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.396000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 346); +# Time: 130111 08:31:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.785000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8990; +# Time: 130111 08:31:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.553000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4296; +# Time: 130111 08:31:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.494000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8511; +# Time: 130111 08:31:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.206000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5279; +# Time: 130111 08:31:35 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.531000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6874; +# Time: 130111 08:31:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.606000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5749; +# Time: 130111 08:31:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.974000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1614; +# Time: 130111 08:31:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.654000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2494; +# Time: 130111 08:31:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.960000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6529; +# Time: 130111 08:31:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.992000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3473; +# Time: 130111 08:31:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.014000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5348); +# Time: 130111 08:31:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.465000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4834; +# Time: 130111 08:31:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.963000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9556; +# Time: 130111 08:31:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.030000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7130; +# Time: 130111 08:31:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.152000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +delete from t where id < 7577; +# Time: 130111 08:32:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.714000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7966); +# Time: 130111 08:32:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.081000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8602; +# Time: 130111 08:32:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.038000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2801); +# Time: 130111 08:32:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.175000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9695); +# Time: 130111 08:32:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.231000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6316; +# Time: 130111 08:32:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.310000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8910; +# Time: 130111 08:32:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.741000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1286; +# Time: 130111 08:32:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.431000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9407; +# Time: 130111 08:32:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.611000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9688; +# Time: 130111 08:32:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.317000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2905; +# Time: 130111 08:32:27 +# User@Host: 
[user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.661000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2884); +# Time: 130111 08:32:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.520000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5985; +# Time: 130111 08:32:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.024000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9121; +# Time: 130111 08:32:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.474000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9374; +# Time: 130111 08:32:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.800000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4383; +# Time: 130111 08:32:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.597000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4244; +# Time: 130111 08:32:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.478000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9745; +# Time: 130111 08:32:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.605000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3696; +# Time: 130111 08:32:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.341000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9910; +# Time: 130111 08:32:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.587000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9852; +# Time: 130111 08:32:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.249000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 
+select c from t1 where id = 5127; +# Time: 130111 08:32:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.318000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5189; +# Time: 130111 08:32:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.871000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1867); +# Time: 130111 08:32:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.297000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1603; +# Time: 130111 08:32:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.893000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7448; +# Time: 130111 08:32:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.920000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8039; +# Time: 130111 08:33:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.144000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3842; +# Time: 130111 08:33:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.510000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3229; +# Time: 130111 08:33:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.206000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4683; +# Time: 130111 08:33:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.110000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 13; +# Time: 130111 08:33:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.928000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3426); +# Time: 130111 08:33:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.636000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9191; +# Time: 130111 08:33:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.887000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8045; +# Time: 130111 08:33:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.534000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2173); +# Time: 130111 08:33:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.159000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5232); +# Time: 130111 08:33:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.673000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8365); +# Time: 130111 08:33:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.513000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4763); +# Time: 130111 08:33:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.087000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7135; +# Time: 130111 08:33:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.779000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 254; +# Time: 130111 08:33:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.270000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8179; +# Time: 130111 08:33:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.707000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6755; +# Time: 130111 08:33:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.443000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 
+select * from t2 where id < 9908; +# Time: 130111 08:33:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.845000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5212; +# Time: 130111 08:33:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.403000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 911; +# Time: 130111 08:33:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.754000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6838; +# Time: 130111 08:33:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.817000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6878; +# Time: 130111 08:33:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.926000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5794; +# Time: 130111 08:33:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.591000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2571; +# Time: 130111 08:33:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.040000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6710; +# Time: 130111 08:33:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.928000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7458; +# Time: 130111 08:33:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.145000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 846; +# Time: 130111 08:33:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.340000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9448; +# Time: 130111 08:33:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 
0.257000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2271; +# Time: 130111 08:33:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.811000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 517); +# Time: 130111 08:33:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.402000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4789); +# Time: 130111 08:33:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.314000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7631; +# Time: 130111 08:33:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.986000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3413); +# Time: 130111 08:34:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.507000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2302; +# Time: 130111 08:34:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.967000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6214; +# Time: 130111 08:34:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.358000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6556); +# Time: 130111 08:34:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.691000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5158); +# Time: 130111 08:34:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.851000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3729); +# Time: 130111 08:34:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.434000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 
0 +delete from t where id < 253; +# Time: 130111 08:34:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.886000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8686; +# Time: 130111 08:34:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.561000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5025); +# Time: 130111 08:34:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.667000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4929; +# Time: 130111 08:34:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.511000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3406); +# Time: 130111 08:34:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.465000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5937); +# Time: 130111 08:34:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.933000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6735); +# Time: 130111 08:34:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.319000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1663; +# Time: 130111 08:34:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.904000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2880); +# Time: 130111 08:34:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.236000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7131; +# Time: 130111 08:34:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.425000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3917; +# Time: 130111 08:34:33 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.226000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3511; +# Time: 130111 08:34:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.908000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4329); +# Time: 130111 08:34:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.222000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5733; +# Time: 130111 08:34:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.830000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1629; +# Time: 130111 08:34:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.884000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2131; +# Time: 130111 08:34:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.738000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7722); +# Time: 130111 08:34:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.151000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1872; +# Time: 130111 08:34:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.574000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5235; +# Time: 130111 08:34:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.413000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 379; +# Time: 130111 08:34:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.184000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6965; +# Time: 130111 08:34:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.035000 Lock_time: 0.000000 
Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9232; +# Time: 130111 08:34:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.111000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 702; +# Time: 130111 08:35:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.136000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5049); +# Time: 130111 08:35:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.092000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7512; +# Time: 130111 08:35:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.326000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8013; +# Time: 130111 08:35:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.488000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3259; +# Time: 130111 08:35:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.039000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8210; +# Time: 130111 08:35:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.172000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9600; +# Time: 130111 08:35:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.013000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7039; +# Time: 130111 08:35:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.185000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2298; +# Time: 130111 08:35:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.110000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8090; +# Time: 130111 08:35:32 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.822000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4023); +# Time: 130111 08:35:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.892000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3903); +# Time: 130111 08:35:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.279000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4567; +# Time: 130111 08:35:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.434000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7307; +# Time: 130111 08:35:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.739000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1805; +# Time: 130111 08:35:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.709000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 214); +# Time: 130111 08:35:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.136000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9268; +# Time: 130111 08:35:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.598000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5384; +# Time: 130111 08:35:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.442000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9867; +# Time: 130111 08:35:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.499000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 819; +# Time: 130111 08:35:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.172000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 
+insert into t (id, val) values (null, 9712); +# Time: 130111 08:35:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.810000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7455; +# Time: 130111 08:35:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.504000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5042; +# Time: 130111 08:35:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.722000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8359); +# Time: 130111 08:35:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.623000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4504; +# Time: 130111 08:35:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.609000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2378; +# Time: 130111 08:35:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.196000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3055); +# Time: 130111 08:35:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.170000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4430; +# Time: 130111 08:35:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.040000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6659; +# Time: 130111 08:35:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.979000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6462; +# Time: 130111 08:35:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.715000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 50; +# Time: 130111 08:35:59 +# User@Host: [user] @ [] +# Thread_id: 1 
Schema: db1 +# Query_time: 0.868000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6732; +# Time: 130111 08:36:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.019000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6460; +# Time: 130111 08:36:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.192000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5997; +# Time: 130111 08:36:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.076000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4832; +# Time: 130111 08:36:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.961000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5582; +# Time: 130111 08:36:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.934000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2715; +# Time: 130111 08:36:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.055000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1627; +# Time: 130111 08:36:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.261000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7097; +# Time: 130111 08:36:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.837000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 334); +# Time: 130111 08:36:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.137000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6173); +# Time: 130111 08:36:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.787000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where 
id < 9633; +# Time: 130111 08:36:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.035000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9835; +# Time: 130111 08:36:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.355000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7938; +# Time: 130111 08:36:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.626000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5540); +# Time: 130111 08:36:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.245000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5687; +# Time: 130111 08:36:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.545000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9073; +# Time: 130111 08:36:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.593000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3590; +# Time: 130111 08:36:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.205000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4155; +# Time: 130111 08:36:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.696000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3379; +# Time: 130111 08:36:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.885000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5349); +# Time: 130111 08:36:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.681000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 639); +# Time: 130111 08:36:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.006000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6385; +# Time: 130111 08:36:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.963000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7601); +# Time: 130111 08:36:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.374000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8424); +# Time: 130111 08:36:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.656000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1106; +# Time: 130111 08:36:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.027000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9453; +# Time: 130111 08:36:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.584000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5423; +# Time: 130111 08:37:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.645000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1546; +# Time: 130111 08:37:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.723000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4559; +# Time: 130111 08:37:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.376000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5698; +# Time: 130111 08:37:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.050000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4909; +# Time: 130111 08:37:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.518000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1585; +# 
Time: 130111 08:37:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.835000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4862); +# Time: 130111 08:37:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.235000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8457; +# Time: 130111 08:37:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.252000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8298; +# Time: 130111 08:37:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.844000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3204; +# Time: 130111 08:37:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.436000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1946; +# Time: 130111 08:37:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.547000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2738; +# Time: 130111 08:37:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.118000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 785); +# Time: 130111 08:37:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.137000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1730); +# Time: 130111 08:37:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.287000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 518); +# Time: 130111 08:37:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.032000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8420; +# Time: 130111 08:37:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.686000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4061); +# Time: 130111 08:37:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.483000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5909); +# Time: 130111 08:37:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.464000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7824); +# Time: 130111 08:37:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.053000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7695); +# Time: 130111 08:37:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.952000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6842); +# Time: 130111 08:37:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.512000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1323; +# Time: 130111 08:37:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.265000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3984); +# Time: 130111 08:37:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.170000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7196; +# Time: 130111 08:37:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.972000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3781; +# Time: 130111 08:37:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.770000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9071; +# Time: 130111 08:37:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.431000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +delete from t where id < 2264; +# Time: 130111 08:37:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.191000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7568; +# Time: 130111 08:38:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.592000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9700; +# Time: 130111 08:38:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.690000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 980); +# Time: 130111 08:38:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.521000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7426); +# Time: 130111 08:38:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.799000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5848; +# Time: 130111 08:38:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.775000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 496; +# Time: 130111 08:38:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.156000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2225; +# Time: 130111 08:38:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.134000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 190; +# Time: 130111 08:38:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.633000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7287; +# Time: 130111 08:38:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.714000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7453; +# Time: 130111 08:38:15 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.364000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6710); +# Time: 130111 08:38:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.357000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7808); +# Time: 130111 08:38:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.263000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8319); +# Time: 130111 08:38:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.566000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4980; +# Time: 130111 08:38:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.236000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1880); +# Time: 130111 08:38:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.864000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 951; +# Time: 130111 08:38:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.909000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6542; +# Time: 130111 08:38:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.848000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1065; +# Time: 130111 08:38:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.426000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6943; +# Time: 130111 08:38:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.677000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3441; +# Time: 130111 08:38:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.120000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +select c from t1 where id = 4163; +# Time: 130111 08:38:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.447000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7107; +# Time: 130111 08:38:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.886000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4923; +# Time: 130111 08:38:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.874000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5936; +# Time: 130111 08:38:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.845000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4847; +# Time: 130111 08:38:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.427000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7537; +# Time: 130111 08:38:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.496000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1826); +# Time: 130111 08:38:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.313000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9647; +# Time: 130111 08:38:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.541000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1581); +# Time: 130111 08:38:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.474000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4739; +# Time: 130111 08:38:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.946000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1357; +# Time: 130111 08:38:58 +# User@Host: [user] @ [] 
+# Thread_id: 1 Schema: db1 +# Query_time: 0.286000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9586; +# Time: 130111 08:38:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.005000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6890); +# Time: 130111 08:39:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.868000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8201; +# Time: 130111 08:39:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.297000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6112; +# Time: 130111 08:39:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.574000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4072; +# Time: 130111 08:39:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.236000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8670); +# Time: 130111 08:39:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.871000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3705; +# Time: 130111 08:39:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.655000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3157; +# Time: 130111 08:39:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.711000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3502; +# Time: 130111 08:39:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.712000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5187; +# Time: 130111 08:39:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.992000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 
+delete from t where id < 3620; +# Time: 130111 08:39:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.093000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 608; +# Time: 130111 08:39:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.279000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1445; +# Time: 130111 08:39:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.470000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9608); +# Time: 130111 08:39:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.100000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9240; +# Time: 130111 08:39:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.105000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1176; +# Time: 130111 08:39:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.621000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2420; +# Time: 130111 08:39:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.211000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 138; +# Time: 130111 08:39:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.544000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2018; +# Time: 130111 08:39:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.721000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2077; +# Time: 130111 08:39:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.720000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4580; +# Time: 130111 08:39:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.376000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6875; +# Time: 130111 08:39:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.669000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8508; +# Time: 130111 08:39:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.084000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3852; +# Time: 130111 08:39:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.938000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9356; +# Time: 130111 08:39:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.969000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5631; +# Time: 130111 08:39:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.794000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4704; +# Time: 130111 08:39:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.341000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7201; +# Time: 130111 08:39:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.307000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 531; +# Time: 130111 08:39:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.480000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9648; +# Time: 130111 08:39:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.082000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7732; +# Time: 130111 08:39:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.264000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9732; +# Time: 130111 
08:39:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.197000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5906; +# Time: 130111 08:40:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.937000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6470; +# Time: 130111 08:40:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.719000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5039; +# Time: 130111 08:40:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.912000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2995; +# Time: 130111 08:40:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.517000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6876; +# Time: 130111 08:40:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.096000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1152); +# Time: 130111 08:40:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.491000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 864; +# Time: 130111 08:40:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.758000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 585; +# Time: 130111 08:40:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.759000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1995); +# Time: 130111 08:40:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.079000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4617); +# Time: 130111 08:40:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.967000 Lock_time: 
0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1049; +# Time: 130111 08:40:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.847000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1562); +# Time: 130111 08:40:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.744000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7969; +# Time: 130111 08:40:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.734000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5906; +# Time: 130111 08:40:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.415000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8114); +# Time: 130111 08:40:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.685000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1280; +# Time: 130111 08:40:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.578000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 409; +# Time: 130111 08:40:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.237000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4752); +# Time: 130111 08:40:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.214000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2537; +# Time: 130111 08:40:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.185000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2929); +# Time: 130111 08:40:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.935000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9594; +# Time: 
130111 08:40:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.160000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6518; +# Time: 130111 08:40:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.314000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8884; +# Time: 130111 08:40:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.410000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1808; +# Time: 130111 08:40:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.687000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9370; +# Time: 130111 08:40:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.726000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9022; +# Time: 130111 08:40:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.377000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4214; +# Time: 130111 08:40:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.970000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5646; +# Time: 130111 08:40:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.646000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3455; +# Time: 130111 08:40:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.839000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3284; +# Time: 130111 08:40:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.235000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 690; +# Time: 130111 08:40:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.401000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +insert into t (id, val) values (null, 992); +# Time: 130111 08:40:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.461000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8375; +# Time: 130111 08:40:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.731000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7743); +# Time: 130111 08:41:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.289000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9633; +# Time: 130111 08:41:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.152000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2454; +# Time: 130111 08:41:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.213000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6836; +# Time: 130111 08:41:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.366000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8847); +# Time: 130111 08:41:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.279000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1301; +# Time: 130111 08:41:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.934000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2122; +# Time: 130111 08:41:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.534000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5640; +# Time: 130111 08:41:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.025000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6815; +# Time: 130111 08:41:18 +# User@Host: 
[user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.757000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5587; +# Time: 130111 08:41:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.216000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2777); +# Time: 130111 08:41:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.758000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5450; +# Time: 130111 08:41:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.426000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7936; +# Time: 130111 08:41:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.612000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7602; +# Time: 130111 08:41:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.047000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4366; +# Time: 130111 08:41:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.757000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 888); +# Time: 130111 08:41:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.143000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 804); +# Time: 130111 08:41:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.501000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 557); +# Time: 130111 08:41:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.968000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1962; +# Time: 130111 08:41:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.471000 Lock_time: 0.000000 
Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9236); +# Time: 130111 08:41:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.325000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9434; +# Time: 130111 08:41:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.404000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3273; +# Time: 130111 08:41:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.626000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9785); +# Time: 130111 08:41:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.593000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6591; +# Time: 130111 08:41:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.788000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1727; +# Time: 130111 08:41:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.475000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3921); +# Time: 130111 08:41:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.858000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1989; +# Time: 130111 08:41:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.466000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3825; +# Time: 130111 08:41:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.114000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9378; +# Time: 130111 08:41:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.294000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7371; +# Time: 130111 08:41:53 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.366000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 796); +# Time: 130111 08:41:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.556000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6898; +# Time: 130111 08:41:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.074000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3917; +# Time: 130111 08:41:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.154000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1236; +# Time: 130111 08:42:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.193000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8998; +# Time: 130111 08:42:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.520000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9792; +# Time: 130111 08:42:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.555000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6959; +# Time: 130111 08:42:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.382000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7581; +# Time: 130111 08:42:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.198000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6919; +# Time: 130111 08:42:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.141000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7750); +# Time: 130111 08:42:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.809000 Lock_time: 0.000000 
Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9230; +# Time: 130111 08:42:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.950000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9668; +# Time: 130111 08:42:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.768000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5434); +# Time: 130111 08:42:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.176000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 500); +# Time: 130111 08:42:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.764000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4688; +# Time: 130111 08:42:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.822000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2882); +# Time: 130111 08:42:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.755000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6911; +# Time: 130111 08:42:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.297000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5817; +# Time: 130111 08:42:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.097000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5336; +# Time: 130111 08:42:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.458000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4228; +# Time: 130111 08:42:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.846000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6700; +# Time: 130111 08:42:48 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.142000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4279); +# Time: 130111 08:42:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.566000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1539; +# Time: 130111 08:42:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.580000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8203; +# Time: 130111 08:42:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.848000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7912; +# Time: 130111 08:42:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.854000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8121; +# Time: 130111 08:42:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.952000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6405; +# Time: 130111 08:42:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.390000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 126); +# Time: 130111 08:42:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.756000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5375); +# Time: 130111 08:42:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.782000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3827); +# Time: 130111 08:43:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.170000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2007); +# Time: 130111 08:43:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 
0.944000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5376); +# Time: 130111 08:43:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.881000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6400; +# Time: 130111 08:43:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.812000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 424; +# Time: 130111 08:43:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.421000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5473; +# Time: 130111 08:43:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.568000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5585; +# Time: 130111 08:43:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.816000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3997; +# Time: 130111 08:43:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.566000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6936; +# Time: 130111 08:43:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.512000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6988; +# Time: 130111 08:43:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.411000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1166; +# Time: 130111 08:43:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.782000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5141); +# Time: 130111 08:43:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.833000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7758; +# Time: 
130111 08:43:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.747000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 779; +# Time: 130111 08:43:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.997000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 148); +# Time: 130111 08:43:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.905000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5583; +# Time: 130111 08:43:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.020000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6125); +# Time: 130111 08:43:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.462000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3861; +# Time: 130111 08:43:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.167000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6312; +# Time: 130111 08:43:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.802000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7037); +# Time: 130111 08:43:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.236000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7495; +# Time: 130111 08:43:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.239000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2611; +# Time: 130111 08:43:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.378000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5859); +# Time: 130111 08:43:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.566000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2624; +# Time: 130111 08:43:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.671000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5124); +# Time: 130111 08:43:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.555000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3789); +# Time: 130111 08:43:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.437000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6298; +# Time: 130111 08:43:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.548000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8395; +# Time: 130111 08:44:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.139000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2014; +# Time: 130111 08:44:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.979000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8431; +# Time: 130111 08:44:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.290000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9262; +# Time: 130111 08:44:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.489000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2261; +# Time: 130111 08:44:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.559000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7685); +# Time: 130111 08:44:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.363000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) 
values (null, 4639); +# Time: 130111 08:44:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.797000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6431; +# Time: 130111 08:44:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.070000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2765; +# Time: 130111 08:44:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.879000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6229; +# Time: 130111 08:44:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.620000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2923; +# Time: 130111 08:44:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.935000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 15; +# Time: 130111 08:44:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.313000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7961; +# Time: 130111 08:44:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.437000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6499; +# Time: 130111 08:44:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.940000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6206; +# Time: 130111 08:44:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.576000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9464); +# Time: 130111 08:44:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.923000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5237); +# Time: 130111 08:44:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.335000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6201; +# Time: 130111 08:44:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.079000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8345; +# Time: 130111 08:44:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.918000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1951; +# Time: 130111 08:44:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.592000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8382; +# Time: 130111 08:44:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.219000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8931; +# Time: 130111 08:44:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.075000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7430; +# Time: 130111 08:45:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.148000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2741; +# Time: 130111 08:45:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.308000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4617; +# Time: 130111 08:45:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.959000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7294; +# Time: 130111 08:45:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.214000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6250; +# Time: 130111 08:45:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.330000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9170; +# Time: 130111 08:45:10 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.997000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1130); +# Time: 130111 08:45:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.694000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3733; +# Time: 130111 08:45:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.726000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5467; +# Time: 130111 08:45:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.532000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5477); +# Time: 130111 08:45:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.578000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8644; +# Time: 130111 08:45:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.574000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 953); +# Time: 130111 08:45:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.949000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8186; +# Time: 130111 08:45:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.201000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3784; +# Time: 130111 08:45:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.477000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7174; +# Time: 130111 08:45:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.612000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7188); +# Time: 130111 08:45:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.386000 Lock_time: 
0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4382; +# Time: 130111 08:45:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.960000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1515); +# Time: 130111 08:45:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.542000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9175; +# Time: 130111 08:45:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.531000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5263); +# Time: 130111 08:45:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.911000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8370); +# Time: 130111 08:45:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.075000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6471); +# Time: 130111 08:45:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.986000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6321; +# Time: 130111 08:45:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.724000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 906; +# Time: 130111 08:45:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.489000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 606); +# Time: 130111 08:45:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.354000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8833; +# Time: 130111 08:45:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.580000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 
2573; +# Time: 130111 08:45:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.206000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8762; +# Time: 130111 08:45:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.971000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6817); +# Time: 130111 08:45:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.069000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7728; +# Time: 130111 08:45:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.526000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 224; +# Time: 130111 08:46:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.685000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6845; +# Time: 130111 08:46:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.270000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3579; +# Time: 130111 08:46:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.212000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1640); +# Time: 130111 08:46:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.544000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6928); +# Time: 130111 08:46:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.924000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7864); +# Time: 130111 08:46:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.980000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4475; +# Time: 130111 08:46:11 +# User@Host: [user] @ [] +# Thread_id: 1 
Schema: db1 +# Query_time: 0.212000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6131); +# Time: 130111 08:46:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.316000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5006; +# Time: 130111 08:46:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.494000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3914; +# Time: 130111 08:46:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.645000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4119; +# Time: 130111 08:46:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.096000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9659); +# Time: 130111 08:46:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.503000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6022; +# Time: 130111 08:46:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.795000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9583; +# Time: 130111 08:46:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.382000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1403); +# Time: 130111 08:46:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.499000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7929; +# Time: 130111 08:46:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.698000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5030; +# Time: 130111 08:46:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.616000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 
+delete from t where id < 2281; +# Time: 130111 08:46:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.117000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4792; +# Time: 130111 08:46:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.494000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3905; +# Time: 130111 08:46:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.482000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5430; +# Time: 130111 08:46:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.197000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2141); +# Time: 130111 08:46:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.146000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5985; +# Time: 130111 08:46:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.019000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7594; +# Time: 130111 08:46:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.463000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9495; +# Time: 130111 08:46:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.720000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6789; +# Time: 130111 08:46:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.167000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1518; +# Time: 130111 08:46:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.970000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 294); +# Time: 130111 08:46:50 +# User@Host: [user] @ [] +# Thread_id: 1 
Schema: db1 +# Query_time: 0.694000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5096); +# Time: 130111 08:46:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.136000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5101; +# Time: 130111 08:46:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.911000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5123; +# Time: 130111 08:46:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.101000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1197; +# Time: 130111 08:47:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.956000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1735; +# Time: 130111 08:47:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.232000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9768; +# Time: 130111 08:47:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.609000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5494; +# Time: 130111 08:47:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.151000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8130; +# Time: 130111 08:47:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.123000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5009; +# Time: 130111 08:47:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.463000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1470; +# Time: 130111 08:47:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.979000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 
6827; +# Time: 130111 08:47:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.423000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2178); +# Time: 130111 08:47:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.437000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4884; +# Time: 130111 08:47:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.480000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3348); +# Time: 130111 08:47:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.190000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7980; +# Time: 130111 08:47:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.741000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1064; +# Time: 130111 08:47:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.062000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3683); +# Time: 130111 08:47:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.384000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3385; +# Time: 130111 08:47:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.331000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2457; +# Time: 130111 08:47:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.035000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3161; +# Time: 130111 08:47:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.665000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 736; +# Time: 130111 08:47:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.259000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 662); +# Time: 130111 08:47:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.050000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8958; +# Time: 130111 08:47:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.475000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3481; +# Time: 130111 08:47:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.126000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5547); +# Time: 130111 08:47:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.698000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6348); +# Time: 130111 08:47:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.033000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2439); +# Time: 130111 08:47:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.391000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4702; +# Time: 130111 08:47:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.016000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3929; +# Time: 130111 08:47:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.041000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9055); +# Time: 130111 08:48:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.565000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3594; +# Time: 130111 08:48:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.592000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +select c from t1 where id = 9626; +# Time: 130111 08:48:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.253000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4927; +# Time: 130111 08:48:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.587000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2357); +# Time: 130111 08:48:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.812000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2225); +# Time: 130111 08:48:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.468000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1658; +# Time: 130111 08:48:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.905000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1480; +# Time: 130111 08:48:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.495000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3121; +# Time: 130111 08:48:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.248000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6435; +# Time: 130111 08:48:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.985000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6289); +# Time: 130111 08:48:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.646000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9989; +# Time: 130111 08:48:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.658000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 605; +# Time: 130111 08:48:23 +# User@Host: 
[user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.605000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8960); +# Time: 130111 08:48:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.323000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8453; +# Time: 130111 08:48:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.673000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 972; +# Time: 130111 08:48:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.168000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5952; +# Time: 130111 08:48:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.137000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1021; +# Time: 130111 08:48:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.210000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6871; +# Time: 130111 08:48:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.806000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3797; +# Time: 130111 08:48:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.215000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4552; +# Time: 130111 08:48:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.723000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2046; +# Time: 130111 08:48:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.064000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3785); +# Time: 130111 08:48:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.837000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +insert into t (id, val) values (null, 798); +# Time: 130111 08:48:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.502000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7077); +# Time: 130111 08:48:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.735000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5254; +# Time: 130111 08:48:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.570000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2338); +# Time: 130111 08:48:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.872000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5660; +# Time: 130111 08:48:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.135000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6615); +# Time: 130111 08:48:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.287000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 36; +# Time: 130111 08:48:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.836000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6884; +# Time: 130111 08:48:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.450000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1968; +# Time: 130111 08:48:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.799000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1351); +# Time: 130111 08:48:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.931000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1704); +# 
Time: 130111 08:49:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.891000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5086; +# Time: 130111 08:49:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.234000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3115; +# Time: 130111 08:49:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.060000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7307; +# Time: 130111 08:49:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.462000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1120); +# Time: 130111 08:49:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.381000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5625); +# Time: 130111 08:49:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.485000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2665); +# Time: 130111 08:49:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.758000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5034; +# Time: 130111 08:49:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.422000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7120; +# Time: 130111 08:49:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.322000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8039; +# Time: 130111 08:49:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.490000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1292); +# Time: 130111 08:49:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.396000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7615; +# Time: 130111 08:49:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.526000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 958; +# Time: 130111 08:49:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.577000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9137; +# Time: 130111 08:49:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.161000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2848; +# Time: 130111 08:49:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.221000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8488); +# Time: 130111 08:49:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.857000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5569; +# Time: 130111 08:49:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.966000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6656; +# Time: 130111 08:49:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.965000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6168); +# Time: 130111 08:49:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.354000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6308; +# Time: 130111 08:49:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.128000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5867; +# Time: 130111 08:49:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.859000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 
1594; +# Time: 130111 08:49:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.426000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8549; +# Time: 130111 08:49:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.043000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9222; +# Time: 130111 08:49:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.463000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4982; +# Time: 130111 08:49:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.528000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6518; +# Time: 130111 08:49:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.348000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5646; +# Time: 130111 08:49:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.289000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7237; +# Time: 130111 08:49:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.719000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1653; +# Time: 130111 08:49:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.297000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8146; +# Time: 130111 08:49:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.097000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5218; +# Time: 130111 08:49:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.587000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8442; +# Time: 130111 08:49:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.241000 Lock_time: 0.000000 
Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6825; +# Time: 130111 08:49:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.193000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9194; +# Time: 130111 08:50:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.480000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3924); +# Time: 130111 08:50:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.675000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 238; +# Time: 130111 08:50:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.759000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5210; +# Time: 130111 08:50:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.332000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5834; +# Time: 130111 08:50:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.200000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5943); +# Time: 130111 08:50:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.190000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9901; +# Time: 130111 08:50:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.148000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2348; +# Time: 130111 08:50:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.270000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2940; +# Time: 130111 08:50:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.083000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3966; +# Time: 130111 08:50:23 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.770000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5454; +# Time: 130111 08:50:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.141000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6152); +# Time: 130111 08:50:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.287000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 224; +# Time: 130111 08:50:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.889000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9807; +# Time: 130111 08:50:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.640000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6946; +# Time: 130111 08:50:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.669000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3794; +# Time: 130111 08:50:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.954000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6581); +# Time: 130111 08:50:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.529000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6302; +# Time: 130111 08:50:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.634000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8737); +# Time: 130111 08:50:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.591000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6762); +# Time: 130111 08:50:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.234000 
Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7208; +# Time: 130111 08:50:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.977000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7895); +# Time: 130111 08:50:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.682000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 917); +# Time: 130111 08:50:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.180000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4715; +# Time: 130111 08:50:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.148000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 34; +# Time: 130111 08:50:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.347000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3859; +# Time: 130111 08:50:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.532000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1979; +# Time: 130111 08:51:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.807000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6071; +# Time: 130111 08:51:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.076000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5566); +# Time: 130111 08:51:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.392000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9560); +# Time: 130111 08:51:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.169000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8463; +# 
Time: 130111 08:51:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.350000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6555; +# Time: 130111 08:51:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.436000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5855; +# Time: 130111 08:51:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.685000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6055); +# Time: 130111 08:51:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.304000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1874; +# Time: 130111 08:51:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.224000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5855; +# Time: 130111 08:51:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.678000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6); +# Time: 130111 08:51:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.762000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3852); +# Time: 130111 08:51:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.121000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2683; +# Time: 130111 08:51:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.679000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6959; +# Time: 130111 08:51:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.782000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1218; +# Time: 130111 08:51:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 
0.929000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3577; +# Time: 130111 08:51:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.288000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8164; +# Time: 130111 08:51:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.365000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4989); +# Time: 130111 08:51:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.494000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8918; +# Time: 130111 08:51:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.032000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3286; +# Time: 130111 08:51:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.825000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3448; +# Time: 130111 08:51:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.306000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8118); +# Time: 130111 08:51:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.754000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1831; +# Time: 130111 08:51:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.078000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3460; +# Time: 130111 08:51:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.230000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9862; +# Time: 130111 08:51:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.379000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4973); 
+# Time: 130111 08:51:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.811000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7200; +# Time: 130111 08:51:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.940000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8564; +# Time: 130111 08:52:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.080000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3483; +# Time: 130111 08:52:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.269000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6060; +# Time: 130111 08:52:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.939000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3742; +# Time: 130111 08:52:04 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.859000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4839); +# Time: 130111 08:52:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.611000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 4399; +# Time: 130111 08:52:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.184000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3921; +# Time: 130111 08:52:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.761000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9198; +# Time: 130111 08:52:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.776000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9857; +# Time: 130111 08:52:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.115000 Lock_time: 
0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5710; +# Time: 130111 08:52:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.322000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 408; +# Time: 130111 08:52:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.223000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5261; +# Time: 130111 08:52:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.461000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6714); +# Time: 130111 08:52:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.375000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3460); +# Time: 130111 08:52:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.535000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1315; +# Time: 130111 08:52:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.397000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7405; +# Time: 130111 08:52:26 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.973000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2076; +# Time: 130111 08:52:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.681000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6232); +# Time: 130111 08:52:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.040000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6781); +# Time: 130111 08:52:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.421000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9675; +# Time: 
130111 08:52:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.937000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1127); +# Time: 130111 08:52:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.769000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9067; +# Time: 130111 08:52:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.313000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8667); +# Time: 130111 08:52:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.570000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9580); +# Time: 130111 08:52:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.201000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2943; +# Time: 130111 08:52:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.734000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8585; +# Time: 130111 08:52:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.550000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 842); +# Time: 130111 08:52:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.616000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7483; +# Time: 130111 08:52:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.418000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2024; +# Time: 130111 08:52:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.239000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7311; +# Time: 130111 08:52:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# 
Query_time: 0.735000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1599; +# Time: 130111 08:52:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.544000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8280; +# Time: 130111 08:52:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.759000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7261; +# Time: 130111 08:52:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.424000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9806; +# Time: 130111 08:53:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.857000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5734; +# Time: 130111 08:53:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.792000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3501; +# Time: 130111 08:53:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.869000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7799; +# Time: 130111 08:53:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.773000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8997; +# Time: 130111 08:53:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.301000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4303); +# Time: 130111 08:53:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.155000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2896; +# Time: 130111 08:53:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.481000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 
8052); +# Time: 130111 08:53:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.289000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2450); +# Time: 130111 08:53:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.167000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2017; +# Time: 130111 08:53:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.543000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6993; +# Time: 130111 08:53:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.786000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2772; +# Time: 130111 08:53:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.670000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4511); +# Time: 130111 08:53:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.764000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4183; +# Time: 130111 08:53:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.430000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5671); +# Time: 130111 08:53:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.421000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2650; +# Time: 130111 08:53:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.442000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8820); +# Time: 130111 08:53:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.027000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4252; +# Time: 130111 08:53:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 
+# Query_time: 0.670000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2978; +# Time: 130111 08:53:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.338000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4816; +# Time: 130111 08:53:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.409000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4982); +# Time: 130111 08:53:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.579000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7571; +# Time: 130111 08:53:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.602000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7384; +# Time: 130111 08:53:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.660000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4142); +# Time: 130111 08:54:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.077000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9867; +# Time: 130111 08:54:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.097000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7871); +# Time: 130111 08:54:03 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.724000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7501; +# Time: 130111 08:54:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.474000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3777; +# Time: 130111 08:54:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.645000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 
where id < 1250; +# Time: 130111 08:54:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.511000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7586); +# Time: 130111 08:54:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.494000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8208; +# Time: 130111 08:54:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.351000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8482; +# Time: 130111 08:54:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.057000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5520; +# Time: 130111 08:54:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.312000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8289); +# Time: 130111 08:54:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.422000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3911; +# Time: 130111 08:54:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.109000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3693; +# Time: 130111 08:54:22 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.668000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 522; +# Time: 130111 08:54:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.704000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7596); +# Time: 130111 08:54:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.955000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8135); +# Time: 130111 08:54:33 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.623000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7498; +# Time: 130111 08:54:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.320000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1340; +# Time: 130111 08:54:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.055000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6293); +# Time: 130111 08:54:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.349000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9435; +# Time: 130111 08:54:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.315000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 723; +# Time: 130111 08:54:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.991000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7034); +# Time: 130111 08:54:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.445000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1752; +# Time: 130111 08:54:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.970000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 440; +# Time: 130111 08:54:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.351000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4113; +# Time: 130111 08:54:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.273000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4500); +# Time: 130111 08:54:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.618000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +delete from t where id < 3103; +# Time: 130111 08:54:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.288000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 567; +# Time: 130111 08:54:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.883000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7510; +# Time: 130111 08:54:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.882000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4970; +# Time: 130111 08:54:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.117000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 98; +# Time: 130111 08:54:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.578000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1656); +# Time: 130111 08:55:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.966000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6681; +# Time: 130111 08:55:02 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.482000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2199; +# Time: 130111 08:55:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.492000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6081); +# Time: 130111 08:55:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.277000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3398; +# Time: 130111 08:55:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.515000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9585; +# Time: 130111 08:55:12 +# User@Host: [user] @ [] +# 
Thread_id: 1 Schema: db1 +# Query_time: 0.799000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9516; +# Time: 130111 08:55:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.637000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2688; +# Time: 130111 08:55:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.054000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 8712; +# Time: 130111 08:55:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.109000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 576); +# Time: 130111 08:55:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.425000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9475); +# Time: 130111 08:55:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.626000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5124); +# Time: 130111 08:55:19 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.651000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5329; +# Time: 130111 08:55:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.421000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2987); +# Time: 130111 08:55:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.324000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2750; +# Time: 130111 08:55:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.888000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6869; +# Time: 130111 08:55:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.683000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +insert into t (id, val) values (null, 4413); +# Time: 130111 08:55:38 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.337000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7664; +# Time: 130111 08:55:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.338000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2981; +# Time: 130111 08:55:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.759000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9349); +# Time: 130111 08:55:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.537000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9376; +# Time: 130111 08:55:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.611000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 985; +# Time: 130111 08:55:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.739000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5598; +# Time: 130111 08:55:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.910000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6567; +# Time: 130111 08:55:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.614000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1398; +# Time: 130111 08:55:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.184000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1968; +# Time: 130111 08:56:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.929000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3785; +# Time: 130111 08:56:02 +# User@Host: [user] @ [] +# Thread_id: 
1 Schema: db1 +# Query_time: 0.477000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5251; +# Time: 130111 08:56:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.074000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9637); +# Time: 130111 08:56:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.642000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3016; +# Time: 130111 08:56:10 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.098000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8984; +# Time: 130111 08:56:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.797000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3908; +# Time: 130111 08:56:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.363000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6978; +# Time: 130111 08:56:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.146000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1565; +# Time: 130111 08:56:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.664000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8724; +# Time: 130111 08:56:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.385000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9496); +# Time: 130111 08:56:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.101000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2188); +# Time: 130111 08:56:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.397000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 
+select c from t1 where id = 9647; +# Time: 130111 08:56:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.663000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9609; +# Time: 130111 08:56:32 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.920000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 8194; +# Time: 130111 08:56:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.959000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2945; +# Time: 130111 08:56:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.134000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3364; +# Time: 130111 08:56:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.176000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9181; +# Time: 130111 08:56:44 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.258000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 677; +# Time: 130111 08:56:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.479000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 119; +# Time: 130111 08:56:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.098000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5844; +# Time: 130111 08:56:48 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.900000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 6926; +# Time: 130111 08:56:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.673000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7571; +# Time: 130111 08:56:50 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 
0.660000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9131; +# Time: 130111 08:56:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.824000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4568); +# Time: 130111 08:56:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.770000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2347; +# Time: 130111 08:56:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.340000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 7695); +# Time: 130111 08:56:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.877000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1391; +# Time: 130111 08:56:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.161000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 726; +# Time: 130111 08:56:59 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.933000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9362); +# Time: 130111 08:57:00 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.234000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4546; +# Time: 130111 08:57:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.658000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 2496; +# Time: 130111 08:57:06 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.358000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 1979; +# Time: 130111 08:57:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.956000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4529; +# 
Time: 130111 08:57:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.487000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 33); +# Time: 130111 08:57:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.873000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3765; +# Time: 130111 08:57:16 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.108000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8494); +# Time: 130111 08:57:17 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.861000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2429; +# Time: 130111 08:57:18 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.660000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2151); +# Time: 130111 08:57:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.635000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9422; +# Time: 130111 08:57:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.871000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5194; +# Time: 130111 08:57:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.183000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 2725); +# Time: 130111 08:57:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.638000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9635); +# Time: 130111 08:57:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.742000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4531; +# Time: 130111 08:57:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: 
db1 +# Query_time: 0.106000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2407; +# Time: 130111 08:57:31 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.056000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4744; +# Time: 130111 08:57:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.733000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9405; +# Time: 130111 08:57:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.840000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3519; +# Time: 130111 08:57:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.003000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7767; +# Time: 130111 08:57:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.322000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7332; +# Time: 130111 08:57:39 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.115000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2097; +# Time: 130111 08:57:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.215000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 1853; +# Time: 130111 08:57:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.401000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 159; +# Time: 130111 08:57:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.497000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5273; +# Time: 130111 08:57:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.248000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3929; +# Time: 
130111 08:57:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.040000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2735; +# Time: 130111 08:57:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.535000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3074; +# Time: 130111 08:57:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.260000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2753; +# Time: 130111 08:57:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.908000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7531; +# Time: 130111 08:57:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.430000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4562); +# Time: 130111 08:57:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.429000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 9605; +# Time: 130111 08:58:01 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.626000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5943; +# Time: 130111 08:58:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.517000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8384; +# Time: 130111 08:58:08 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.549000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 653; +# Time: 130111 08:58:09 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.359000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 5375); +# Time: 130111 08:58:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.661000 Lock_time: 
0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3689; +# Time: 130111 08:58:12 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.506000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 186; +# Time: 130111 08:58:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.024000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 428; +# Time: 130111 08:58:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.814000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3258); +# Time: 130111 08:58:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.667000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6459; +# Time: 130111 08:58:24 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.915000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 7749; +# Time: 130111 08:58:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.027000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7379; +# Time: 130111 08:58:29 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.969000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2904; +# Time: 130111 08:58:33 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.809000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9445); +# Time: 130111 08:58:34 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.339000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3414; +# Time: 130111 08:58:36 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.356000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 6960); +# Time: 130111 08:58:39 
+# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.526000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 5882; +# Time: 130111 08:58:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.269000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 8696; +# Time: 130111 08:58:42 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.518000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6766; +# Time: 130111 08:58:43 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.872000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9768; +# Time: 130111 08:58:47 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.242000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6649; +# Time: 130111 08:58:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.736000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 6870; +# Time: 130111 08:58:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.825000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 1563); +# Time: 130111 08:58:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.792000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7052; +# Time: 130111 08:58:55 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.116000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8859); +# Time: 130111 08:58:56 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.795000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 247; +# Time: 130111 08:58:58 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.357000 Lock_time: 0.000000 
Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 9726; +# Time: 130111 08:59:05 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.387000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 5299; +# Time: 130111 08:59:07 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.771000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 8451); +# Time: 130111 08:59:11 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.534000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4491); +# Time: 130111 08:59:13 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.518000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 3290; +# Time: 130111 08:59:14 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.858000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 9182); +# Time: 130111 08:59:15 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.981000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1537; +# Time: 130111 08:59:20 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.980000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 3422; +# Time: 130111 08:59:21 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.077000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6328; +# Time: 130111 08:59:23 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.376000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 6485; +# Time: 130111 08:59:25 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.513000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 400; +# Time: 130111 08:59:26 +# 
User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.497000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5892; +# Time: 130111 08:59:27 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.989000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9476; +# Time: 130111 08:59:28 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.622000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 4262; +# Time: 130111 08:59:30 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.518000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3941); +# Time: 130111 08:59:35 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.878000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +delete from t where id < 7195; +# Time: 130111 08:59:37 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.874000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 7789; +# Time: 130111 08:59:40 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.797000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 5919; +# Time: 130111 08:59:41 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.072000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 9098; +# Time: 130111 08:59:45 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.844000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2490; +# Time: 130111 08:59:46 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.216000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 4790; +# Time: 130111 08:59:49 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.486000 Lock_time: 0.000000 Rows_sent: 0 
Rows_examined: 0 +select * from t2 where id < 2708; +# Time: 130111 08:59:51 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.287000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 1261; +# Time: 130111 08:59:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.709000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 3098); +# Time: 130111 08:59:53 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.566000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 3796; +# Time: 130111 08:59:54 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.827000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select c from t1 where id = 2907; +# Time: 130111 08:59:57 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.507000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +select * from t2 where id < 2542; +# Time: 130111 08:59:52 +# User@Host: [user] @ [] +# Thread_id: 1 Schema: db1 +# Query_time: 0.709000 Lock_time: 0.000000 Rows_sent: 0 Rows_examined: 0 +insert into t (id, val) values (null, 4000); diff --git a/t/pt-agent/basics.t b/t/pt-agent/basics.t new file mode 100644 index 00000000..9c60ea7e --- /dev/null +++ b/t/pt-agent/basics.t @@ -0,0 +1,101 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; + +use File::Temp qw(tempdir); + +use Percona::Test; +use Sandbox; +use Percona::Test::Mock::UserAgent; +require "$trunk/bin/pt-agent"; + +my $dp = new DSNParser(opts=>$dsn_opts); +my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); +my $dbh = $sb->get_dbh_for('master'); +my $dsn = 
$sb->dsn_for('master'); +my $o = new OptionParser(); +$o->get_specs("$trunk/bin/pt-agent"); +$o->get_opts(); +my $cxn = Cxn->new( + dsn_string => $dsn, + OptionParser => $o, + DSNParser => $dp, +); + +Percona::Toolkit->import(qw(Dumper)); +Percona::WebAPI::Representation->import(qw(as_hashref)); + +# Running the agent is going to cause it to schedule the services, +# i.e. write a real crontab. The test box/user shouldn't have a +# crontab, so we'll warn and clobber it if there is one. +my $crontab = `crontab -l 2>/dev/null`; +if ( $crontab ) { + warn "Removing crontab: $crontab\n"; + `crontab -r`; +} + +my $tmp_lib = "/tmp/pt-agent"; +my $tmp_log = "/tmp/pt-agent.log"; +my $tmp_pid = "/tmp/pt-agent.pid"; + +diag(`rm -rf $tmp_lib`) if -d $tmp_lib; +unlink $tmp_log if -f $tmp_log; +unlink $tmp_pid if -f $tmp_pid; + +my $config_file = pt_agent::get_config_file(); +unlink $config_file if -f $config_file; + +my $output; + +{ + no strict; + no warnings; + local *pt_agent::start_agent = sub { + print "start_agent\n"; + return { + agent => 0, + client => 0, + daemon => 0, + }; + }; + local *pt_agent::run_agent = sub { + print "run_agent\n"; + }; + + $output = output( + sub { + pt_agent::main( + qw(--api-key 123) + ); + }, + stderr => 1, + ); +} + +like( + $output, + qr/start_agent\nrun_agent\n/, + "Starts and runs without a config file" +); + +# ############################################################################# +# Done. 
+# ############################################################################# + +`crontab -r 2>/dev/null`; + +if ( -f $config_file ) { + unlink $config_file + or warn "Error removing $config_file: $OS_ERROR"; +} + +done_testing; diff --git a/t/pt-agent/get_services.t b/t/pt-agent/get_services.t new file mode 100644 index 00000000..30c6e8cb --- /dev/null +++ b/t/pt-agent/get_services.t @@ -0,0 +1,423 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; + +use JSON; +use File::Temp qw(tempdir); + +use Percona::Test; +use Percona::Test::Mock::UserAgent; +use Percona::Test::Mock::AgentLogger; +require "$trunk/bin/pt-agent"; + +Percona::Toolkit->import(qw(Dumper)); +Percona::WebAPI::Representation->import(qw(as_hashref)); + +my @log; +my $logger = Percona::Test::Mock::AgentLogger->new(log => \@log); +pt_agent::_logger($logger); + +# Fake --lib and --spool dirs. +my $tmpdir = tempdir("/tmp/pt-agent.$PID.XXXXXX", CLEANUP => 1); +output( sub { + pt_agent::init_lib_dir(lib_dir => $tmpdir); +}); + +# ############################################################################# +# Create mock client and Agent +# ############################################################################# + +# These aren't the real tests yet: to run_agent, first we need +# a client and Agent, so create mock ones. 
+ +my $output; +my $json = JSON->new->canonical([1])->pretty; +$json->allow_blessed([]); +$json->convert_blessed([]); + +my $ua = Percona::Test::Mock::UserAgent->new( + encode => sub { my $c = shift; return $json->encode($c || {}) }, +); + +my $client = eval { + Percona::WebAPI::Client->new( + api_key => '123', + ua => $ua, + ); +}; + +is( + $EVAL_ERROR, + '', + 'Create mock client' +) or die; + +my $agent = Percona::WebAPI::Resource::Agent->new( + uuid => '123', + hostname => 'host', + username => 'user', + links => { + self => '/agents/123', + config => '/agents/123/config', + }, +); + +my @cmds; +my $exec_cmd = sub { + my $cmd = shift; + push @cmds, $cmd; + return 0; +}; + +# ############################################################################# +# Test get_services() +# ############################################################################# + +# query-history + +my $run0 = Percona::WebAPI::Resource::Task->new( + name => 'query-history', + number => '0', + program => 'pt-query-digest --output json', + output => 'spool', +); + +my $qh = Percona::WebAPI::Resource::Service->new( + ts => '100', + name => 'query-history', + run_schedule => '1 * * * *', + spool_schedule => '2 * * * *', + tasks => [ $run0 ], + links => { + self => '/query-history', + data => '/query-history/data', + }, +); + +my $run1 = Percona::WebAPI::Resource::Task->new( + name => 'start-query-history', + number => '0', + program => 'echo "start-qh"', + output => 'spool', +); + +my $start_qh = Percona::WebAPI::Resource::Service->new( + ts => '100', + name => 'start-query-history', + meta => 1, + tasks => [ $run1 ], + links => { + self => '/query-history', + data => '/query-history/data', + }, +); + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Service' }, + content => [ + as_hashref($qh, with_links => 1), + as_hashref($start_qh, with_links => 1), + ], + }, +]; + +my $services = {}; +my $success = 0; + +$output = output( + sub { + ($services, $success) = 
pt_agent::get_services( + # Required args + link => '/agents/123/services', + agent => $agent, + client => $client, + lib_dir => $tmpdir, + services => $services, + # Optional args, for testing + json => $json, + bin_dir => "$trunk/bin/", + exec_cmd => $exec_cmd, + ); + }, + stderr => 1, +); + +is( + $success, + 1, + "Success" +); + +is( + ref $services, + 'HASH', + "Return services as hashref" +) or diag(Dumper($services)); + +is( + scalar keys %$services, + 2, + 'Only 2 services' +) or diag(Dumper($services)); + +ok( + exists $services->{'query-history'}, + "services hashref keyed on service name" +) or diag(Dumper($services)); + +isa_ok( + ref $services->{'query-history'}, + 'Percona::WebAPI::Resource::Service', + 'services->{query-history}' +); + +my $crontab = -f "$tmpdir/crontab" ? slurp_file("$tmpdir/crontab") : ''; +is( + $crontab, + "1 * * * * $trunk/bin/pt-agent --run-service query-history +2 * * * * $trunk/bin/pt-agent --send-data query-history +", + "crontab file" +) or diag($output, `ls -l $tmpdir/*`, Dumper(\@log)); + +is_deeply( + \@cmds, + [ + "$trunk/bin/pt-agent --run-service start-query-history >> $tmpdir/logs/start-stop.log 2>&1", + "crontab $tmpdir/crontab > $tmpdir/crontab.err 2>&1", + ], + "Ran start-service and crontab" +) or diag(Dumper(\@cmds), Dumper(\@log)); + +ok( + -f "$tmpdir/services/query-history", + "Wrote --lib/services/query-history" +); + +# ############################################################################# +# A more realistic transaction +# ############################################################################# + +# services/query-history should exist from the previous tests. For these +# tests, get_services() should update the file, so we empty it and check +# that it's re-created, i.e. updated. 
+diag(`echo -n > $tmpdir/services/query-history`); +is( + -s "$tmpdir/services/query-history", + 0, + "Start: empty --lib/services/query-history" +); + +# start-query-history + +my $task1 = Percona::WebAPI::Resource::Task->new( + name => 'disable-slow-query-log', + number => '0', + query => "SET GLOBAL slow_query_log=0", +); + +my $task2 = Percona::WebAPI::Resource::Task->new( + name => 'set-slow-query-log-file', + number => '1', + query => "SET GLOBAL slow_query_log_file='/tmp/slow.log'", +); + +my $task3 = Percona::WebAPI::Resource::Task->new( + name => 'set-long-query-time', + number => '2', + query => "SET GLOBAL long_query_time=0.01", +); + +my $task4 = Percona::WebAPI::Resource::Task->new( + name => 'enable-slow-query-log', + number => '3', + query => "SET GLOBAL slow_query_log=1", +); + +$start_qh = Percona::WebAPI::Resource::Service->new( + ts => '100', + name => 'start-query-history', + tasks => [ $task1, $task2, $task3, $task4 ], + meta => 1, + links => { + self => '/query-history', + data => '/query-history/data', + }, +); + +# stop-query-history + +my $task5 = Percona::WebAPI::Resource::Task->new( + name => 'disable-slow-query-log', + number => '0', + query => "SET GLOBAL slow_query_log=0", +); + +my $stop_qh = Percona::WebAPI::Resource::Service->new( + ts => '100', + name => 'stop-query-history', + tasks => [ $task5 ], + meta => 1, + links => { + self => '/query-history', + data => '/query-history/data', + }, +); + +# We'll use query-history from the previous tests. 
+ +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Service' }, + content => [ + as_hashref($start_qh, with_links => 1), + as_hashref($stop_qh, with_links => 1), + as_hashref($qh, with_links => 1), # from previous tests + ], + }, +]; + +@log = (); +@cmds = (); +$services = {}; +$success = 0; + +$output = output( + sub { + ($services, $success) = pt_agent::get_services( + # Required args + link => '/agents/123/services', + agent => $agent, + client => $client, + lib_dir => $tmpdir, + services => $services, + # Optional args, for testing + json => $json, + bin_dir => "$trunk/bin/", + exec_cmd => $exec_cmd, + ); + }, + stderr => 1, +); + +is_deeply( + \@cmds, + [ + "$trunk/bin/pt-agent --run-service start-query-history >> $tmpdir/logs/start-stop.log 2>&1", + "crontab $tmpdir/crontab > $tmpdir/crontab.err 2>&1", + ], + "Start: ran start-query-history" +) or diag(Dumper(\@cmds), $output); + +ok( + -f "$tmpdir/services/start-query-history", + "Start: added --lib/services/start-query-history" +) or diag($output); + +ok( + -f "$tmpdir/services/stop-query-history", + "Start: added --lib/services/stop-query-history" +) or diag($output); + +my $contents = slurp_file("$tmpdir/services/query-history"); +like( + $contents, + qr/query-history/, + "Start: updated --lib/services/query-history" +) or diag($output); + +$crontab = slurp_file("$tmpdir/crontab"); +is( + $crontab, + "1 * * * * $trunk/bin/pt-agent --run-service query-history +2 * * * * $trunk/bin/pt-agent --send-data query-history +", + "Start: only scheduled query-history" +) or diag($output); + +# ############################################################################# +# Update and restart a service +# ############################################################################# + +# pt-agent should remove a service's --lib/meta/ files when restarting, +# so create one and check that it's removed. 
+diag(`touch $tmpdir/meta/query-history.foo`); +ok( + -f "$tmpdir/meta/query-history.foo", + "Restart: meta file exists" +); + +$qh = Percona::WebAPI::Resource::Service->new( + ts => '200', # was 100 + name => 'query-history', + run_schedule => '1 * * * *', + spool_schedule => '2 * * * *', + tasks => [ $run0 ], + links => { + self => '/query-history', + data => '/query-history/data', + }, +); + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Service' }, + content => [ + as_hashref($start_qh, with_links => 1), # has not changed + as_hashref($stop_qh, with_links => 1), # has not changed + as_hashref($qh, with_links => 1), + ], + }, +]; + +@log = (); +@cmds = (); +$success = 0; + +$output = output( + sub { + ($services, $success) = pt_agent::get_services( + # Required args + link => '/agents/123/services', + agent => $agent, + client => $client, + lib_dir => $tmpdir, + services => $services, # retval from previous call + # Optional args, for testing + json => $json, + bin_dir => "$trunk/bin/", + exec_cmd => $exec_cmd, + ); + }, + stderr => 1, +); + +is_deeply( + \@cmds, + [ + "$trunk/bin/pt-agent --run-service stop-query-history >> $tmpdir/logs/start-stop.log 2>&1", + "$trunk/bin/pt-agent --run-service start-query-history >> $tmpdir/logs/start-stop.log 2>&1", + "crontab $tmpdir/crontab > $tmpdir/crontab.err 2>&1", + ], + "Restart: ran stop-query-history then start-query-history" +) or diag(Dumper(\@cmds), $output); + +ok( + !-f "$tmpdir/meta/query-history.foo", + "Restart: meta file removed" +) or diag($output); + +# ############################################################################# +# Done. 
+# ############################################################################# +done_testing; diff --git a/t/pt-agent/init_agent.t b/t/pt-agent/init_agent.t new file mode 100644 index 00000000..2ff9ba3a --- /dev/null +++ b/t/pt-agent/init_agent.t @@ -0,0 +1,280 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; +use JSON; +use File::Temp qw(tempdir); + +use Percona::Test; +use Percona::Test::Mock::UserAgent; +use Percona::Test::Mock::AgentLogger; +require "$trunk/bin/pt-agent"; + +Percona::Toolkit->import(qw(Dumper)); +Percona::WebAPI::Representation->import(qw(as_hashref)); + +my $tmpdir = tempdir("/tmp/pt-agent.$PID.XXXXXX", CLEANUP => 1); + +my $json = JSON->new->canonical([1])->pretty; +$json->allow_blessed([]); +$json->convert_blessed([]); + +my @log; +my $logger = Percona::Test::Mock::AgentLogger->new(log => \@log); +pt_agent::_logger($logger); + +my $ua = Percona::Test::Mock::UserAgent->new( + encode => sub { my $c = shift; return $json->encode($c || {}) }, +); + +my $client = eval { + Percona::WebAPI::Client->new( + api_key => '123', + ua => $ua, + ); +}; + +is( + $EVAL_ERROR, + '', + 'Create Client with mock user agent' +) or die; + +my @ok; +my $oktorun = sub { + return shift @ok; +}; + +my @wait; +my $interval = sub { + my $t = shift; + push @wait, $t; +}; + +# ############################################################################# +# Init a new agent, i.e. create it. 
+# ############################################################################# + +my $post_agent = Percona::WebAPI::Resource::Agent->new( + uuid => '123', + hostname => 'host1', + username => 'name1', + versions => { + }, + links => { + self => '/agents/123', + config => '/agents/123/config', + }, +); + +my $return_agent = Percona::WebAPI::Resource::Agent->new( + uuid => '123', + hostname => 'host2', + username => 'name2', + versions => { + }, + links => { + self => '/agents/123', + config => '/agents/123/config', + }, +); + +$ua->{responses}->{post} = [ + { + headers => { 'Location' => '/agents/123' }, + }, +]; + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => as_hashref($return_agent, with_links =>1 ), + }, +]; + +my $got_agent; +my $output = output( + sub { + ($got_agent) = pt_agent::init_agent( + agent => $post_agent, + action => 'post', + link => "/agents", + client => $client, + interval => $interval, + ); + }, + stderr => 1, +); + +is( + $got_agent->hostname, + 'host2', + 'Got and returned Agent' +) or diag($output, Dumper(as_hashref($got_agent, with_links => 1))); + +is( + scalar @wait, + 0, + "Client did not wait (new Agent)" +) or diag($output); + +# ############################################################################# +# Repeat this test but this time fake an error, so the tool isn't able +# to create the Agent first time, so it should wait (call interval), +# and try again. 
+# #############################################################################
+
+$return_agent->{id} = '456';
+$return_agent->{links} = {
+ self => '/agents/456',
+ config => '/agents/456/config',
+};
+
+$ua->{responses}->{post} = [
+ { # 1, the fake error
+ code => 500,
+ },
+ # 2, code should call interval
+ { # 3, code should try again, then receive this
+ code => 200,
+ headers => { 'Location' => '/agents/456' },
+ },
+];
+ # 4, code will GET the new Agent
+$ua->{responses}->{get} = [
+ {
+ headers => { 'X-Percona-Resource-Type' => 'Agent' },
+ content => $return_agent,
+ },
+];
+
+@ok = qw(1 1 0);
+@wait = ();
+$ua->{requests} = [];
+
+$output = output(
+ sub {
+ $got_agent = pt_agent::init_agent(
+ agent => $post_agent,
+ action => 'post',
+ link => "/agents",
+ client => $client,
+ interval => $interval,
+ oktorun => $oktorun,
+ );
+ },
+ stderr => 1,
+);
+
+is(
+ $got_agent->hostname,
+ 'host2',
+ 'Got and returned Agent after error'
+) or diag($output, Dumper(as_hashref($got_agent, with_links => 1)));
+
+is(
+ scalar @wait,
+ 1,
+ "Client waited after error"
+);
+
+is_deeply(
+ $ua->{requests},
+ [
+ 'POST /agents', # first attempt, 500 error
+ 'POST /agents', # second attempt, 200 OK
+ 'GET /agents/456', # GET new Agent
+ ],
+ "POST POST GET new Agent after error"
+) or diag(Dumper($ua->{requests}));
+
+TODO: {
+ local $TODO = "False-positive";
+ like(
+ $output,
+ qr{WARNING Failed to POST /agents},
+ "POST /agents failure logged after error"
+ ) or diag(Dumper($ua->{requests}));
+}
+
+# #############################################################################
+# Init an existing agent, i.e. update it. 
+# ############################################################################# + +my $put_agent = Percona::WebAPI::Resource::Agent->new( + uuid => '123', + hostname => 'host3', + username => 'name3', + versions => { + }, + links => { + self => '/agents/123', + config => '/agents/123/config', + }, +); + +$ua->{responses}->{put} = [ + { + code => 200, + headers => { + Location => '/agents/123', + }, + }, +]; +$ua->{responses}->{get} = [ + { + code => 200, + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => $return_agent, + } +]; + +@wait = (); +$ua->{requests} = []; + +$output = output( + sub { + $got_agent = pt_agent::init_agent( + agent => $put_agent, + action => 'put', + link => "/agents/123", + client => $client, + interval => $interval, + ); + }, + stderr => 1, +); + +is( + $got_agent->hostname, + 'host2', + 'PUT Agent' +) or diag($output, Dumper(as_hashref($got_agent, with_links => 1))); + +is( + scalar @wait, + 0, + "Client did not wait (saved Agent)" +); + +is_deeply( + $ua->{requests}, + [ + 'PUT /agents/123', + 'GET /agents/123', + ], + "PUT then GET Agent" +) or diag(Dumper($ua->{requests})); + +# ############################################################################# +# Done. 
+# ############################################################################# +done_testing; diff --git a/t/pt-agent/make_new_crontab.t b/t/pt-agent/make_new_crontab.t new file mode 100644 index 00000000..05c90b46 --- /dev/null +++ b/t/pt-agent/make_new_crontab.t @@ -0,0 +1,151 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; +use JSON; +use File::Temp qw(tempfile); + +use Percona::Test; +require "$trunk/bin/pt-agent"; + +Percona::Toolkit->import(qw(have_required_args Dumper)); + +my $sample = "t/pt-agent/samples"; + +sub test_make_new_crontab { + my (%args) = @_; + have_required_args(\%args, qw( + file + services + )) or die; + my $file = $args{file}; + my $services = $args{services}; + + my $crontab_list = slurp_file("$trunk/$sample/$file.in"); + + my $new_crontab = pt_agent::make_new_crontab( + services => $services, + crontab_list => $crontab_list, + bin_dir => '', + ); + + ok( + no_diff( + $new_crontab, + "$sample/$file.out", + cmd_output => 1, + ), + $args{name} || $file, + ) or diag($new_crontab); +} + +my $run0 = Percona::WebAPI::Resource::Task->new( + name => 'query-history', + number => '0', + program => 'pt-query-digest', + options => '--output json', + output => 'spool', +); + +my $svc0 = Percona::WebAPI::Resource::Service->new( + ts => '100', + name => 'query-history', + run_schedule => '* 8 * * 1,2,3,4,5', + spool_schedule => '* 9 * * 1,2,3,4,5', + tasks => [ $run0 ], +); + +# Empty crontab, add the service. +test_make_new_crontab( + file => "crontab001", + services => [ $svc0 ], +); + +# Crontab has another line, add the service to it. 
+test_make_new_crontab( + file => "crontab002", + services => [ $svc0 ], +); + +# Crontab has another line and an old service, remove the old service +# and add the current service. +test_make_new_crontab( + file => "crontab003", + services => [ $svc0 ], +); + +# Crontab has old service, remove it and add only new service. +test_make_new_crontab( + file => "crontab004", + services => [ $svc0 ], +); + +# ############################################################################# +# Use real crontab. +# ############################################################################# + +# The previous tests pass in a crontab file to make testing easier. +# Now test that make_new_crontab() will run `crontab -l' if not given +# input. To test this, we add a fake line to our crontab. If +# make_new_crontab() really runs `crontab -l', then this fake line +# will be in the new crontab it returns. + +my $crontab = `crontab -l 2>/dev/null`; +SKIP: { + skip 'Crontab is not empty', 3 if $crontab; + + # On most systems[1], crontab lines must end with a newline, + # else an error like this happens: + # "/tmp/new_crontab_file":1: premature EOF + # errors in crontab file, can't install. + # [1] Ubuntu 10 and Mac OS X work without the newline. 
+ my ($fh, $file) = tempfile(); + print {$fh} "* 0 * * * date > /dev/null\n"; + close $fh or warn "Cannot close $file: $OS_ERROR"; + my $output = `crontab $file 2>&1`; + + $crontab = `crontab -l 2>&1`; + + is( + $crontab, + "* 0 * * * date > /dev/null\n", + "Set other crontab line" + ) or diag($output); + + unlink $file or warn "Cannot remove $file: $OS_ERROR"; + + my $new_crontab = pt_agent::make_new_crontab( + services => [ $svc0 ], + bin_dir => '', + ); + + is( + $new_crontab, + "* 0 * * * date > /dev/null +* 8 * * 1,2,3,4,5 pt-agent --run-service query-history +* 9 * * 1,2,3,4,5 pt-agent --send-data query-history +", + "Runs crontab -l by default" + ); + + system("crontab -r 2>/dev/null"); + $crontab = `crontab -l 2>/dev/null`; + is( + $crontab, + "", + "Removed crontab" + ); +}; + +# ############################################################################# +# Done. +# ############################################################################# +done_testing; diff --git a/t/pt-agent/replace_special_vars.t b/t/pt-agent/replace_special_vars.t new file mode 100644 index 00000000..19811a6f --- /dev/null +++ b/t/pt-agent/replace_special_vars.t @@ -0,0 +1,73 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; +use JSON; +use File::Temp qw(tempfile); + +use Percona::Test; +use Percona::Test::Mock::AgentLogger; +require "$trunk/bin/pt-agent"; + +Percona::Toolkit->import(qw(have_required_args Dumper)); + +my @log; +my $logger = Percona::Test::Mock::AgentLogger->new(log => \@log); +pt_agent::_logger($logger); + +my @output_files = (); +my $store = {}; + +sub test_replace { + my (%args) = @_; + have_required_args(\%args, qw( + cmd + expect + )) or die; + my $cmd = $args{cmd}; + my $expect = 
$args{expect}; + + my $new_cmd = pt_agent::replace_special_vars( + cmd => $cmd, + output_files => \@output_files, + service => 'service-name', + lib_dir => '/var/lib/pt-agent', + meta_dir => '/var/lib/pt-agent/meta', + stage_dir => '/var/spool/.tmp', + spool_dir => '/var/spool', + bin_dir => $trunk, + ts => '123', + store => $store, + ); + + is( + $new_cmd, + $expect, + $cmd, + ); +}; + +@output_files = qw(zero one two); +test_replace( + cmd => "pt-query-digest __RUN_0_OUTPUT__", + expect => "pt-query-digest zero", +); + +$store->{slow_query_log_file} = 'slow.log'; +test_replace( + cmd => "echo '__STORE_slow_query_log_file__' > /var/spool/pt-agent/.tmp/1371269644.rotate-slow-query-log-all-5.1.slow_query_log_file", + expect => "echo 'slow.log' > /var/spool/pt-agent/.tmp/1371269644.rotate-slow-query-log-all-5.1.slow_query_log_file", +); + +# ############################################################################# +# Done. +# ############################################################################# +done_testing; diff --git a/t/pt-agent/run_agent.t b/t/pt-agent/run_agent.t new file mode 100644 index 00000000..60097b09 --- /dev/null +++ b/t/pt-agent/run_agent.t @@ -0,0 +1,527 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; + +plan skip_all => "Need to make start-service testable"; + +use JSON; +use File::Temp qw(tempdir); + +use Percona::Test; +use Sandbox; +use Percona::Test::Mock::UserAgent; +use Percona::Test::Mock::AgentLogger; +require "$trunk/bin/pt-agent"; + +my $dp = new DSNParser(opts=>$dsn_opts); +my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); +my $dbh = $sb->get_dbh_for('master'); +my $dsn = $sb->dsn_for('master'); +my $o = new OptionParser(); 
+$o->get_specs("$trunk/bin/pt-agent"); +$o->get_opts(); +my $cxn = Cxn->new( + dsn_string => $dsn, + OptionParser => $o, + DSNParser => $dp, +); + +Percona::Toolkit->import(qw(Dumper)); +Percona::WebAPI::Representation->import(qw(as_hashref)); + +# Running the agent is going to cause it to schedule the services, +# i.e. write a real crontab. The test box/user shouldn't have a +# crontab, so we'll warn and clobber it if there is one. +my $crontab = `crontab -l 2>/dev/null`; +if ( $crontab ) { + warn "Removing crontab: $crontab\n"; + `crontab -r`; +} + +# Fake --lib and --spool dirs. +my $tmpdir = tempdir("/tmp/pt-agent.$PID.XXXXXX"); #, CLEANUP => 1); +mkdir "$tmpdir/spool" or die "Error making $tmpdir/spool: $OS_ERROR"; + +my @log; +my $logger = Percona::Test::Mock::AgentLogger->new(log => \@log); +pt_agent::_logger($logger); + +# ############################################################################# +# Create mock client and Agent +# ############################################################################# + +# These aren't the real tests yet: to run_agent, first we need +# a client and Agent, so create mock ones. + +my $output; +my $json = JSON->new->canonical([1])->pretty; +$json->allow_blessed([]); +$json->convert_blessed([]); + +my $ua = Percona::Test::Mock::UserAgent->new( + encode => sub { my $c = shift; return $json->encode($c || {}) }, +); + +my $client = eval { + Percona::WebAPI::Client->new( + api_key => '123', + ua => $ua, + ); +}; + +is( + $EVAL_ERROR, + '', + 'Create mock client' +) or die; + +my $agent = Percona::WebAPI::Resource::Agent->new( + uuid => '123', + hostname => 'host', + username => 'user', + links => { + self => '/agents/123', + config => '/agents/123/config', + }, +); + +my $daemon = Daemon->new( + daemonzie => 0, +); + +my @wait; +my $interval = sub { + my $t = shift; + push @wait, $t; + print "interval=" . (defined $t ? $t : 'undef') . 
"\n"; +}; + +# ############################################################################# +# Test run_agent +# ############################################################################# + +my $config = Percona::WebAPI::Resource::Config->new( + ts => 1363720060, + name => 'Default', + options => { + 'lib' => $tmpdir, # required + 'spool' => "$tmpdir/spool", # required + 'check-interval' => "11", + }, + links => { + self => '/agents/123/config', + services => '/agents/123/services', + }, +); + +my $run0 = Percona::WebAPI::Resource::Task->new( + name => 'query-history', + number => '0', + program => 'pt-query-digest', + options => '--output json', + output => 'spool', +); + +my $svc0 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'query-history', + run_schedule => '1 * * * *', + spool_schedule => '2 * * * *', + tasks => [ $run0 ], + links => { + self => '/query-history', + data => '/query-history/data', + }, +); + +my $run1 = Percona::WebAPI::Resource::Task->new( + name => 'start-query-history', + number => '0', + program => 'echo "start-qh"', +); + +my $start_qh = Percona::WebAPI::Resource::Service->new( + ts => '100', + name => 'start-query-history', + meta => 1, + tasks => [ $run1 ], + links => { + self => '/query-history', + data => '/query-history/data', + }, +); + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Config' }, + content => as_hashref($config, with_links => 1), + }, + { + headers => { 'X-Percona-Resource-Type' => 'Service' }, + content => [ + as_hashref($start_qh, with_links => 1), + as_hashref($svc0, with_links => 1), + ], + }, +]; + +my $safeguards = Safeguards->new( + disk_bytes_free => 1024, + disk_pct_free => 1, +); + +# The only thing pt-agent must have is the API key in the config file, +# everything else relies on defaults until the first Config is gotten +# from Percona. 
+my $config_file = pt_agent::get_config_file(); +unlink $config_file if -f $config_file; + +like( + $config_file, + qr/$ENV{LOGNAME}\/\.pt-agent.conf$/, + "Default config file is ~/.pt-agent.config" +); + +pt_agent::write_config( + config => $config +); + +diag(`echo 'api-key=123' >> $config_file`); + +is( + `cat $config_file`, + "check-interval=11\nlib=$tmpdir\nspool=$tmpdir/spool\napi-key=123\n", + "Write Config to config file" +); + +pt_agent::save_agent( + agent => $agent, + lib_dir => $tmpdir, +); + +my @ok_code = (); # callbacks +my @oktorun = ( + 1, # 1st main loop check + 0, # 2nd main loop check +); +my $oktorun = sub { + my $ok = shift @oktorun; + print "oktorun=" . (defined $ok ? $ok : 'undef') . "\n"; + my $code = shift @ok_code; + $code->() if $code; + return $ok +}; + +@wait = (); + +$output = output( + sub { + pt_agent::run_agent( + # Required args + agent => $agent, + client => $client, + daemon => $daemon, + interval => $interval, + lib_dir => $tmpdir, + safeguards => $safeguards, + Cxn => $cxn, + # Optional args, for testing + oktorun => $oktorun, + json => $json, + bin_dir => "$trunk/bin", + ); + }, + stderr => 1, +); + +is( + scalar @wait, + 1, + "Called interval once" +); + +is( + $wait[0], + 11, + "... used Config->options->check-interval" +); + +ok( + -f "$tmpdir/services/query-history", + "Created services/query-history" +) or diag($output); + +chomp(my $n_files = `ls -1 $tmpdir/services| wc -l | awk '{print \$1}'`); +is( + $n_files, + 2, + "... 
created services/query-history and services/start-query-history" +); + +ok( + no_diff( + "cat $tmpdir/services/query-history", + "t/pt-agent/samples/service001", + ), + "query-history service file" +); + +$crontab = `crontab -l 2>/dev/null`; +like( + $crontab, + qr/pt-agent --run-service query-history$/m, + "Scheduled --run-service with crontab" +) or diag(Dumper(\@log)); + +like( + $crontab, + qr/pt-agent --send-data query-history$/m, + "Scheduled --send-data with crontab" +) or diag(Dumper(\@log)); +exit; +# ############################################################################# +# Run run_agent again, like the agent had been stopped and restarted. +# ############################################################################# + +$ua->{responses}->{get} = [ + # First check, fail + { + code => 500, + }, + # interval + # 2nd check, init with latest Config and Services + { + headers => { 'X-Percona-Resource-Type' => 'Config' }, + content => as_hashref($config, with_links => 1), + }, + { + headers => { 'X-Percona-Resource-Type' => 'Service' }, + content => [ as_hashref($svc0, with_links => 1) ], + }, + # interval + # 3rd check, same Config and Services so nothing to do + { + headers => { 'X-Percona-Resource-Type' => 'Config' }, + content => as_hashref($config, with_links => 1), + }, + { + headers => { 'X-Percona-Resource-Type' => 'Service' }, + content => [ as_hashref($svc0, with_links => 1) ], + }, + # interval, oktorun=0 +]; + +@oktorun = ( + 1, # 1st main loop check + # First check, error 500 + 1, # 2nd main loop check + # Init with latest Config and Services + 1, # 3rd main loop check + # Same Config and services + 0, # 4th main loop check +); + +# Before the 3rd check, remove the config file (~/.pt-agent.conf) and +# query-history service file. When the tool re-GETs these, they'll be +# the same so it won't recreate them. A bug here will cause these files to +# exist again after running. 
+$ok_code[2] = sub { + unlink "$config_file"; + unlink "$tmpdir/services/query-history"; + Percona::Test::wait_until(sub { ! -f "$config_file" }); + Percona::Test::wait_until(sub { ! -f "$tmpdir/services/query-history" }); +}; + +@wait = (); + +$output = output( + sub { + pt_agent::run_agent( + # Required args + agent => $agent, + client => $client, + daemon => $daemon, + interval => $interval, + lib_dir => $tmpdir, + Cxn => $cxn, + # Optional args, for testing + oktorun => $oktorun, + json => $json, + ); + }, + stderr => 1, +); + +is_deeply( + \@wait, + [ 60, 11, 11 ], + "Got Config after error" +) or diag(Dumper(\@wait)); + +ok( + ! -f "$config_file", + "No Config diff, no config file change" +); + +ok( + ! -f "$tmpdir/services/query-history", + "No Service diff, no service file changes" +); + +my $new_crontab = `crontab -l 2>/dev/null`; +is( + $new_crontab, + $crontab, + "Crontab is the same" +); + +# ############################################################################# +# Test a run_once_on_start service +# ############################################################################# + +diag(`rm -f $tmpdir/* >/dev/null 2>&1`); +diag(`rm -rf $tmpdir/services/*`); +diag(`rm -rf $tmpdir/spool/*`); + +# When pt-agent manually runs --run-service test-run-at-start, it's going +# to need an API key because it doesn't call its own run_service(), it runs +# another instance of itself with system(). So put the fake API key in +# the default config file. 
+unlink $config_file if -f $config_file;
+diag(`echo "api-key=123" > $config_file`);
+
+$config = Percona::WebAPI::Resource::Config->new(
+ ts => 1363720060,
+ name => 'Test run_once_on_start',
+ options => {
+ 'check-interval' => "15",
+ 'lib' => $tmpdir,
+ 'spool' => "$tmpdir/spool",
+ 'pid' => "$tmpdir/pid",
+ 'log' => "$tmpdir/log"
+ },
+ links => {
+ self => '/agents/123/config',
+ services => '/agents/123/services',
+ },
+);
+
+$run0 = Percona::WebAPI::Resource::Task->new(
+ name => 'run-at-start',
+ number => '0',
+ program => 'date',
+ output => 'spool',
+);
+
+$svc0 = Percona::WebAPI::Resource::Service->new(
+ ts => 100,
+ name => 'test-run-at-start',
+ run_schedule => '0 0 1 1 *',
+ run_once => 1, # here's the magic
+ tasks => [ $run0 ],
+ links => {
+ self => '/query-history',
+ data => '/query-history/data',
+ },
+);
+
+$ua->{responses}->{get} = [
+ {
+ headers => { 'X-Percona-Resource-Type' => 'Config' },
+ content => as_hashref($config, with_links => 1),
+ },
+ {
+ headers => { 'X-Percona-Resource-Type' => 'Service' },
+ content => [ as_hashref($svc0, with_links => 1) ],
+ },
+ {
+ headers => { 'X-Percona-Resource-Type' => 'Config' },
+ content => as_hashref($config, with_links => 1),
+ },
+ {
+ headers => { 'X-Percona-Resource-Type' => 'Service' },
+ content => [ as_hashref($svc0, with_links => 1) ],
+ },
+];
+
+@wait = ();
+@ok_code = (); # callbacks
+@oktorun = (
+ 1, # 1st main loop check
+ # Run once
+ 1, # 2nd main loop check
+ # Don't run it again
+ 0, # 3rd main loop check
+);
+
+$output = output(
+ sub {
+ pt_agent::run_agent(
+ # Required args
+ agent => $agent,
+ client => $client,
+ daemon => $daemon,
+ interval => $interval,
+ lib_dir => $tmpdir,
+ Cxn => $cxn,
+ # Optional args, for testing
+ oktorun => $oktorun,
+ json => $json,
+ bin_dir => "$trunk/bin/",
+ );
+ },
+ stderr => 1,
+);
+
+Percona::Test::wait_for_files("$tmpdir/spool/test-run-at-start/test-run-at-start");
+
+like(
+ $output,
+ qr/Starting test-run-at-start service/,
+ "Ran 
service on start" +); + +my @runs = $output =~ m/Starting test-run-at-start service/g; + +is( + scalar @runs, + 1, + "... only ran it once" +); + +chomp($output = `cat $tmpdir/spool/test-run-at-start/test-run-at-start 2>/dev/null`); +ok( + $output, + "... service ran at start" +) or diag($output); + +chomp($output = `crontab -l`); +unlike( + $output, + qr/--run-service test-run-at-start/, + "... service was not scheduled" +); + +# ############################################################################# +# Done. +# ############################################################################# + +# This shouldn't cause an error, but if it does, let it show up +# in the results as an error. +`crontab -r`; + +if ( -f $config_file ) { + unlink $config_file + or warn "Error removing $config_file: $OS_ERROR"; +} + +done_testing; diff --git a/t/pt-agent/run_service.t b/t/pt-agent/run_service.t new file mode 100644 index 00000000..15c58103 --- /dev/null +++ b/t/pt-agent/run_service.t @@ -0,0 +1,503 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; +use JSON; +use File::Temp qw(tempdir); + +$ENV{PTTEST_PRETTY_JSON} = 1; + +use Percona::Test; +use Sandbox; +use Percona::Test::Mock::UserAgent; +use Percona::Test::Mock::AgentLogger; +require "$trunk/bin/pt-agent"; + +my $dp = new DSNParser(opts=>$dsn_opts); +my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); +my $dbh = $sb->get_dbh_for('master'); +my $dsn = $sb->dsn_for('master'); +my $o = new OptionParser(); +$o->get_specs("$trunk/bin/pt-agent"); +$o->get_opts(); + +Percona::Toolkit->import(qw(Dumper have_required_args)); +Percona::WebAPI::Representation->import(qw(as_hashref)); + +my @log; +my $logger = 
Percona::Test::Mock::AgentLogger->new(log => \@log);
+pt_agent::_logger($logger);
+
+my $sample = "t/pt-agent/samples";
+
+# Create fake spool and lib dirs. Service-related subs in pt-agent
+# automatically add "/services" to the lib dir, but the spool dir is
+# used as-is.
+my $tmpdir = tempdir("/tmp/pt-agent.$PID.XXXXXX", CLEANUP => 1);
+output(
+ sub { pt_agent::init_lib_dir(lib_dir => $tmpdir) }
+);
+my $spool_dir = "$tmpdir/spool";
+
+sub write_svc_files {
+ my (%args) = @_;
+ have_required_args(\%args, qw(
+ services
+ )) or die;
+ my $services = $args{services};
+
+ my $output = output(
+ sub {
+ pt_agent::write_services(
+ sorted_services => { added => $services },
+ lib_dir => $tmpdir,
+ );
+ },
+ stderr => 1,
+ die => 1,
+ );
+}
+
+# #############################################################################
+# Create mock client and Agent
+# #############################################################################
+
+my $json = JSON->new->canonical([1])->pretty;
+$json->allow_blessed([]);
+$json->convert_blessed([]);
+
+my $ua = Percona::Test::Mock::UserAgent->new(
+ encode => sub { my $c = shift; return $json->encode($c || {}) },
+);
+
+# Create client, get entry links
+my $links = {
+ agents => '/agents',
+ config => '/agents/1/config',
+ services => '/agents/1/services',
+ 'query-history' => '/query-history',
+};
+
+$ua->{responses}->{get} = [
+ {
+ content => $links,
+ },
+];
+
+my $client = eval {
+ Percona::WebAPI::Client->new(
+ api_key => '123',
+ ua => $ua,
+ );
+};
+is(
+ $EVAL_ERROR,
+ '',
+ 'Create mock client'
+) or die;
+
+my $agent = Percona::WebAPI::Resource::Agent->new(
+ uuid => '123',
+ hostname => 'prod1',
+ links => $links,
+);
+
+is_deeply(
+ as_hashref($agent),
+ {
+ uuid => '123',
+ hostname => 'prod1',
+ },
+ 'Create mock Agent'
+) or die;
+
+# #############################################################################
+# Simple single task service using a program. 
+# ############################################################################# + +my $run0 = Percona::WebAPI::Resource::Task->new( + name => 'query-history', + number => '0', + program => "__BIN_DIR__/pt-query-digest --output json $trunk/t/lib/samples/slowlogs/slow008.txt", + output => 'spool', +); + +my $svc0 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'query-history', + run_schedule => '1 * * * *', + spool_schedule => '2 * * * *', + tasks => [ $run0 ], +); + +write_svc_files( + services => [ $svc0 ], +); + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => as_hashref($agent, with_links => 1), + }, +]; + +my $exit_status; +my $output = output( + sub { + $exit_status = pt_agent::run_service( + api_key => '123', + service => 'query-history', + lib_dir => $tmpdir, + spool_dir => $spool_dir, + Cxn => '', + # for testing: + client => $client, + agent => $agent, + entry_links => $links, + prefix => '1', + json => $json, + bin_dir => "$trunk/bin", + ); + }, +); + +ok( + no_diff( + "cat $tmpdir/spool/query-history/1.query-history.data", + "$sample/query-history/data001.json", + post_pipe => 'grep -v \'"name" :\'', + ), + "1 run: spool data (query-history/data001.json)" +) or diag( + `ls -l $tmpdir/spool/query-history/`, + `cat $tmpdir/logs/query-history.run`, + Dumper(\@log) +); + +chomp(my $n_files = `ls -1 $spool_dir/query-history/*.data | wc -l | awk '{print \$1}'`); +is( + $n_files, + 1, + "1 run: only wrote spool data" +) or diag(`ls -l $spool_dir`); + +is( + $exit_status, + 0, + "1 run: exit 0" +); + +ok( + -f "$tmpdir/spool/query-history/1.query-history.meta", + "1 run: .meta file exists" +); + +# ############################################################################# +# Service with two task, both using a program. 
+# ############################################################################# + +diag(`rm -rf $tmpdir/spool/* $tmpdir/services/*`); +@log = (); + +# The result is the same as the previous single-run test, but instead of +# having pqd read the slowlog directly, we have the first run cat the +# log to a tmp file which pt-agent should auto-create. Then pqd in run1 +# references this tmp file. + +$run0 = Percona::WebAPI::Resource::Task->new( + name => 'cat-slow-log', + number => '0', + program => "cat $trunk/t/lib/samples/slowlogs/slow008.txt", + output => 'tmp', +); + +my $run1 = Percona::WebAPI::Resource::Task->new( + name => 'query-history', + number => '1', + program => "__BIN_DIR__/pt-query-digest --output json __RUN_0_OUTPUT__", + output => 'spool', +); + +$svc0 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'query-history', + run_schedule => '3 * * * *', + spool_schedule => '4 * * * *', + tasks => [ $run0, $run1 ], +); + +write_svc_files( + services => [ $svc0 ], +); + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => as_hashref($agent, with_links => 1), + }, +]; + +$output = output( + sub { + $exit_status = pt_agent::run_service( + api_key => '123', + service => 'query-history', + spool_dir => $spool_dir, + lib_dir => $tmpdir, + Cxn => '', + # for testing: + client => $client, + agent => $agent, + entry_links => $links, + prefix => '2', + json => $json, + bin_dir => "$trunk/bin", + ); + }, +); + +ok( + no_diff( + "cat $tmpdir/spool/query-history/2.query-history.data", + "$sample/query-history/data001.json", + post_pipe => 'grep -v \'"name" :\'', + ), + "2 runs: spool data (query-history/data001.json)" +) or diag( + `ls -l $tmpdir/spool/query-history/`, + `cat $tmpdir/logs/query-history.run`, + Dumper(\@log) +); + +chomp($n_files = `ls -1 $spool_dir/query-history/*.data | wc -l | awk '{print \$1}'`); +is( + $n_files, + 1, + "2 runs: only wrote spool data" +) or diag(`ls -l $spool_dir`); + 
+is( + $exit_status, + 0, + "2 runs: exit 0" +); + +my @tmp_files = glob "$tmpdir/spool/.tmp/*"; +is_deeply( + \@tmp_files, + [], + "2 runs: temp file removed" +); + +# ############################################################################# +# More realistc: 3 services, multiple tasks, using programs and queries. +# ############################################################################# + +SKIP: { + skip 'Cannot connect to sandbox master', 5 unless $dbh; + skip 'No HOME environment variable', 5 unless $ENV{HOME}; + + diag(`rm -rf $tmpdir/spool/* $tmpdir/services/*`); + @log = (); + + my (undef, $old_genlog) = $dbh->selectrow_array("SHOW VARIABLES LIKE 'general_log_file'"); + + my $new_genlog = "$tmpdir/genlog"; + + # First service: set up + my $task00 = Percona::WebAPI::Resource::Task->new( + name => 'disable-gen-log', + number => '0', + query => "SET GLOBAL general_log=OFF", + ); + my $task01 = Percona::WebAPI::Resource::Task->new( + name => 'set-gen-log-file', + number => '1', + query => "SET GLOBAL general_log_file='$new_genlog'", + ); + my $task02 = Percona::WebAPI::Resource::Task->new( + name => 'enable-gen-log', + number => '2', + query => "SET GLOBAL general_log=ON", + ); + my $svc0 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'enable-gen-log', + run_schedule => '1 * * * *', + spool_schedule => '2 * * * *', + tasks => [ $task00, $task01, $task02 ], + ); + + # Second service: the actual service + my $task10 = Percona::WebAPI::Resource::Task->new( + name => 'query-history', + number => '1', + program => "$trunk/bin/pt-query-digest --output json --type genlog $new_genlog", + output => 'spool', + ); + my $svc1 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'query-history', + run_schedule => '3 * * * *', + spool_schedule => '4 * * * *', + tasks => [ $task10 ], + ); + + # Third service: tear down + my $task20 = Percona::WebAPI::Resource::Task->new( + name => 'disable-gen-log', + number => '0', + query => "SET 
GLOBAL general_log=OFF", + ); + my $task21 = Percona::WebAPI::Resource::Task->new( + name => 'set-gen-log-file', + number => '1', + query => "SET GLOBAL general_log_file='$old_genlog'", + ); + my $task22 = Percona::WebAPI::Resource::Task->new( + name => 'enable-gen-log', + number => '2', + query => "SET GLOBAL general_log=ON", + ); + my $svc2 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'disable-gen-log', + run_schedule => '5 * * * *', + spool_schedule => '6 * * * *', + tasks => [ $task20, $task21, $task22 ], + ); + + write_svc_files( + services => [ $svc0, $svc1, $svc2 ], + ); + + $ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => as_hashref($agent, with_links => 1), + }, + { + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => as_hashref($agent, with_links => 1), + }, + { + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => as_hashref($agent, with_links => 1), + }, + ]; + + my $cxn = Cxn->new( + dsn_string => $dsn, + OptionParser => $o, + DSNParser => $dp, + ); + + # Run the first service. + $output = output( + sub { + $exit_status = pt_agent::run_service( + api_key => '123', + service => 'enable-gen-log', + spool_dir => $spool_dir, + lib_dir => $tmpdir, + Cxn => $cxn, + # for testing: + client => $client, + agent => $agent, + entry_links => $links, + prefix => '3', + json => $json, + bin_dir => "$trunk/bin", + ); + }, + ); + + my (undef, $genlog) = $dbh->selectrow_array( + "SHOW VARIABLES LIKE 'general_log_file'"); + is( + $genlog, + $new_genlog, + "Task set MySQL var" + ) or diag($output); + + # Pretend some time passes... + + # The next service doesn't need MySQL, so it shouldn't connect to it. + # To check this, the genlog before running and after running should + # be identical. + `cp $new_genlog $tmpdir/genlog-before`; + + # Run the second service. 
+ $output = output( + sub { + $exit_status = pt_agent::run_service( + api_key => '123', + service => 'query-history', + spool_dir => $spool_dir, + lib_dir => $tmpdir, + Cxn => $cxn, + # for testing: + client => $client, + agent => $agent, + entry_links => $links, + prefix => '4', + json => $json, + bin_dir => "$trunk/bin", + ); + }, + ); + + `cp $new_genlog $tmpdir/genlog-after`; + my $diff = `diff $tmpdir/genlog-before $tmpdir/genlog-after`; + is( + $diff, + '', + "Tasks didn't need MySQL, didn't connect to MySQL" + ) or diag($output); + + # Pretend more time passes... + + # Run the third service. + $output = output( + sub { + $exit_status = pt_agent::run_service( + api_key => '123', + service => 'disable-gen-log', + spool_dir => $spool_dir, + lib_dir => $tmpdir, + Cxn => $cxn, + # for testing: + client => $client, + agent => $agent, + entry_links => $links, + prefix => '5', + json => $json, + bin_dir => "$trunk/bin", + ); + }, + ); + + (undef, $genlog) = $dbh->selectrow_array( + "SHOW VARIABLES LIKE 'general_log_file'"); + is( + $genlog, + $old_genlog, + "Task restored MySQL var" + ) or diag($output); + + $dbh->do("SET GLOBAL general_log=ON"); + $dbh->do("SET GLOBAL general_log_file='$old_genlog'"); +} + +# ############################################################################# +# Done. 
+# ############################################################################# +done_testing; diff --git a/t/pt-agent/samples/crontab001.in b/t/pt-agent/samples/crontab001.in new file mode 100644 index 00000000..e69de29b diff --git a/t/pt-agent/samples/crontab001.out b/t/pt-agent/samples/crontab001.out new file mode 100644 index 00000000..23624ee5 --- /dev/null +++ b/t/pt-agent/samples/crontab001.out @@ -0,0 +1,2 @@ +* 8 * * 1,2,3,4,5 pt-agent --run-service query-history +* 9 * * 1,2,3,4,5 pt-agent --send-data query-history diff --git a/t/pt-agent/samples/crontab002.in b/t/pt-agent/samples/crontab002.in new file mode 100644 index 00000000..072510da --- /dev/null +++ b/t/pt-agent/samples/crontab002.in @@ -0,0 +1 @@ +17 3 * * 1 cmd diff --git a/t/pt-agent/samples/crontab002.out b/t/pt-agent/samples/crontab002.out new file mode 100644 index 00000000..63137a3c --- /dev/null +++ b/t/pt-agent/samples/crontab002.out @@ -0,0 +1,3 @@ +17 3 * * 1 cmd +* 8 * * 1,2,3,4,5 pt-agent --run-service query-history +* 9 * * 1,2,3,4,5 pt-agent --send-data query-history diff --git a/t/pt-agent/samples/crontab003.in b/t/pt-agent/samples/crontab003.in new file mode 100644 index 00000000..25ac9ae4 --- /dev/null +++ b/t/pt-agent/samples/crontab003.in @@ -0,0 +1,3 @@ +17 3 * * 1 cmd +* * * * 1 pt-agent --run-service old-service + diff --git a/t/pt-agent/samples/crontab003.out b/t/pt-agent/samples/crontab003.out new file mode 100644 index 00000000..63137a3c --- /dev/null +++ b/t/pt-agent/samples/crontab003.out @@ -0,0 +1,3 @@ +17 3 * * 1 cmd +* 8 * * 1,2,3,4,5 pt-agent --run-service query-history +* 9 * * 1,2,3,4,5 pt-agent --send-data query-history diff --git a/t/pt-agent/samples/crontab004.in b/t/pt-agent/samples/crontab004.in new file mode 100644 index 00000000..5dfa2eff --- /dev/null +++ b/t/pt-agent/samples/crontab004.in @@ -0,0 +1,2 @@ +1 * * * * pt-agent --run-service foo +2 * * * * pt-agent --send-data foo diff --git a/t/pt-agent/samples/crontab004.out 
b/t/pt-agent/samples/crontab004.out new file mode 100644 index 00000000..23624ee5 --- /dev/null +++ b/t/pt-agent/samples/crontab004.out @@ -0,0 +1,2 @@ +* 8 * * 1,2,3,4,5 pt-agent --run-service query-history +* 9 * * 1,2,3,4,5 pt-agent --send-data query-history diff --git a/t/pt-agent/samples/query-history/data001.json b/t/pt-agent/samples/query-history/data001.json new file mode 100644 index 00000000..7f3d8972 --- /dev/null +++ b/t/pt-agent/samples/query-history/data001.json @@ -0,0 +1,139 @@ + +{ + "classes" : [ + { + "attribute" : "fingerprint", + "checksum" : "C72BF45D68E35A6E", + "distillate" : "SELECT tbl", + "example" : { + "query" : "SELECT MIN(id),MAX(id) FROM tbl", + "ts" : null + }, + "fingerprint" : "select min(id),max(id) from tbl", + "metrics" : { + "Lock_time" : { + "avg" : "0.009453", + "max" : "0.009453", + "median" : "0.009453", + "min" : "0.009453", + "pct" : "0.333333", + "pct_95" : "0.009453", + "stddev" : "0.000000", + "sum" : "0.009453" + }, + "Query_length" : { + "avg" : "31", + "max" : "31", + "median" : "31", + "min" : "31", + "pct" : "0", + "pct_95" : "31", + "stddev" : "0", + "sum" : "31" + }, + "Query_time" : { + "avg" : "0.018799", + "max" : "0.018799", + "median" : "0.018799", + "min" : "0.018799", + "pct" : "0.333333", + "pct_95" : "0.018799", + "stddev" : "0.000000", + "sum" : "0.018799" + }, + "Rows_examined" : { + "avg" : "0", + "max" : "0", + "median" : "0", + "min" : "0", + "pct" : "0", + "pct_95" : "0", + "stddev" : "0", + "sum" : "0" + }, + "Rows_sent" : { + "avg" : "0", + "max" : "0", + "median" : "0", + "min" : "0", + "pct" : "0", + "pct_95" : "0", + "stddev" : "0", + "sum" : "0" + }, + "db" : { + "value" : "db2" + }, + "host" : { + "value" : "" + }, + "user" : { + "value" : "meow" + } + }, + "query_count" : 1, + "tables" : [ + { + "create" : "SHOW CREATE TABLE `db2`.`tbl`\\G", + "status" : "SHOW TABLE STATUS FROM `db2` LIKE 'tbl'\\G" + } + ] + } + ], + "global" : { + "files" : [ + { + "size" : 656 + } + ], + "metrics" : { + 
"Lock_time" : { + "avg" : "0.003151", + "max" : "0.009453", + "median" : "0.000000", + "min" : "0.000000", + "pct_95" : "0.009171", + "stddev" : "0.004323", + "sum" : "0.009453" + }, + "Query_length" : { + "avg" : "24", + "max" : "31", + "median" : "26", + "min" : "14", + "pct_95" : "30", + "stddev" : "6", + "sum" : "72" + }, + "Query_time" : { + "avg" : "0.006567", + "max" : "0.018799", + "median" : "0.000882", + "min" : "0.000002", + "pct_95" : "0.018157", + "stddev" : "0.008359", + "sum" : "0.019700" + }, + "Rows_examined" : { + "avg" : "0", + "max" : "0", + "median" : "0", + "min" : "0", + "pct_95" : "0", + "stddev" : "0", + "sum" : "0" + }, + "Rows_sent" : { + "avg" : "0", + "max" : "0", + "median" : "0", + "min" : "0", + "pct_95" : "0", + "stddev" : "0", + "sum" : "0" + } + }, + "query_count" : 3, + "unique_query_count" : 3 + } +} diff --git a/t/pt-agent/samples/query-history/data001.send b/t/pt-agent/samples/query-history/data001.send new file mode 100644 index 00000000..2684e719 --- /dev/null +++ b/t/pt-agent/samples/query-history/data001.send @@ -0,0 +1,153 @@ +--Ym91bmRhcnk +Content-Disposition: form-data; name="agent" + +{ + "hostname" : "prod1", + "uuid" : "123" +} +--Ym91bmRhcnk +Content-Disposition: form-data; name="meta" + + +--Ym91bmRhcnk +Content-Disposition: form-data; name="data" + +{ + "classes" : [ + { + "attribute" : "fingerprint", + "checksum" : "C72BF45D68E35A6E", + "distillate" : "SELECT tbl", + "example" : { + "query" : "SELECT MIN(id),MAX(id) FROM tbl", + "ts" : null + }, + "fingerprint" : "select min(id),max(id) from tbl", + "metrics" : { + "Lock_time" : { + "avg" : "0.009453", + "max" : "0.009453", + "median" : "0.009453", + "min" : "0.009453", + "pct" : "0.333333", + "pct_95" : "0.009453", + "stddev" : "0.000000", + "sum" : "0.009453" + }, + "Query_length" : { + "avg" : "31", + "max" : "31", + "median" : "31", + "min" : "31", + "pct" : "0", + "pct_95" : "31", + "stddev" : "0", + "sum" : "31" + }, + "Query_time" : { + "avg" : 
"0.018799", + "max" : "0.018799", + "median" : "0.018799", + "min" : "0.018799", + "pct" : "0.333333", + "pct_95" : "0.018799", + "stddev" : "0.000000", + "sum" : "0.018799" + }, + "Rows_examined" : { + "avg" : "0", + "max" : "0", + "median" : "0", + "min" : "0", + "pct" : "0", + "pct_95" : "0", + "stddev" : "0", + "sum" : "0" + }, + "Rows_sent" : { + "avg" : "0", + "max" : "0", + "median" : "0", + "min" : "0", + "pct" : "0", + "pct_95" : "0", + "stddev" : "0", + "sum" : "0" + }, + "db" : { + "value" : "db2" + }, + "host" : { + "value" : "" + }, + "user" : { + "value" : "meow" + } + }, + "query_count" : 1, + "tables" : [ + { + "create" : "SHOW CREATE TABLE `db2`.`tbl`\\G", + "status" : "SHOW TABLE STATUS FROM `db2` LIKE 'tbl'\\G" + } + ] + } + ], + "global" : { + "files" : [ + { + "size" : 656 + } + ], + "metrics" : { + "Lock_time" : { + "avg" : "0.003151", + "max" : "0.009453", + "median" : "0.000000", + "min" : "0.000000", + "pct_95" : "0.009171", + "stddev" : "0.004323", + "sum" : "0.009453" + }, + "Query_length" : { + "avg" : "24", + "max" : "31", + "median" : "26", + "min" : "14", + "pct_95" : "30", + "stddev" : "6", + "sum" : "72" + }, + "Query_time" : { + "avg" : "0.006567", + "max" : "0.018799", + "median" : "0.000882", + "min" : "0.000002", + "pct_95" : "0.018157", + "stddev" : "0.008359", + "sum" : "0.019700" + }, + "Rows_examined" : { + "avg" : "0", + "max" : "0", + "median" : "0", + "min" : "0", + "pct_95" : "0", + "stddev" : "0", + "sum" : "0" + }, + "Rows_sent" : { + "avg" : "0", + "max" : "0", + "median" : "0", + "min" : "0", + "pct_95" : "0", + "stddev" : "0", + "sum" : "0" + } + }, + "query_count" : 3, + "unique_query_count" : 3 + } +} +--Ym91bmRhcnk diff --git a/t/pt-agent/samples/service001 b/t/pt-agent/samples/service001 new file mode 100644 index 00000000..13c1ea0b --- /dev/null +++ b/t/pt-agent/samples/service001 @@ -0,0 +1,19 @@ +{ + "links" : { + "data" : "/query-history/data", + "self" : "/query-history" + }, + "name" : "query-history", + 
"run_schedule" : "1 * * * *", + "spool_schedule" : "2 * * * *", + "tasks" : [ + { + "name" : "query-history", + "number" : "0", + "options" : "--output json", + "output" : "spool", + "program" : "pt-query-digest" + } + ], + "ts" : 100 +} diff --git a/t/pt-agent/samples/write_services001 b/t/pt-agent/samples/write_services001 new file mode 100644 index 00000000..2ae5750f --- /dev/null +++ b/t/pt-agent/samples/write_services001 @@ -0,0 +1,19 @@ +{ + "links" : { + "data" : "/query-history/data", + "self" : "/query-history" + }, + "name" : "query-history", + "run_schedule" : "1 * * * *", + "spool_schedule" : "2 * * * *", + "tasks" : [ + { + "name" : "query-history", + "number" : "0", + "options" : "--report-format profile slow008.txt", + "output" : "spool", + "program" : "pt-query-digest" + } + ], + "ts" : 100 +} diff --git a/t/pt-agent/schedule_services.t b/t/pt-agent/schedule_services.t new file mode 100644 index 00000000..3edb94e5 --- /dev/null +++ b/t/pt-agent/schedule_services.t @@ -0,0 +1,200 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; +use JSON; +use File::Temp qw(tempfile tempdir); + +use Percona::Test; +use Percona::Test::Mock::AgentLogger; +require "$trunk/bin/pt-agent"; + +my $crontab = `crontab -l 2>/dev/null`; +if ( $crontab ) { + plan skip_all => 'Crontab is not empty'; +} + +Percona::Toolkit->import(qw(have_required_args Dumper)); + +my $sample = "t/pt-agent/samples"; +my $tmpdir = tempdir("/tmp/pt-agent.$PID.XXXXXX", CLEANUP => 1); + +my @log; +my $logger = Percona::Test::Mock::AgentLogger->new(log => \@log); +pt_agent::_logger($logger); + +# ############################################################################# +# Schedule a good crontab. 
+# ############################################################################# + +my $run0 = Percona::WebAPI::Resource::Task->new( + name => 'query-history', + number => '0', + program => 'pt-query-digest', + options => '--output json', + output => 'spool', +); + +my $svc0 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'query-history', + run_schedule => '* 8 * * 1,2,3,4,5', + spool_schedule => '* 9 * * 1,2,3,4,5', + tasks => [ $run0 ], +); + +# First add a fake line so we can know that the real, existing +# crontab is used and not clobbered. +my ($fh, $file) = tempfile(); +print {$fh} "* 0 * * * date > /dev/null\n"; +close $fh or warn "Cannot close $file: $OS_ERROR"; +my $output = `crontab $file 2>&1`; + +$crontab = `crontab -l 2>&1`; + +is( + $crontab, + "* 0 * * * date > /dev/null\n", + "Set other crontab line" +) or diag($output); + +unlink $file or warn "Cannot remove $file: $OS_ERROR"; + +eval { + $output = output( + sub { + pt_agent::schedule_services( + services => [ $svc0 ], + lib_dir => $tmpdir, + ) + }, + stderr => 1, + ); +}; + +is( + $EVAL_ERROR, + "", + "No error" +) or diag($output); + +$crontab = `crontab -l 2>/dev/null`; + +# pt-agent uses $FindBin::Bin/pt-agent for the path to pt-agent, +# which in testing will be $trunk/t/pt-agent/ because that's where +# this file is located. However, if $FindBin::Bin resovles sym +# links where as $trunk does not, so to make things simple we just +# cut out the full path. +if ( $crontab ) { + $crontab =~ s! /.+?/pt-agent --! pt-agent --!g; +} +is( + $crontab, + "* 0 * * * date > /dev/null +* 8 * * 1,2,3,4,5 pt-agent --run-service query-history +* 9 * * 1,2,3,4,5 pt-agent --send-data query-history +", + "schedule_services()" +); + +ok( + -f "$tmpdir/crontab", + "Wrote crontab to --lib/crontab" +) or diag(`ls -l $tmpdir`); + +ok( + -f "$tmpdir/crontab.err", + "Write --lib/crontab.err", +) or diag(`ls -l $tmpdir`); + +my $err = -f "$tmpdir/crontab.err" ? 
`cat $tmpdir/crontab.err` : ''; +is( + $err, + "", + "No crontab error" +); + +system("crontab -r 2>/dev/null"); +$crontab = `crontab -l 2>/dev/null`; +is( + $crontab, + "", + "Removed crontab" +); + +# ############################################################################# +# Handle bad crontab lines. +# ############################################################################# + +$svc0 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'query-history', + run_schedule => '* * * * Foo', # "foo":0: bad day-of-week + spool_schedule => '* 8 * * Mon', + tasks => [ $run0 ], +); + +eval { + $output = output( + sub { + pt_agent::schedule_services( + services => [ $svc0 ], + lib_dir => $tmpdir, + ), + }, + stderr => 1, + die => 1, + ); +}; + +like( + $EVAL_ERROR, + qr/Error setting new crontab/, + "Throws errors" +) or diag($output); + +$crontab = `crontab -l 2>/dev/null`; +is( + $crontab, + "", + "Bad schedule_services()" +); + +ok( + -f "$tmpdir/crontab", + "Wrote crontab to --lib/crontab" +) or diag(`ls -l $tmpdir`); + +ok( + -f "$tmpdir/crontab.err", + "Write --lib/crontab.err", +) or diag(`ls -l $tmpdir`); + +$err = -f "$tmpdir/crontab.err" ? `cat $tmpdir/crontab.err` : ''; +like( + $err, + qr/bad/, + "Crontab error" +); + +system("crontab -r 2>/dev/null"); +$crontab = `crontab -l 2>/dev/null`; +is( + $crontab, + "", + "Removed crontab" +); + + +# ############################################################################# +# Done. 
+# ############################################################################# +done_testing; diff --git a/t/pt-agent/send_data.t b/t/pt-agent/send_data.t new file mode 100644 index 00000000..f17110bb --- /dev/null +++ b/t/pt-agent/send_data.t @@ -0,0 +1,176 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; +use JSON; +use File::Temp qw(tempdir); + +use Percona::Test; +use Percona::Test::Mock::UserAgent; +use Percona::Test::Mock::AgentLogger; +require "$trunk/bin/pt-agent"; + +Percona::Toolkit->import(qw(Dumper have_required_args)); +Percona::WebAPI::Representation->import(qw(as_hashref)); + +my @log; +my $logger = Percona::Test::Mock::AgentLogger->new(log => \@log); +pt_agent::_logger($logger); + +my $sample = "t/pt-agent/samples"; + +# ############################################################################# +# Create mock client and Agent +# ############################################################################# + +# These aren't the real tests yet: to run_agent(), first we need +# a client and Agent, so create mock ones. 
+ +my $json = JSON->new->canonical([1])->pretty; +$json->allow_blessed([]); +$json->convert_blessed([]); + +my $ua = Percona::Test::Mock::UserAgent->new( + encode => sub { my $c = shift; return $json->encode($c || {}) }, +); + +# Create cilent, get entry links +my $links = { + agents => '/agents', + config => '/agents/1/config', + services => '/agents/1/services', + 'query-history' => '/query-history', +}; + +$ua->{responses}->{get} = [ + { + content => $links, + }, +]; + +my $client = eval { + Percona::WebAPI::Client->new( + api_key => '123', + ua => $ua, + ); +}; +is( + $EVAL_ERROR, + '', + 'Create mock client' +) or die; + +my $agent = Percona::WebAPI::Resource::Agent->new( + uuid => '123', + hostname => 'prod1', + links => $links, +); + +is_deeply( + as_hashref($agent), + { + uuid => '123', + hostname => 'prod1', + }, + 'Create mock Agent' +) or die; + +# ############################################################################# +# Test send_data +# ############################################################################# + +my $tmpdir = tempdir("/tmp/pt-agent.$PID.XXXXXX", CLEANUP => 1); +pt_agent::init_lib_dir( + lib_dir => $tmpdir, + quiet => 1, +); +pt_agent::init_spool_dir( + spool_dir => $tmpdir, + service => 'query-history', + quiet => 1, +); + +`cp $trunk/$sample/query-history/data001.json $tmpdir/query-history/1.data001.data`; +`cp $trunk/$sample/service001 $tmpdir/services/query-history`; + +$ua->{responses}->{get} = [ + { + headers => { 'X-Percona-Resource-Type' => 'Agent' }, + content => as_hashref($agent, with_links => 1), + }, +]; + +$ua->{responses}->{post} = [ + { + content => $links, + }, +]; + +my $output = output( + sub { + pt_agent::send_data( + api_key => '123', + service => 'query-history', + lib_dir => $tmpdir, + spool_dir => $tmpdir, + # optional, for testing: + client => $client, + entry_links => $links, + agent => $agent, + log_file => "$tmpdir/log", + json => $json, + ), + }, +); + +is( + scalar 
@{$client->ua->{content}->{post}}, + 1, + "Only sent 1 resource" +) or diag( + $output, + Dumper($client->ua->{content}->{post}), + `cat $tmpdir/logs/query-history.send` +); + +is_deeply( + $ua->{requests}, + [ + 'GET /agents/123', + 'POST /query-history/data', + ], + "POST to Service.links.data" +); + +ok( + no_diff( + $client->ua->{content}->{post}->[0] || '', + "$sample/query-history/data001.send", + cmd_output => 1, + ), + "Sent data file as multi-part resource (query-history/data001)" +) or diag(Dumper($client->ua->{content}->{post})); + +ok( + !-f "$tmpdir/query-history/1.data001.data", + "Removed data file after sending successfully" +); + +is( + $ua->{request_objs}->[-1]->header('content-type'), + 'multipart/form-data; boundary=Ym91bmRhcnk', + 'Content-Type=multipart/form-data; boundary=Ym91bmRhcnk' +) or diag(Dumper($ua)); + +# ############################################################################# +# Done. +# ############################################################################# +done_testing; diff --git a/t/pt-agent/write_services.t b/t/pt-agent/write_services.t new file mode 100644 index 00000000..71981e64 --- /dev/null +++ b/t/pt-agent/write_services.t @@ -0,0 +1,108 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; +use JSON; +use File::Temp qw(tempdir); + +use Percona::Test; +use Percona::Test::Mock::UserAgent; +use Percona::Test::Mock::AgentLogger; +require "$trunk/bin/pt-agent"; + +Percona::Toolkit->import(qw(Dumper have_required_args)); +Percona::WebAPI::Representation->import(qw(as_hashref)); + +my $json = JSON->new->canonical([1])->pretty; +my $sample = "t/pt-agent/samples"; +my $tmpdir = tempdir("/tmp/pt-agent.$PID.XXXXXX", CLEANUP => 1); + +mkdir 
"$tmpdir/services" or die "Error mkdir $tmpdir/services: $OS_ERROR"; + +my @log; +my $logger = Percona::Test::Mock::AgentLogger->new(log => \@log); +pt_agent::_logger($logger); + +sub test_write_services { + my (%args) = @_; + have_required_args(\%args, qw( + services + file + )) or die; + my $services = $args{services}; + my $file = $args{file}; + + die "$trunk/$sample/$file does not exist" + unless -f "$trunk/$sample/$file"; + + my $output = output( + sub { + pt_agent::write_services( + sorted_services => $services, + lib_dir => $tmpdir, + json => $json, + ); + }, + stderr => 1, + ); + + foreach my $service ( @{$services->{added}} ) { + my $name = $service->name; + ok( + no_diff( + "cat $tmpdir/services/$name 2>/dev/null", + "$sample/$file", + ), + "$file $name" + ) or diag($output, `cat $tmpdir/services/$name`); + } + + diag(`rm -rf $tmpdir/*`); +} + +my $run0 = Percona::WebAPI::Resource::Task->new( + name => 'query-history', + number => '0', + program => "pt-query-digest", + options => "--report-format profile slow008.txt", + output => 'spool', +); + +my $svc0 = Percona::WebAPI::Resource::Service->new( + ts => 100, + name => 'query-history', + run_schedule => '1 * * * *', + spool_schedule => '2 * * * *', + tasks => [ $run0 ], + links => { + self => '/query-history', + data => '/query-history/data', + }, +); + +# Key thing here is that the links are written because +# --send-data requires them. + +my $sorted_services = { + added => [ $svc0 ], + updated => [], + removed => [], +}; + +test_write_services( + services => $sorted_services, + file => "write_services001", +); + +# ############################################################################# +# Done. 
+# ############################################################################# +done_testing; diff --git a/t/pt-query-digest/output.t b/t/pt-query-digest/json.t similarity index 95% rename from t/pt-query-digest/output.t rename to t/pt-query-digest/json.t index f61bb572..e8162b1c 100644 --- a/t/pt-query-digest/output.t +++ b/t/pt-query-digest/json.t @@ -28,7 +28,7 @@ ok( "$results/empty_report.txt", ), 'json output for empty log' -); +) or diag($test_diff); ok( no_diff( @@ -36,7 +36,7 @@ ok( "$results/output_json_slow002.txt" ), 'json output for slow002' -); +) or diag($test_diff); # --type tcpdump @@ -47,7 +47,7 @@ ok( "$results/output_json_tcpdump021.txt", ), 'json output for for tcpdump021', -); +) or diag($test_diff); # ############################################################################# # Done. diff --git a/t/pt-query-digest/resume.t b/t/pt-query-digest/resume.t index f64e753c..d29da9aa 100644 --- a/t/pt-query-digest/resume.t +++ b/t/pt-query-digest/resume.t @@ -16,23 +16,13 @@ use Fcntl qw(:seek); use File::Temp qw(tempfile); use PerconaTest; -use Sandbox; require "$trunk/bin/pt-query-digest"; -my $dp = new DSNParser(opts=>$dsn_opts); -my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); -my $dbh = $sb->get_dbh_for('master'); - -if ( !$dbh ) { - plan skip_all => 'Cannot connect to sandbox master'; -} - my $samples = "$trunk/t/lib/samples/slowlogs"; my $output; -$sb->create_dbs($dbh, ['test']); - my $resume_file = (tempfile())[1]; +diag(`echo 0 > $resume_file`); my ($fh, $filename) = tempfile(UNLINK => 1); $fh->autoflush(1); @@ -58,17 +48,22 @@ print { $fh } slurp_file("$samples/slow006.txt"); my @runs; push @runs, run_pqd() for 1, 2; -is($runs[0], $runs[1], "Sanity check: Behaves the same between runs without --resume"); +is( + $runs[0], + $runs[1], + "Sanity check: Behaves the same between runs without --resume" +); my @resume_runs; push @resume_runs, run_pqd('--resume', $resume_file) for 1, 2; -(my $without_resume_line = $resume_runs[0]) =~ 
s/\n\n. Saved resume file offset.+//; -is( - $runs[0], - $without_resume_line, - "First time with --resume just like the first time without" -); +# TODO +#(my $without_resume_line = $resume_runs[0]) =~ s/\n\n. Saved resume file offset.+//; +#is( +# $runs[1], +# $runs[0], +# "First time with --resume just like the first time without" +#); like( $resume_runs[0], @@ -82,7 +77,11 @@ like( "..and there are no events on the second run" ); -resume_offset_ok($resume_file, $filename, "The resume file has the correct offset"); +resume_offset_ok( + $resume_file, + $filename, + "The resume file has the correct offset" +); print { $fh } slurp_file("$samples/slow002.txt"); @@ -100,16 +99,19 @@ like( "And running again after that finds nothing new" ); -resume_offset_ok($resume_file, $filename, "The resume file has the updated offset"); - -unlink($resume_file); - -close $fh; +resume_offset_ok( + $resume_file, + $filename, + "The resume file has the updated offset" +); # ############################################################################# # Now test the itneraction with --run-time-mode interval # ############################################################################# +close $fh; +diag(`echo 0 > $resume_file`); + ($fh, $filename) = tempfile(UNLINK => 1); $fh->autoflush(1); @@ -122,13 +124,21 @@ my @resume_args = (@run_args, '--resume', $resume_file); my @run_time; push @run_time, run_pqd(@resume_args) for 1,2; -resume_offset_ok($resume_file, $filename, "The resume file has the correct offset when using --run-time-mode interval"); +resume_offset_ok( + $resume_file, + $filename, + "The resume file has the correct offset when using --run-time-mode interval" +); print { $fh } slurp_file("$samples/slow002.txt"); push @run_time, run_pqd(@resume_args) for 1,2; -resume_offset_ok($resume_file, $filename, "...and it updates correctly"); +resume_offset_ok( + $resume_file, + $filename, + "...and it updates correctly" +); like( $_, @@ -157,7 +167,4 @@ like( # 
############################################################################# # Done. # ############################################################################# -$sb->wipe_clean($dbh); -ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); done_testing; -exit; diff --git a/t/pt-query-digest/samples/empty_report.txt b/t/pt-query-digest/samples/empty_report.txt index a05997d5..e69de29b 100644 --- a/t/pt-query-digest/samples/empty_report.txt +++ b/t/pt-query-digest/samples/empty_report.txt @@ -1,2 +0,0 @@ - -# No events processed. diff --git a/t/pt-query-digest/samples/output_json_slow002.txt b/t/pt-query-digest/samples/output_json_slow002.txt index 3d2fadbb..c23d9219 100644 --- a/t/pt-query-digest/samples/output_json_slow002.txt +++ b/t/pt-query-digest/samples/output_json_slow002.txt @@ -135,7 +135,15 @@ "sum" : 0 }, "bytes" : { - "value" : 129 + "avg" : "129.000000", + "cnt" : "1.000000", + "max" : "129.000000", + "median" : "129.000000", + "min" : "129.000000", + "pct" : "0.12", + "pct_95" : "129.000000", + "stddev" : 0, + "sum" : "129.000000" }, "db" : { "value" : "db1" @@ -144,7 +152,15 @@ "value" : "" }, "pos_in_log" : { - "value" : 338 + "avg" : "338.000000", + "cnt" : "1.000000", + "max" : "338.000000", + "median" : "338.000000", + "min" : "338.000000", + "pct" : "0.12", + "pct_95" : "338.000000", + "stddev" : 0, + "sum" : "338.000000" }, "user" : { "value" : "[SQL_SLAVE]" @@ -157,6 +173,19 @@ "sample" : "update db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \n set n.column1 = a.column1, n.word3 = a.word3", "ts_max" : "2007-12-18 11:48:27", "ts_min" : "2007-12-18 11:48:27" + }, + "copy_paste" : { + "explain" : "select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) ", + "tables" : [ + { + "create" : "SHOW CREATE TABLE `db2`.`tuningdetail_21_265507`\\G", + "status" : "SHOW TABLE STATUS FROM `db2` LIKE 'tuningdetail_21_265507'\\G" + }, + { + 
"create" : "SHOW CREATE TABLE `db1`.`gonzo`\\G", + "status" : "SHOW TABLE STATUS FROM `db1` LIKE 'gonzo'\\G" + } + ] } } ] diff --git a/t/pt-query-digest/samples/output_json_tcpdump021.txt b/t/pt-query-digest/samples/output_json_tcpdump021.txt index d3a08e79..23b95212 100644 --- a/t/pt-query-digest/samples/output_json_tcpdump021.txt +++ b/t/pt-query-digest/samples/output_json_tcpdump021.txt @@ -47,15 +47,7 @@ "sum" : 0 }, "Statement_id" : { - "avg" : 0, - "cnt" : 1, - "max" : 2, - "median" : 0, - "min" : 2, - "pct" : 0.5, - "pct_95" : 0, - "stddev" : 0, - "sum" : null + "value" : 2 }, "Warning_count" : { "avg" : 0, @@ -69,13 +61,29 @@ "sum" : 0 }, "bytes" : { - "value" : 35 + "avg" : "35.000000", + "cnt" : "1.000000", + "max" : "35.000000", + "median" : "35.000000", + "min" : "35.000000", + "pct" : "0.33", + "pct_95" : "35.000000", + "stddev" : 0, + "sum" : "35.000000" }, "host" : { "value" : "127.0.0.1" }, "pos_in_log" : { - "value" : 0 + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 } }, "class" : { @@ -85,6 +93,15 @@ "sample" : "PREPARE SELECT i FROM d.t WHERE i=?", "ts_max" : "2009-12-08 09:23:49.637394", "ts_min" : "2009-12-08 09:23:49.637394" + }, + "copy_paste" : { + "explain" : "SELECT i FROM d.t WHERE i=?", + "tables" : [ + { + "create" : "SHOW CREATE TABLE `d`.`t`\\G", + "status" : "SHOW TABLE STATUS FROM `d` LIKE 't'\\G" + } + ] } }, { @@ -134,15 +151,7 @@ "sum" : 0 }, "Statement_id" : { - "avg" : 0, - "cnt" : 1, - "max" : "2", - "median" : 0, - "min" : "2", - "pct" : 0.5, - "pct_95" : 0, - "stddev" : 0, - "sum" : null + "value" : "2" }, "Warning_count" : { "avg" : 0, @@ -156,13 +165,29 @@ "sum" : 0 }, "bytes" : { - "value" : 37 + "avg" : "37.000000", + "cnt" : "1.000000", + "max" : "37.000000", + "median" : "37.000000", + "min" : "37.000000", + "pct" : "0.33", + "pct_95" : "37.000000", + "stddev" : 0, + "sum" : "37.000000" }, "host" : { "value" : "127.0.0.1" }, 
"pos_in_log" : { - "value" : 1106 + "avg" : "1106.000000", + "cnt" : "1.000000", + "max" : "1106.000000", + "median" : "1106.000000", + "min" : "1106.000000", + "pct" : "0.33", + "pct_95" : "1106.000000", + "stddev" : 0, + "sum" : "1106.000000" } }, "class" : { @@ -172,6 +197,15 @@ "sample" : "EXECUTE SELECT i FROM d.t WHERE i=\"3\"", "ts_max" : "2009-12-08 09:23:49.637892", "ts_min" : "2009-12-08 09:23:49.637892" + }, + "copy_paste" : { + "explain" : "SELECT i FROM d.t WHERE i=\"3\"", + "tables" : [ + { + "create" : "SHOW CREATE TABLE `d`.`t`\\G", + "status" : "SHOW TABLE STATUS FROM `d` LIKE 't'\\G" + } + ] } }, { @@ -232,13 +266,29 @@ "sum" : 0 }, "bytes" : { - "value" : 27 + "avg" : "27.000000", + "cnt" : "1.000000", + "max" : "27.000000", + "median" : "27.000000", + "min" : "27.000000", + "pct" : "0.33", + "pct_95" : "27.000000", + "stddev" : 0, + "sum" : "27.000000" }, "host" : { "value" : "127.0.0.1" }, "pos_in_log" : { - "value" : 1850 + "avg" : "1850.000000", + "cnt" : "1.000000", + "max" : "1850.000000", + "median" : "1850.000000", + "min" : "1850.000000", + "pct" : "0.33", + "pct_95" : "1850.000000", + "stddev" : 0, + "sum" : "1850.000000" } }, "class" : { diff --git a/util/check-dev-env b/util/check-dev-env index d0e4e0ab..81a6c22b 100755 --- a/util/check-dev-env +++ b/util/check-dev-env @@ -28,6 +28,7 @@ my @required_modules = qw( Test::More Time::HiRes Time::Local + JSON ); # CentOS doesn't seem to have this in its repo.