diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 94fed640..695c5056 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -14,6 +14,7 @@ use warnings FATAL => 'all'; BEGIN { $INC{$_} = __FILE__ for map { (my $pkg = "$_.pm") =~ s!::!/!g; $pkg } (qw( Percona::Toolkit + Mo DSNParser Quoter OptionParser @@ -29,6 +30,7 @@ BEGIN { EventAggregator ReportFormatter QueryReportFormatter + JSONReportFormatter EventTimeline QueryParser TableParser @@ -41,11 +43,9 @@ BEGIN { RawLogParser ProtocolParser HTTPProtocolParser - ExecutionThrottler MasterSlave Progress FileIterator - ExplainAnalyzer Runtime Pipeline VersionCheck @@ -72,6 +72,468 @@ our $VERSION = '2.1.8'; # End Percona::Toolkit package # ########################################################################### +# ########################################################################### +# Mo package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Mo.pm +# t/lib/Mo.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +BEGIN { +$INC{"Mo.pm"} = __FILE__; +package Mo; +our $VERSION = '0.30_Percona'; # Forked from 0.30 of Mo. + +{ + no strict 'refs'; + sub _glob_for { + return \*{shift()} + } + + sub _stash_for { + return \%{ shift() . "::" }; + } +} + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + + +our %TYPES = ( + Bool => sub { !$_[0] || (defined $_[0] && looks_like_number($_[0]) && $_[0] == 1) }, + Num => sub { defined $_[0] && looks_like_number($_[0]) }, + Int => sub { defined $_[0] && looks_like_number($_[0]) && $_[0] == int($_[0]) }, + Str => sub { defined $_[0] }, + Object => sub { defined $_[0] && blessed($_[0]) }, + FileHandle => sub { local $@; require IO::Handle; fileno($_[0]) && $_[0]->opened }, + + map { + my $type = /R/ ? $_ : uc $_; + $_ . "Ref" => sub { ref $_[0] eq $type } + } qw(Array Code Hash Regexp Glob Scalar) +); + +our %metadata_for; +{ + package Mo::Object; + + sub new { + my $class = shift; + my $args = $class->BUILDARGS(@_); + + my @args_to_delete; + while ( my ($attr, $meta) = each %{$metadata_for{$class}} ) { + next unless exists $meta->{init_arg}; + my $init_arg = $meta->{init_arg}; + + if ( defined $init_arg ) { + $args->{$attr} = delete $args->{$init_arg}; + } + else { + push @args_to_delete, $attr; + } + } + + delete $args->{$_} for @args_to_delete; + + for my $attribute ( keys %$args ) { + if ( my $coerce = $metadata_for{$class}{$attribute}{coerce} ) { + $args->{$attribute} = $coerce->($args->{$attribute}); + } + if ( my $I = $metadata_for{$class}{$attribute}{isa} ) { + ( (my $I_name), $I ) = @{$I}; + Mo::_check_type_constaints($attribute, $I, $I_name, $args->{$attribute}); + } + } + + while ( my ($attribute, $meta) = each %{$metadata_for{$class}} ) { + next unless $meta->{required}; + Carp::confess("Attribute ($attribute) is required for $class") + if ! 
exists $args->{$attribute} + } + + @_ = %$args; + my $self = bless $args, $class; + + my @build_subs; + my $linearized_isa = mro::get_linear_isa($class); + + for my $isa_class ( @$linearized_isa ) { + unshift @build_subs, *{ Mo::_glob_for "${isa_class}::BUILD" }{CODE}; + } + exists &$_ && $_->( $self, @_ ) for grep { defined } @build_subs; + return $self; + } + + sub BUILDARGS { + shift; + my $ref; + if ( @_ == 1 && ref($_[0]) ) { + Carp::confess("Single parameters to new() must be a HASH ref") + unless ref($_[0]) eq ref({}); + $ref = {%{$_[0]}} # We want a new reference, always + } + else { + $ref = { @_ }; + } + return $ref; + } +} + +my %export_for; +sub Mo::import { + warnings->import(qw(FATAL all)); + strict->import(); + + my $caller = scalar caller(); # Caller's package + my $caller_pkg = $caller . "::"; # Caller's package with :: at the end + my (%exports, %options); + + my (undef, @features) = @_; + my %ignore = ( map { $_ => 1 } qw( is isa init_arg builder buildargs clearer predicate build handles default required ) ); + for my $feature (grep { !$ignore{$_} } @features) { + { local $@; require "Mo/$feature.pm"; } + { + no strict 'refs'; + &{"Mo::${feature}::e"}( + $caller_pkg, + \%exports, + \%options, + \@_ + ); + } + } + + return if $exports{M}; + + %exports = ( + extends => sub { + for my $class ( map { "$_" } @_ ) { + $class =~ s{::|'}{/}g; + { local $@; eval { require "$class.pm" } } # or warn $@; + } + _set_package_isa($caller, @_); + _set_inherited_metadata($caller); + }, + override => \&override, + has => sub { + my $names = shift; + for my $attribute ( ref $names ? @$names : $names ) { + my %args = @_; + my $method = ($args{is} || '') eq 'ro' + ? sub { + Carp::confess("Cannot assign a value to a read-only accessor at reader ${caller_pkg}${attribute}") + if $#_; + return $_[0]{$attribute}; + } + : sub { + return $#_ + ? $_[0]{$attribute} = $_[1] + : $_[0]{$attribute}; + }; + + $metadata_for{$caller}{$attribute} = (); + + if ( my $I = $args{isa} ) { + my $orig_I = $I; + my $type; + if ( $I =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $I = _nested_constraints($attribute, $1, $2); + } + $metadata_for{$caller}{$attribute}{isa} = [$orig_I, $I]; + my $orig_method = $method; + $method = sub { + if ( $#_ ) { + Mo::_check_type_constaints($attribute, $I, $orig_I, $_[1]); + } + goto &$orig_method; + }; + } + + if ( my $builder = $args{builder} ) { + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? $_[0]{$attribute} = $_[0]->$builder + : goto &$original_method + }; + } + + if ( my $code = $args{default} ) { + Carp::confess("${caller}::${attribute}'s default is $code, but should be a coderef") + unless ref($code) eq 'CODE'; + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? 
$_[0]{$attribute} = $_[0]->$code + : goto &$original_method + }; + } + + if ( my $role = $args{does} ) { + my $original_method = $method; + $method = sub { + if ( $#_ ) { + Carp::confess(qq<Attribute ($attribute) doesn't consume a '$role' role>) + unless Scalar::Util::blessed($_[1]) && eval { $_[1]->does($role) } + } + goto &$original_method + }; + } + + if ( my $coercion = $args{coerce} ) { + $metadata_for{$caller}{$attribute}{coerce} = $coercion; + my $original_method = $method; + $method = sub { + if ( $#_ ) { + return $original_method->($_[0], $coercion->($_[1])) + } + goto &$original_method; + } + } + + $method = $options{$_}->($method, $attribute, @_) + for sort keys %options; + + *{ _glob_for "${caller}::$attribute" } = $method; + + if ( $args{required} ) { + $metadata_for{$caller}{$attribute}{required} = 1; + } + + if ($args{clearer}) { + *{ _glob_for "${caller}::$args{clearer}" } + = sub { delete shift->{$attribute} } + } + + if ($args{predicate}) { + *{ _glob_for "${caller}::$args{predicate}" } + = sub { exists shift->{$attribute} } + } + + if ($args{handles}) { + _has_handles($caller, $attribute, \%args); + } + + if (exists $args{init_arg}) { + $metadata_for{$caller}{$attribute}{init_arg} = $args{init_arg}; + } + } + }, + %exports, + ); + + $export_for{$caller} = [ keys %exports ]; + + for my $keyword ( keys %exports ) { + *{ _glob_for "${caller}::$keyword" } = $exports{$keyword} + } + *{ _glob_for "${caller}::extends" }{CODE}->( "Mo::Object" ) + unless @{ *{ _glob_for "${caller}::ISA" }{ARRAY} || [] }; +}; + +sub _check_type_constaints { + my ($attribute, $I, $I_name, $val) = @_; + ( ref($I) eq 'CODE' + ? $I->($val) + : (ref $val eq $I + || ($val && $val eq $I) + || (exists $TYPES{$I} && $TYPES{$I}->($val))) + ) + || Carp::confess( + qq<Attribute ($attribute) does not pass the type constraint because: > + . qq<Validation failed for '$I_name' with value > + . (defined $val ? Mo::Dumper($val) : 'undef') ) +} + +sub _has_handles { + my ($caller, $attribute, $args) = @_; + my $handles = $args->{handles}; + + my $ref = ref $handles; + my $kv; + if ( $ref eq ref [] ) { + $kv = { map { $_,$_ } @{$handles} }; + } + elsif ( $ref eq ref {} ) { + $kv = $handles; + } + elsif ( $ref eq ref qr// ) { + Carp::confess("Cannot delegate methods based on a Regexp without a type constraint (isa)") + unless $args->{isa}; + my $target_class = $args->{isa}; + $kv = { + map { $_, $_ } + grep { $_ =~ $handles } + grep { !exists $Mo::Object::{$_} && $target_class->can($_) } + grep { $_ ne 'has' && $_ ne 'extends' } + keys %{ _stash_for $target_class } + }; + } + else { + Carp::confess("handles for $ref not yet implemented"); + } + + while ( my ($method, $target) = each %{$kv} ) { + my $name = _glob_for "${caller}::$method"; + Carp::confess("You cannot overwrite a locally defined method ($method) with a delegation") + if defined &$name; + + my ($target, @curried_args) = ref($target) ?
@$target : $target; + *$name = sub { + my $self = shift; + my $delegate_to = $self->$attribute(); + my $error = "Cannot delegate $method to $target because the value of $attribute"; + Carp::confess("$error is not defined") unless $delegate_to; + Carp::confess("$error is not an object (got '$delegate_to')") + unless Scalar::Util::blessed($delegate_to) || (!ref($delegate_to) && $delegate_to->can($target)); + return $delegate_to->$target(@curried_args, @_); + } + } +} + +sub _nested_constraints { + my ($attribute, $aggregate_type, $type) = @_; + + my $inner_types; + if ( $type =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $inner_types = _nested_constraints($1, $2); + } + else { + $inner_types = $TYPES{$type}; + } + + if ( $aggregate_type eq 'ArrayRef' ) { + return sub { + my ($val) = @_; + return unless ref($val) eq ref([]); + + if ($inner_types) { + for my $value ( @{$val} ) { + return unless $inner_types->($value) + } + } + else { + for my $value ( @{$val} ) { + return unless $value && ($value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type))); + } + } + return 1; + }; + } + elsif ( $aggregate_type eq 'Maybe' ) { + return sub { + my ($value) = @_; + return 1 if ! defined($value); + if ($inner_types) { + return unless $inner_types->($value) + } + else { + return unless $value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type)); + } + return 1; + } + } + else { + Carp::confess("Nested aggregate types are only implemented for ArrayRefs and Maybe"); + } +} + +sub _set_package_isa { + my ($package, @new_isa) = @_; + + *{ _glob_for "${package}::ISA" } = [@new_isa]; +} + +sub _set_inherited_metadata { + my $class = shift; + my $linearized_isa = mro::get_linear_isa($class); + my %new_metadata; + + for my $isa_class (reverse @$linearized_isa) { + %new_metadata = ( + %new_metadata, + %{ $metadata_for{$isa_class} || {} }, + ); + } + $metadata_for{$class} = \%new_metadata; +} + +sub unimport { + my $caller = scalar caller(); + my $stash = _stash_for( $caller ); + + delete $stash->{$_} for @{$export_for{$caller}}; +} + +sub Dumper { + require Data::Dumper; + local $Data::Dumper::Indent = 0; + local $Data::Dumper::Sortkeys = 0; + local $Data::Dumper::Quotekeys = 0; + local $Data::Dumper::Terse = 1; + + Data::Dumper::Dumper(@_) +} + +BEGIN { + if ($] >= 5.010) { + { local $@; require mro; } + } + else { + local $@; + eval { + require MRO::Compat; + } or do { + *mro::get_linear_isa = *mro::get_linear_isa_dfs = sub { + no strict 'refs'; + + my $classname = shift; + + my @lin = ($classname); + my %stored; + foreach my $parent (@{"$classname\::ISA"}) { + my $plin = mro::get_linear_isa_dfs($parent); + foreach (@$plin) { + next if exists $stored{$_}; + push(@lin, $_); + $stored{$_} = 1; + } + } + return \@lin; + }; + } + } +} + +sub override { + my ($methods, $code) = @_; + my $caller = scalar caller; + + for my $method ( ref($methods) ? @$methods : $methods ) { + my $full_method = "${caller}::${method}"; + *{_glob_for $full_method} = $code; + } +} + +} +1; +} +# ########################################################################### +# End Mo package +# ########################################################################### + # ########################################################################### # DSNParser package # This package is a copy without comments from the original. 
The original @@ -1620,24 +2082,26 @@ use Time::Local qw(timegm timelocal); use Digest::MD5 qw(md5_hex); use B qw(); -require Exporter; -our @ISA = qw(Exporter); -our %EXPORT_TAGS = (); -our @EXPORT = (); -our @EXPORT_OK = qw( - micro_t - percentage_of - secs_to_time - time_to_secs - shorten - ts - parse_timestamp - unix_timestamp - any_unix_timestamp - make_checksum - crc32 - encode_json -); +BEGIN { + require Exporter; + our @ISA = qw(Exporter); + our %EXPORT_TAGS = (); + our @EXPORT = (); + our @EXPORT_OK = qw( + micro_t + percentage_of + secs_to_time + time_to_secs + shorten + ts + parse_timestamp + unix_timestamp + any_unix_timestamp + make_checksum + crc32 + encode_json + ); +} our $mysql_ts = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/; our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/; @@ -5448,15 +5912,6 @@ sub calculate_statistical_metrics { $classes->{$class}->{$attrib}->{all}, $classes->{$class}->{$attrib} ); - - if ( $args{apdex_t} && $attrib eq 'Query_time' ) { - $class_metrics->{$class}->{$attrib}->{apdex_t} = $args{apdex_t}; - $class_metrics->{$class}->{$attrib}->{apdex} - = $self->calculate_apdex( - t => $args{apdex_t}, - samples => $classes->{$class}->{$attrib}->{all}, - ); - } } } } @@ -5581,9 +6036,6 @@ sub metrics { median => $metrics->{classes}->{$where}->{$attrib}->{median} || 0, pct_95 => $metrics->{classes}->{$where}->{$attrib}->{pct_95} || 0, stddev => $metrics->{classes}->{$where}->{$attrib}->{stddev} || 0, - - apdex_t => $metrics->{classes}->{$where}->{$attrib}->{apdex_t}, - apdex => $metrics->{classes}->{$where}->{$attrib}->{apdex}, }; } @@ -5899,51 +6351,6 @@ sub _deep_copy_attrib_vals { return $copy; } -sub calculate_apdex { - my ( $self, %args ) = @_; - my @required_args = qw(t samples); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my ($t, $samples) = @args{@required_args}; - - if ( $t <= 0 ) { - die "Invalid target threshold (T): $t. T must be greater than zero"; - } - - my $f = 4 * $t; - PTDEBUG && _d("Apdex T =", $t, "F =", $f); - - my $satisfied = 0; - my $tolerating = 0; - my $frustrated = 0; # just for debug output - my $n_samples = 0; - BUCKET: - for my $bucket ( keys %$samples ) { - my $n_responses = $samples->{$bucket}; - my $response_time = $buck_vals[$bucket]; - - if ( $response_time <= $t ) { - $satisfied += $n_responses; - } - elsif ( $response_time <= $f ) { - $tolerating += $n_responses; - } - else { - $frustrated += $n_responses; - } - - $n_samples += $n_responses; - } - - my $apdex = sprintf('%.2f', ($satisfied + ($tolerating / 2)) / $n_samples); - PTDEBUG && _d($n_samples, "samples,", $satisfied, "satisfied,", - $tolerating, "tolerating,", $frustrated, "frustrated, Apdex score:", - $apdex); - - return $apdex; -} - sub _get_value { my ( $self, %args ) = @_; my ($event, $attrib, $alts) = @args{qw(event attribute alternates)}; @@ -5997,8 +6404,7 @@ sub _d { { package ReportFormatter; -use strict; -use warnings FATAL => 'all'; +use Mo; use English qw(-no_match_vars); use constant PTDEBUG => $ENV{PTDEBUG} || 0; @@ -6008,40 +6414,102 @@ use POSIX qw(ceil); eval { require Term::ReadKey }; my $have_term = $EVAL_ERROR ? 
0 : 1; -sub new { - my ( $class, %args ) = @_; - my @required_args = qw(); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my $self = { - underline_header => 1, - line_prefix => '# ', - line_width => 78, - column_spacing => ' ', - extend_right => 0, - truncate_line_mark => '...', - column_errors => 'warn', - truncate_header_side => 'left', - strip_whitespace => 1, - %args, # args above can be overriden, args below cannot - n_cols => 0, - }; - if ( ($self->{line_width} || '') eq 'auto' ) { +has underline_header => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has line_prefix => ( + is => 'ro', + isa => 'Str', + default => sub { '# ' }, +); +has line_width => ( + is => 'ro', + isa => 'Int', + default => sub { 78 }, +); +has column_spacing => ( + is => 'ro', + isa => 'Str', + default => sub { ' ' }, +); +has extend_right => ( + is => 'ro', + isa => 'Bool', + default => sub { '' }, +); +has truncate_line_mark => ( + is => 'ro', + isa => 'Str', + default => sub { '...' }, +); +has column_errors => ( + is => 'ro', + isa => 'Str', + default => sub { 'warn' }, +); +has truncate_header_side => ( + is => 'ro', + isa => 'Str', + default => sub { 'left' }, +); +has strip_whitespace => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has title => ( + is => 'rw', + isa => 'Str', + predicate => 'has_title', +); + + +has n_cols => ( + is => 'rw', + isa => 'Int', + default => sub { 0 }, + init_arg => undef, +); + +has cols => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_cols', +); + +has lines => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_lines', +); + +has truncate_headers => ( + is => 'rw', + isa => 'Bool', + default => sub { undef }, + init_arg => undef, + clearer => 'clear_truncate_headers', +); + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); + + if ( ($args->{line_width} || '') eq 'auto' ) { die "Cannot auto-detect line width because the Term::ReadKey module " . 
"is not installed" unless $have_term; - ($self->{line_width}) = GetTerminalSize(); + ($args->{line_width}) = GetTerminalSize(); + PTDEBUG && _d('Line width:', $args->{line_width}); } - PTDEBUG && _d('Line width:', $self->{line_width}); - return bless $self, $class; -} - -sub set_title { - my ( $self, $title ) = @_; - $self->{title} = $title; - return; + return $args; } sub set_columns { @@ -6057,7 +6525,7 @@ sub set_columns { die "Column does not have a name" unless defined $col_name; if ( $col->{width} ) { - $col->{width_pct} = ceil(($col->{width} * 100) / $self->{line_width}); + $col->{width_pct} = ceil(($col->{width} * 100) / $self->line_width()); PTDEBUG && _d('col:', $col_name, 'width:', $col->{width}, 'chars =', $col->{width_pct}, '%'); } @@ -6084,10 +6552,10 @@ sub set_columns { $col->{right_most} = 1 if $i == $#cols; - push @{$self->{cols}}, $col; + push @{$self->cols}, $col; } - $self->{n_cols} = scalar @cols; + $self->n_cols( scalar @cols ); if ( ($used_width || 0) > 100 ) { die "Total width_pct for all columns is >100%"; @@ -6097,15 +6565,15 @@ sub set_columns { my $wid_per_col = int((100 - $used_width) / scalar @auto_width_cols); PTDEBUG && _d('Line width left:', (100-$used_width), '%;', 'each auto width col:', $wid_per_col, '%'); - map { $self->{cols}->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; + map { $self->cols->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; } - $min_hdr_wid += ($self->{n_cols} - 1) * length $self->{column_spacing}; + $min_hdr_wid += ($self->n_cols() - 1) * length $self->column_spacing(); PTDEBUG && _d('min header width:', $min_hdr_wid); - if ( $min_hdr_wid > $self->{line_width} ) { + if ( $min_hdr_wid > $self->line_width() ) { PTDEBUG && _d('Will truncate headers because min header width', - $min_hdr_wid, '> line width', $self->{line_width}); - $self->{truncate_headers} = 1; + $min_hdr_wid, '> line width', $self->line_width()); + $self->truncate_headers(1); } return; @@ -6114,14 +6582,14 @@ sub set_columns { sub add_line { my ( $self, @vals ) = @_; my $n_vals = scalar @vals; - if ( $n_vals != $self->{n_cols} ) { + if ( $n_vals != $self->n_cols() ) { $self->_column_error("Number of values $n_vals does not match " - . "number of columns $self->{n_cols}"); + . "number of columns " . $self->n_cols()); } for my $i ( 0..($n_vals-1) ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $val = defined $vals[$i] ? $vals[$i] : $col->{undef_value}; - if ( $self->{strip_whitespace} ) { + if ( $self->strip_whitespace() ) { $val =~ s/^\s+//g; $val =~ s/\s+$//; $vals[$i] = $val; @@ -6130,7 +6598,7 @@ sub add_line { $col->{min_val} = min($width, ($col->{min_val} || $width)); $col->{max_val} = max($width, ($col->{max_val} || $width)); } - push @{$self->{lines}}, \@vals; + push @{$self->lines}, \@vals; return; } @@ -6138,26 +6606,28 @@ sub get_report { my ( $self, %args ) = @_; $self->_calculate_column_widths(); - $self->_truncate_headers() if $self->{truncate_headers}; + if ( $self->truncate_headers() ) { + $self->_truncate_headers(); + } $self->_truncate_line_values(%args); my @col_fmts = $self->_make_column_formats(); - my $fmt = ($self->{line_prefix} || '') - . join($self->{column_spacing}, @col_fmts); + my $fmt = $self->line_prefix() + . join($self->column_spacing(), @col_fmts); PTDEBUG && _d('Format:', $fmt); (my $hdr_fmt = $fmt) =~ s/%([^-])/%-$1/g; my @lines; - push @lines, sprintf "$self->{line_prefix}$self->{title}" if $self->{title}; + push @lines, $self->line_prefix() . 
$self->title() if $self->has_title(); push @lines, $self->_truncate_line( - sprintf($hdr_fmt, map { $_->{name} } @{$self->{cols}}), + sprintf($hdr_fmt, map { $_->{name} } @{$self->cols}), strip => 1, mark => '', ); - if ( $self->{underline_header} ) { - my @underlines = map { '=' x $_->{print_width} } @{$self->{cols}}; + if ( $self->underline_header() ) { + my @underlines = map { '=' x $_->{print_width} } @{$self->cols}; push @lines, $self->_truncate_line( sprintf($fmt, map { $_ || '' } @underlines), mark => '', @@ -6168,19 +6638,23 @@ sub get_report { my $vals = $_; my $i = 0; my @vals = map { - my $val = defined $_ ? $_ : $self->{cols}->[$i++]->{undef_value}; + my $val = defined $_ ? $_ : $self->cols->[$i++]->{undef_value}; $val = '' if !defined $val; $val =~ s/\n/ /g; $val; } @$vals; my $line = sprintf($fmt, @vals); - if ( $self->{extend_right} ) { + if ( $self->extend_right() ) { $line; } else { $self->_truncate_line($line); } - } @{$self->{lines}}; + } @{$self->lines}; + + $self->clear_cols(); + $self->clear_lines(); + $self->clear_truncate_headers(); return join("\n", @lines) . "\n"; } @@ -6188,7 +6662,7 @@ sub get_report { sub truncate_value { my ( $self, $col, $val, $width, $side ) = @_; return $val if length $val <= $width; - return $val if $col->{right_most} && $self->{extend_right}; + return $val if $col->{right_most} && $self->extend_right(); $side ||= $col->{truncate_side}; my $mark = $col->{truncate_mark}; if ( $side eq 'right' ) { @@ -6208,8 +6682,8 @@ sub _calculate_column_widths { my ( $self ) = @_; my $extra_space = 0; - foreach my $col ( @{$self->{cols}} ) { - my $print_width = int($self->{line_width} * ($col->{width_pct} / 100)); + foreach my $col ( @{$self->cols} ) { + my $print_width = int($self->line_width() * ($col->{width_pct} / 100)); PTDEBUG && _d('col:', $col->{name}, 'width pct:', $col->{width_pct}, 'char width:', $print_width, @@ -6233,7 +6707,7 @@ sub _calculate_column_widths { PTDEBUG && _d('Extra space:', $extra_space); while ( $extra_space-- ) { - foreach my $col ( @{$self->{cols}} ) { + foreach my $col ( @{$self->cols} ) { if ( $col->{auto_width} && ( $col->{print_width} < $col->{max_val} || $col->{print_width} < $col->{header_width}) @@ -6248,8 +6722,8 @@ sub _calculate_column_widths { sub _truncate_headers { my ( $self, $col ) = @_; - my $side = $self->{truncate_header_side}; - foreach my $col ( @{$self->{cols}} ) { + my $side = $self->truncate_header_side(); + foreach my $col ( @{$self->cols} ) { my $col_name = $col->{name}; my $print_width = $col->{print_width}; next if length $col_name <= $print_width; @@ -6262,10 +6736,10 @@ sub _truncate_headers { sub _truncate_line_values { my ( $self, %args ) = @_; - my $n_vals = $self->{n_cols} - 1; - foreach my $vals ( @{$self->{lines}} ) { + my $n_vals = $self->n_cols() - 1; + foreach my $vals ( @{$self->lines} ) { for my $i ( 0..$n_vals ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $val = defined $vals->[$i] ? $vals->[$i] : $col->{undef_value}; my $width = length $val; @@ -6291,9 +6765,9 @@ sub _truncate_line_values { sub _make_column_formats { my ( $self ) = @_; my @col_fmts; - my $n_cols = $self->{n_cols} - 1; + my $n_cols = $self->n_cols() - 1; for my $i ( 0..$n_cols ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $width = $col->{right_most} && !$col->{right_justify} ? '' : $col->{print_width}; @@ -6306,12 +6780,12 @@ sub _make_column_formats { sub _truncate_line { my ( $self, $line, %args ) = @_; - my $mark = defined $args{mark} ? 
$args{mark} : $self->{truncate_line_mark}; + my $mark = defined $args{mark} ? $args{mark} : $self->truncate_line_mark(); if ( $line ) { $line =~ s/\s+$// if $args{strip}; my $len = length($line); - if ( $len > $self->{line_width} ) { - $line = substr($line, 0, $self->{line_width} - length $mark); + if ( $len > $self->line_width() ) { + $line = substr($line, 0, $self->line_width() - length $mark); $line .= $mark if $mark; } } @@ -6321,7 +6795,7 @@ sub _truncate_line { sub _column_error { my ( $self, $err ) = @_; my $msg = "Column error: $err"; - $self->{column_errors} eq 'die' ? die $msg : warn $msg; + $self->column_errors() eq 'die' ? die $msg : warn $msg; return; } @@ -6350,8 +6824,7 @@ sub _d { { package QueryReportFormatter; -use strict; -use warnings FATAL => 'all'; +use Mo; use English qw(-no_match_vars); use POSIX qw(floor); @@ -6364,25 +6837,68 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0; use constant LINE_LENGTH => 74; use constant MAX_STRING_LENGTH => 10; -sub new { - my ( $class, %args ) = @_; - foreach my $arg ( qw(OptionParser QueryRewriter Quoter) ) { - die "I need a $arg argument" unless $args{$arg}; +{ local $EVAL_ERROR; eval { require Quoter } }; +{ local $EVAL_ERROR; eval { require ReportFormatter } }; + +has Quoter => ( + is => 'ro', + isa => 'Quoter', + default => sub { Quoter->new() }, +); + +has label_width => ( + is => 'ro', + isa => 'Int', +); + +has global_headers => ( + is => 'ro', + isa => 'ArrayRef', + default => sub { [qw( total min max avg 95% stddev median)] }, +); + +has event_headers => ( + is => 'ro', + isa => 'ArrayRef', + default => sub { [qw(pct total min max avg 95% stddev median)] }, +); + +has ReportFormatter => ( + is => 'ro', + isa => 'ReportFormatter', + builder => '_build_report_formatter', +); + +sub _build_report_formatter { + return ReportFormatter->new( + line_width => LINE_LENGTH, + extend_right => 1, + ); +} + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); + + foreach my $arg ( qw(OptionParser QueryRewriter) ) { + die "I need a $arg argument" unless $args->{$arg}; } - my $label_width = $args{label_width} || 12; + my $label_width = $args->{label_width} ||= 12; PTDEBUG && _d('Label width:', $label_width); - my $cheat_width = $label_width + 1; - + my $o = delete $args->{OptionParser}; my $self = { - %args, - label_width => $label_width, + %$args, + options => { + show_all => $o->get('show-all'), + shorten => $o->get('shorten'), + report_all => $o->get('report-all'), + report_histogram => $o->get('report-histogram'), + }, num_format => "# %-${label_width}s %3s %7s %7s %7s %7s %7s %7s %7s", bool_format => "# %-${label_width}s %3d%% yes, %3d%% no", string_format => "# %-${label_width}s %s", - global_headers => [qw( total min max avg 95% stddev median)], - event_headers => [qw(pct total min max avg 95% stddev median)], hidden_attrib => { # Don't sort/print these attribs in the reports. arg => 1, # They're usually handled specially, or not fingerprint => 1, # printed at all. 
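For reference, the hunks above replace ReportFormatter's hand-rolled new() with Mo has() declarations, so callers use generated accessors such as $self->line_width() and $self->truncate_headers(1) instead of poking at hash slots directly. A minimal sketch of how those generated accessors behave, assuming the bundled Mo package above; the package and attribute names are illustrative only, and the snippet is an aside rather than a hunk of the patch:

    package My::Example;
    use Mo;   # the Mo fork bundled earlier in this file
    has line_width => ( is => 'rw', isa => 'Int', default => sub { 78 } );
    has title      => ( is => 'rw', isa => 'Str', predicate => 'has_title' );

    package main;
    my $ex = My::Example->new();
    print $ex->line_width(), "\n";                    # 78, filled lazily from the default coderef
    print $ex->has_title() ? "set" : "unset", "\n";   # "unset" until title() is written
    $ex->title('Profile');                            # the isa => 'Str' constraint is checked on write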
@@ -6390,18 +6906,7 @@ sub new { ts => 1, }, }; - return bless $self, $class; -} - -sub set_report_formatter { - my ( $self, %args ) = @_; - my @required_args = qw(report formatter); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless exists $args{$arg}; - } - my ($report, $formatter) = @args{@required_args}; - $self->{formatter_for}->{$report} = $formatter; - return; + return $self; } sub print_reports { @@ -6507,7 +7012,7 @@ sub header { shorten(scalar keys %{$results->{classes}}, d=>1_000), shorten($qps || 0, d=>1_000), shorten($conc || 0, d=>1_000)); - $line .= ('_' x (LINE_LENGTH - length($line) + $self->{label_width} - 12)); + $line .= ('_' x (LINE_LENGTH - length($line) + $self->label_width() - 12)); push @result, $line; if ( my $ts = $results->{globals}->{ts} ) { @@ -6568,8 +7073,8 @@ sub header { return join("\n", map { s/\s+$//; $_ } @result) . "\n"; } -sub query_report { - my ( $self, %args ) = @_; +sub query_report_values { + my ($self, %args) = @_; foreach my $arg ( qw(ea worst orderby groupby) ) { die "I need a $arg argument" unless defined $arg; } @@ -6577,11 +7082,63 @@ sub query_report { my $groupby = $args{groupby}; my $worst = $args{worst}; - my $o = $self->{OptionParser}; - my $q = $self->{Quoter}; + my $q = $self->Quoter; my $qv = $self->{QueryReview}; my $qr = $self->{QueryRewriter}; + my @values; + ITEM: + foreach my $top_event ( @$worst ) { + my $item = $top_event->[0]; + my $reason = $args{explain_why} ? $top_event->[1] : ''; + my $rank = $top_event->[2]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; + my $samp_query = $sample->{arg} || ''; + + my %item_vals = ( + item => $item, + samp_query => $samp_query, + rank => ($rank || 0), + reason => $reason, + ); + + my $review_vals; + if ( $qv ) { + $review_vals = $qv->get_review_info($item); + next ITEM if $review_vals->{reviewed_by} && !$self->{options}->{report_all}; + for my $col ( $qv->review_cols() ) { + push @{$item_vals{review_vals}}, [$col, $review_vals->{$col}]; + } + } + + $item_vals{default_db} = $sample->{db} ? $sample->{db} + : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} + : undef; + $item_vals{tables} = [$self->{QueryParser}->extract_tables( + query => $samp_query, + default_db => $item_vals{default_db}, + Quoter => $self->Quoter, + )]; + + if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { + $item_vals{crc} = crc32($samp_query); + } + + push @values, \%item_vals; + } + return \@values; +} + +sub query_report { + my ( $self, %args ) = @_; + + my $ea = $args{ea}; + my $groupby = $args{groupby}; + my $report_values = $self->query_report_values(%args); + + my $qr = $self->{QueryRewriter}; + my $report = ''; if ( $args{print_header} ) { @@ -6596,55 +7153,31 @@ sub query_report { ); ITEM: - foreach my $top_event ( @$worst ) { - my $item = $top_event->[0]; - my $reason = $args{explain_why} ? $top_event->[1] : ''; - my $rank = $top_event->[2]; - my $stats = $ea->results->{classes}->{$item}; - my $sample = $ea->results->{samples}->{$item}; - my $samp_query = $sample->{arg} || ''; - - my $review_vals; - if ( $qv ) { - $review_vals = $qv->get_review_info($item); - next ITEM if $review_vals->{reviewed_by} && !$o->get('report-all'); - } - - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? 
keys %{$stats->{db}->{unq}} - : undef; - my @tables; - if ( $o->get('for-explain') ) { - @tables = $self->{QueryParser}->extract_tables( - query => $samp_query, - default_db => $default_db, - Quoter => $self->{Quoter}, - ); - } - - $report .= "\n" if $rank > 1; # space between each event report + foreach my $vals ( @$report_values ) { + my $item = $vals->{item}; + $report .= "\n" if $vals->{rank} > 1; # space between each event report $report .= $self->event_report( %args, item => $item, - sample => $sample, - rank => $rank, - reason => $reason, + sample => $ea->results->{samples}->{$item}, + rank => $vals->{rank}, + reason => $vals->{reason}, attribs => $attribs, - db => $default_db, + db => $vals->{default_db}, ); - if ( $o->get('report-histogram') ) { + if ( $self->{options}->{report_histogram} ) { $report .= $self->chart_distro( %args, - attrib => $o->get('report-histogram'), - item => $item, + attrib => $self->{options}->{report_histogram}, + item => $vals->{item}, ); } - if ( $qv && $review_vals ) { + if ( $vals->{review_vals} ) { $report .= "# Review information\n"; - foreach my $col ( $qv->review_cols() ) { - my $val = $review_vals->{$col}; + foreach my $elem ( @{$vals->{review_vals}} ) { + my ($col, $val) = @$elem; if ( !$val || $val ne '0000-00-00 00:00:00' ) { # issue 202 $report .= sprintf "# %13s: %-s\n", $col, ($val ? $val : ''); } @@ -6652,18 +7185,15 @@ sub query_report { } if ( $groupby eq 'fingerprint' ) { - $samp_query = $qr->shorten($samp_query, $o->get('shorten')) - if $o->get('shorten'); + my $samp_query = $qr->shorten($vals->{samp_query}, $self->{options}->{shorten}) + if $self->{options}->{shorten}; - $report .= "# Fingerprint\n# $item\n" - if $o->get('fingerprints'); + PTDEBUG && _d("Fingerprint\n# $vals->{item}\n"); - $report .= $self->tables_report(@tables) - if $o->get('for-explain'); + $report .= $self->tables_report(@{$vals->{tables}}); - if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { - my $crc = crc32($samp_query); - $report.= "# CRC " . ($crc ? $crc % 1_000 : "") . "\n"; + if ( $vals->{crc} ) { + $report.= "# CRC " . ($vals->{crc} % 1_000) . 
"\n"; } my $log_type = $args{log_type} || ''; @@ -6677,14 +7207,13 @@ sub query_report { } else { $report .= "# EXPLAIN /*!50100 PARTITIONS*/\n$samp_query${mark}\n"; - $report .= $self->explain_report($samp_query, $default_db); + $report .= $self->explain_report($samp_query, $vals->{default_db}); } } else { $report .= "$samp_query${mark}\n"; my $converted = $qr->convert_to_select($samp_query); - if ( $o->get('for-explain') - && $converted + if ( $converted && $converted =~ m/^[\(\s]*select/i ) { $report .= "# Converted for EXPLAIN\n# EXPLAIN /*!50100 PARTITIONS*/\n$converted${mark}\n"; } @@ -6692,7 +7221,7 @@ sub query_report { } else { if ( $groupby eq 'tables' ) { - my ( $db, $tbl ) = $q->split_unquote($item); + my ( $db, $tbl ) = $self->Quoter->split_unquote($item); $report .= $self->tables_report([$db, $tbl]); } $report .= "$item\n"; @@ -6702,20 +7231,19 @@ sub query_report { return $report; } -sub event_report { - my ( $self, %args ) = @_; - foreach my $arg ( qw(ea item orderby) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $ea = $args{ea}; - my $item = $args{item}; +sub event_report_values { + my ($self, %args) = @_; + + my $ea = $args{ea}; + my $item = $args{item}; my $orderby = $args{orderby}; my $results = $ea->results(); - my $o = $self->{OptionParser}; - my @result; + + my %vals; my $store = $results->{classes}->{$item}; - return "# No such event $item\n" unless $store; + + return unless $store; my $global_cnt = $results->{globals}->{$orderby}->{cnt}; my $class_cnt = $store->{$orderby}->{cnt}; @@ -6734,68 +7262,26 @@ sub event_report { }; } - my $line = sprintf( - '# %s %d: %s QPS, %sx concurrency, ID 0x%s at byte %.f ', - ($ea->{groupby} eq 'fingerprint' ? 'Query' : 'Item'), - $args{rank} || 0, - shorten($qps || 0, d=>1_000), - shorten($conc || 0, d=>1_000), - make_checksum($item), - $results->{samples}->{$item}->{pos_in_log} || 0, - ); - $line .= ('_' x (LINE_LENGTH - length($line) + $self->{label_width} - 12)); - push @result, $line; - - if ( $args{reason} ) { - push @result, - "# This item is included in the report because it matches " - . ($args{reason} eq 'top' ? '--limit.' : '--outliers.'); - } - - { + $vals{groupby} = $ea->{groupby}; + $vals{qps} = $qps || 0; + $vals{concurrency} = $conc || 0; + $vals{checksum} = make_checksum($item); + $vals{pos_in_log} = $results->{samples}->{$item}->{pos_in_log} || 0; + $vals{reason} = $args{reason}; + $vals{variance_to_mean} = do { my $query_time = $ea->metrics(where => $item, attrib => 'Query_time'); - push @result, - sprintf("# Scores: Apdex = %s [%3.1f]%s, V/M = %.2f", - (defined $query_time->{apdex} ? "$query_time->{apdex}" : "NS"), - ($query_time->{apdex_t} || 0), - ($query_time->{cnt} < 100 ? 
"*" : ""), - ($query_time->{stddev}**2 / ($query_time->{avg} || 1)), - ); + $query_time->{stddev}**2 / ($query_time->{avg} || 1) + }; + + $vals{counts} = { + class_cnt => $class_cnt, + global_cnt => $global_cnt, + }; + + if ( my $ts = $store->{ts}) { + $vals{time_range} = $self->format_time_range($ts) || "unknown"; } - if ( $o->get('explain') && $results->{samples}->{$item}->{arg} ) { - eval { - my $sparkline = $self->explain_sparkline( - $results->{samples}->{$item}->{arg}, $args{db}); - push @result, "# EXPLAIN sparkline: $sparkline\n"; - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d("Failed to get EXPLAIN sparkline:", $EVAL_ERROR); - } - } - - if ( my $attrib = $o->get('report-histogram') ) { - my $sparkline = $self->distro_sparkline( - %args, - attrib => $attrib, - item => $item, - ); - if ( $sparkline ) { - push @result, "# $attrib sparkline: |$sparkline|"; - } - } - - if ( my $ts = $store->{ts} ) { - my $time_range = $self->format_time_range($ts) || "unknown"; - push @result, "# Time range: $time_range"; - } - - push @result, $self->make_event_header(); - - push @result, - sprintf $self->{num_format}, 'Count', - percentage_of($class_cnt, $global_cnt), $class_cnt, map { '' } (1..8); - my $attribs = $args{attribs}; if ( !$attribs ) { $attribs = $self->sort_attribs( @@ -6804,10 +7290,9 @@ sub event_report { ); } + $vals{attributes} = { map { $_ => [] } qw(num innodb bool string) }; + foreach my $type ( qw(num innodb) ) { - if ( $type eq 'innodb' && @{$attribs->{$type}} ) { - push @result, "# InnoDB:"; - }; NUM_ATTRIB: foreach my $attrib ( @{$attribs->{$type}} ) { @@ -6827,15 +7312,12 @@ sub event_report { $pct = percentage_of( $vals->{sum}, $results->{globals}->{$attrib}->{sum}); - push @result, - sprintf $self->{num_format}, - $self->make_label($attrib), $pct, @values; + push @{$vals{attributes}{$type}}, + [ $attrib, $pct, @values ]; } } if ( @{$attribs->{bool}} ) { - push @result, "# Boolean:"; - my $printed_bools = 0; BOOL_ATTRIB: foreach my $attrib ( @{$attribs->{bool}} ) { next BOOL_ATTRIB unless exists $store->{$attrib}; @@ -6843,33 +7325,115 @@ sub event_report { next unless scalar %$vals; if ( $vals->{sum} > 0 ) { - push @result, - sprintf $self->{bool_format}, - $self->make_label($attrib), $self->bool_percents($vals); - $printed_bools = 1; + push @{$vals{attributes}{bool}}, + [ $attrib, $self->bool_percents($vals) ]; } } - pop @result unless $printed_bools; } if ( @{$attribs->{string}} ) { - push @result, "# String:"; - my $printed_strings = 0; STRING_ATTRIB: foreach my $attrib ( @{$attribs->{string}} ) { next STRING_ATTRIB unless exists $store->{$attrib}; my $vals = $store->{$attrib}; next unless scalar %$vals; + push @{$vals{attributes}{string}}, + [ $attrib, $vals ]; + } + } + + + return \%vals; +} + + +sub event_report { + my ( $self, %args ) = @_; + foreach my $arg ( qw(ea item orderby) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + my $item = $args{item}; + my $val = $self->event_report_values(%args); + my @result; + + return "# No such event $item\n" unless $val; + + my $line = sprintf( + '# %s %d: %s QPS, %sx concurrency, ID 0x%s at byte %.f ', + ($val->{groupby} eq 'fingerprint' ? 'Query' : 'Item'), + $args{rank} || 0, + shorten($val->{qps}, d=>1_000), + shorten($val->{concurrency}, d=>1_000), + $val->{checksum}, + $val->{pos_in_log}, + ); + $line .= ('_' x (LINE_LENGTH - length($line) + $self->label_width() - 12)); + push @result, $line; + + if ( $val->{reason} ) { + push @result, + "# This item is included in the report because it matches " + . 
($val->{reason} eq 'top' ? '--limit.' : '--outliers.'); + } + + push @result, + sprintf("# Scores: V/M = %.2f", $val->{variance_to_mean} ); + + if ( $val->{time_range} ) { + push @result, "# Time range: $val->{time_range}"; + } + + push @result, $self->make_event_header(); + + push @result, + sprintf $self->{num_format}, 'Count', + percentage_of($val->{counts}{class_cnt}, $val->{counts}{global_cnt}), + $val->{counts}{class_cnt}, + map { '' } (1..8); + + + my $attribs = $val->{attributes}; + + foreach my $type ( qw(num innodb) ) { + if ( $type eq 'innodb' && @{$attribs->{$type}} ) { + push @result, "# InnoDB:"; + }; + + NUM_ATTRIB: + foreach my $attrib ( @{$attribs->{$type}} ) { + my ($attrib_name, @vals) = @$attrib; + push @result, + sprintf $self->{num_format}, + $self->make_label($attrib_name), @vals; + } + } + + if ( @{$attribs->{bool}} ) { + push @result, "# Boolean:"; + BOOL_ATTRIB: + foreach my $attrib ( @{$attribs->{bool}} ) { + my ($attrib_name, @vals) = @$attrib; + push @result, + sprintf $self->{bool_format}, + $self->make_label($attrib_name), @vals; + } + } + + if ( @{$attribs->{string}} ) { + push @result, "# String:"; + STRING_ATTRIB: + foreach my $attrib ( @{$attribs->{string}} ) { + my ($attrib_name, $vals) = @$attrib; push @result, sprintf $self->{string_format}, - $self->make_label($attrib), - $self->format_string_list($attrib, $vals, $class_cnt); - $printed_strings = 1; + $self->make_label($attrib_name), + $self->format_string_list($attrib_name, $vals, $val->{counts}{class_cnt}); } - pop @result unless $printed_strings; } + return join("\n", map { s/\s+$//; $_ } @result) . "\n"; } @@ -6921,73 +7485,6 @@ sub chart_distro { return join("\n", @results) . "\n"; } - -sub distro_sparkline { - my ( $self, %args ) = @_; - foreach my $arg ( qw(ea item attrib) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $ea = $args{ea}; - my $item = $args{item}; - my $attrib = $args{attrib}; - - my $results = $ea->results(); - my $store = $results->{classes}->{$item}->{$attrib}; - my $vals = $store->{all}; - - my $all_zeros_sparkline = " " x 8; - - return $all_zeros_sparkline unless defined $vals && scalar %$vals; - - my @buck_tens = $ea->buckets_of(10); - my @distro = map { 0 } (0 .. 7); - my @buckets = map { 0 } (0..999); - map { $buckets[$_] = $vals->{$_} } keys %$vals; - $vals = \@buckets; - map { $distro[$buck_tens[$_]] += $vals->[$_] } (1 .. @$vals - 1); - - my $vals_per_mark; - my $max_val = 0; - my $max_disp_width = 64; - foreach my $n_vals ( @distro ) { - $max_val = $n_vals if $n_vals > $max_val; - } - $vals_per_mark = $max_val / $max_disp_width; - - my ($min, $max); - foreach my $i ( 0 .. $#distro ) { - my $n_vals = $distro[$i]; - my $n_marks = $n_vals / ($vals_per_mark || 1); - $n_marks = 1 if $n_marks < 1 && $n_vals > 0; - - $min = $n_marks if $n_marks && (!$min || $n_marks < $min); - $max = $n_marks if !$max || $n_marks > $max; - } - return $all_zeros_sparkline unless $min && $max; - - - $min = 0 if $min == $max; - my @range_min; - my $d = floor((($max+0.00001)-$min) / 4); - for my $x ( 1..4 ) { - push @range_min, $min + ($d * $x); - } - - my $sparkline = ""; - foreach my $i ( 0 .. $#distro ) { - my $n_vals = $distro[$i]; - my $n_marks = $n_vals / ($vals_per_mark || 1); - $n_marks = 1 if $n_marks < 1 && $n_vals > 0; - $sparkline .= $n_marks <= 0 ? ' ' - : $n_marks <= $range_min[0] ? '_' - : $n_marks <= $range_min[1] ? '.' - : $n_marks <= $range_min[2] ? 
'-' - : '^'; - } - - return $sparkline; -} - sub profile { my ( $self, %args ) = @_; foreach my $arg ( qw(ea worst groupby) ) { @@ -6999,7 +7496,6 @@ sub profile { my $groupby = $args{groupby}; my $qr = $self->{QueryRewriter}; - my $o = $self->{OptionParser}; my $results = $ea->results(); my $total_r = $results->{globals}->{Query_time}->{sum} || 0; @@ -7021,40 +7517,20 @@ sub profile { $qr->distill($samp_query, %{$args{distill_args}}) : $item, id => $groupby eq 'fingerprint' ? make_checksum($item) : '', vmr => ($query_time->{stddev}**2) / ($query_time->{avg} || 1), - apdex => defined $query_time->{apdex} ? $query_time->{apdex} : "NS", ); - if ( $o->get('explain') && $samp_query ) { - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} - : undef; - eval { - $profile{explain_sparkline} = $self->explain_sparkline( - $samp_query, $default_db); - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d("Failed to get EXPLAIN sparkline:", $EVAL_ERROR); - } - } - push @profiles, \%profile; } - my $report = $self->{formatter_for}->{profile} || new ReportFormatter( - line_width => LINE_LENGTH, - long_last_column => 1, - extend_right => 1, - ); - $report->set_title('Profile'); + my $report = $self->ReportFormatter(); + $report->title('Profile'); my @cols = ( { name => 'Rank', right_justify => 1, }, { name => 'Query ID', }, { name => 'Response time', right_justify => 1, }, { name => 'Calls', right_justify => 1, }, { name => 'R/Call', right_justify => 1, }, - { name => 'Apdx', right_justify => 1, width => 4, }, { name => 'V/M', right_justify => 1, width => 5, }, - ( $o->get('explain') ? { name => 'EXPLAIN' } : () ), { name => 'Item', }, ); $report->set_columns(@cols); @@ -7070,9 +7546,7 @@ sub profile { "$rt $rtp", $item->{cnt}, $rc, - $item->{apdex}, $vmr, - ( $o->get('explain') ? $item->{explain_sparkline} || "" : () ), $item->{sample}, ); $report->add_line(@vals); @@ -7098,9 +7572,7 @@ sub profile { "$rt $rtp", $misc->{cnt}, $rc, - 'NS', # Apdex is not meaningful here '0.0', # variance-to-mean ratio is not meaningful here - ( $o->get('explain') ? "MISC" : () ), "<".scalar @$other." 
ITEMS>", ); } @@ -7185,12 +7657,8 @@ sub prepared { return unless scalar @prepared; - my $report = $self->{formatter_for}->{prepared} || new ReportFormatter( - line_width => LINE_LENGTH, - long_last_column => 1, - extend_right => 1, - ); - $report->set_title('Prepared statements'); + my $report = $self->ReportFormatter(); + $report->title('Prepared statements'); $report->set_columns( { name => 'Rank', right_justify => 1, }, { name => 'Query ID', }, @@ -7224,11 +7692,11 @@ sub make_global_header { my @lines; push @lines, - sprintf $self->{num_format}, "Attribute", '', @{$self->{global_headers}}; + sprintf $self->{num_format}, "Attribute", '', @{$self->global_headers()}; push @lines, sprintf $self->{num_format}, - (map { "=" x $_ } $self->{label_width}), + (map { "=" x $_ } $self->label_width()), (map { " " x $_ } qw(3)), # no pct column in global header (map { "=" x $_ } qw(7 7 7 7 7 7 7)); @@ -7242,11 +7710,11 @@ sub make_event_header { my @lines; push @lines, - sprintf $self->{num_format}, "Attribute", @{$self->{event_headers}}; + sprintf $self->{num_format}, "Attribute", @{$self->event_headers()}; push @lines, sprintf $self->{num_format}, - map { "=" x $_ } ($self->{label_width}, qw(3 7 7 7 7 7 7 7)); + map { "=" x $_ } ($self->label_width(), qw(3 7 7 7 7 7 7 7)); $self->{event_header_lines} = \@lines; return @lines; @@ -7261,7 +7729,7 @@ sub make_label { if ( $val =~ m/^InnoDB/ ) { $val =~ s/^InnoDB //; $val = $val eq 'trx id' ? "InnoDB trxID" - : substr($val, 0, $self->{label_width}); + : substr($val, 0, $self->label_width()); } $val = $val eq 'user' ? 'Users' @@ -7272,7 +7740,7 @@ sub make_label { : $val eq 'bytes' ? 'Query size' : $val eq 'Tmp disk tables' ? 'Tmp disk tbl' : $val eq 'Tmp table sizes' ? 'Tmp tbl size' - : substr($val, 0, $self->{label_width}); + : substr($val, 0, $self->label_width); return $val; } @@ -7286,8 +7754,7 @@ sub bool_percents { sub format_string_list { my ( $self, $attrib, $vals, $class_cnt ) = @_; - my $o = $self->{OptionParser}; - my $show_all = $o->get('show-all'); + my $show_all = $self->{options}->{show_all}; if ( !exists $vals->{unq} ) { return ($vals->{cnt}); @@ -7417,7 +7884,7 @@ sub pref_sort { sub tables_report { my ( $self, @tables ) = @_; return '' unless @tables; - my $q = $self->{Quoter}; + my $q = $self->Quoter(); my $tables = ""; foreach my $db_tbl ( @tables ) { my ( $db, $tbl ) = @$db_tbl; @@ -7436,7 +7903,7 @@ sub explain_report { return '' unless $query; my $dbh = $self->{dbh}; - my $q = $self->{Quoter}; + my $q = $self->Quoter(); my $qp = $self->{QueryParser}; return '' unless $dbh && $q && $qp; @@ -7485,34 +7952,6 @@ sub format_time_range { return $min && $max ? "$min to $max" : ''; } -sub explain_sparkline { - my ( $self, $query, $db ) = @_; - return unless $query; - - my $q = $self->{Quoter}; - my $dbh = $self->{dbh}; - my $ex = $self->{ExplainAnalyzer}; - return unless $dbh && $ex; - - if ( $db ) { - PTDEBUG && _d($dbh, "USE", $db); - $dbh->do("USE " . 
$q->quote($db)); - } - my $res = $ex->normalize( - $ex->explain_query( - dbh => $dbh, - query => $query, - ) - ); - - my $sparkline; - if ( $res ) { - $sparkline = $ex->sparkline(explain => $res); - } - - return $sparkline; -} - sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } @@ -7527,6 +7966,139 @@ sub _d { # End QueryReportFormatter package # ########################################################################### +# ########################################################################### +# JSONReportFormatter package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/JSONReportFormatter.pm +# t/lib/JSONReportFormatter.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package JSONReportFormatter; +use Mo; + +use List::Util qw(sum); +use Transformers qw(make_checksum parse_timestamp); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +my $have_json = eval { require JSON }; + +our $pretty_json = undef; +our $sorted_json = undef; + +extends qw(QueryReportFormatter); + +has _json => ( + is => 'ro', + init_arg => undef, + builder => '_build_json', +); + +sub _build_json { + return unless $have_json; + return JSON->new->utf8 + ->pretty($pretty_json) + ->canonical($sorted_json); +} + +sub encode_json { + my ($self, $encode) = @_; + if ( my $json = $self->_json ) { + return $json->encode($encode); + } + else { + return Transformers::encode_json($encode); + } +} + +override [qw(rusage date hostname files header profile prepared)] => sub { + return; +}; + +override event_report => sub { + my ($self, %args) = @_; + return $self->event_report_values(%args); +}; + +override query_report => sub { + my ($self, %args) = @_; + foreach my $arg ( qw(ea worst orderby groupby) ) { + die "I need a $arg argument" unless defined $arg; + } + + my $ea = $args{ea}; + my $worst = $args{worst}; + + my @attribs = @{$ea->get_attributes()}; + + my @queries; + foreach my $worst_info ( @$worst ) { + my $item = $worst_info->[0]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; + + my $all_log_pos = $ea->{result_classes}->{$item}->{pos_in_log}->{all}; + my $times_seen = sum values %$all_log_pos; + + my %class = ( + sample => $sample->{arg}, + fingerprint => $item, + checksum => make_checksum($item), + cnt => $times_seen, + ); + + my %metrics; + foreach my $attrib ( @attribs ) { + $metrics{$attrib} = $ea->metrics( + attrib => $attrib, + where => $item, + ); + } + + foreach my $attrib ( keys %metrics ) { + if ( ! grep { $_ } values %{$metrics{$attrib}} ) { + delete $metrics{$attrib}; + next; + } + + if ($attrib eq 'ts') { + my $ts = delete $metrics{ts}; + foreach my $thing ( qw(min max) ) { + next unless defined $ts && defined $ts->{$thing}; + $ts->{$thing} = parse_timestamp($ts->{$thing}); + } + $class{ts_min} = $ts->{min}; + $class{ts_max} = $ts->{max}; + } + elsif ( ($ea->{type_for}->{$attrib} || '') eq 'num' ) { + for my $value ( values %{$metrics{$attrib}} ) { + next unless $value; + $value = sprintf '%.6f', $value; + } + if ( my $pct = $metrics{$attrib}->{pct} ) { + $metrics{$attrib}->{pct} = sprintf('%.2f', $pct); + } + } + } + push @queries, { + class => \%class, + attributes => \%metrics, + }; + } + + my $json = $self->encode_json(\@queries); + $json .= "\n" if $json !~ /\n\Z/; + return $json . 
"\n"; +}; + +1; +} +# ########################################################################### +# End JSONReportFormatter package +# ########################################################################### + # ########################################################################### # EventTimeline package # This package is a copy without comments from the original. The original @@ -8588,7 +9160,7 @@ sub new { sub set_history_options { my ( $self, %args ) = @_; - foreach my $arg ( qw(table dbh tbl_struct col_pat) ) { + foreach my $arg ( qw(table tbl_struct col_pat) ) { die "I need a $arg argument" unless $args{$arg}; } @@ -8622,7 +9194,7 @@ sub set_history_options { } @cols) . ')'; PTDEBUG && _d($sql); - $self->{history_sth} = $args{dbh}->prepare($sql); + $self->{history_sth} = $self->{dbh}->prepare($sql); $self->{history_metrics} = \@metrics; return; @@ -10349,143 +10921,6 @@ sub _d { # End HTTPProtocolParser package # ########################################################################### -# ########################################################################### -# ExecutionThrottler package -# This package is a copy without comments from the original. The original -# with comments and its test file can be found in the Bazaar repository at, -# lib/ExecutionThrottler.pm -# t/lib/ExecutionThrottler.t -# See https://launchpad.net/percona-toolkit for more information. -# ########################################################################### -{ -package ExecutionThrottler; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use constant PTDEBUG => $ENV{PTDEBUG} || 0; - -use List::Util qw(sum min max); -use Time::HiRes qw(time); -use Data::Dumper; -$Data::Dumper::Indent = 1; -$Data::Dumper::Sortkeys = 1; -$Data::Dumper::Quotekeys = 0; - -sub new { - my ( $class, %args ) = @_; - my @required_args = qw(rate_max get_rate check_int step); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $self = { - step => 0.05, # default - %args, - rate_ok => undef, - last_check => undef, - stats => { - rate_avg => 0, - rate_samples => [], - }, - int_rates => [], - skip_prob => 0.0, - }; - - return bless $self, $class; -} - -sub throttle { - my ( $self, %args ) = @_; - my $time = $args{misc}->{time} || time; - if ( $self->_time_to_check($time) ) { - my $rate_avg = (sum(@{$self->{int_rates}}) || 0) - / (scalar @{$self->{int_rates}} || 1); - my $running_avg = $self->_save_rate_avg($rate_avg); - PTDEBUG && _d('Average rate for last interval:', $rate_avg); - - if ( $args{stats} ) { - $args{stats}->{throttle_checked_rate}++; - $args{stats}->{throttle_rate_avg} = sprintf '%.2f', $running_avg; - } - - @{$self->{int_rates}} = (); - - if ( $rate_avg > $self->{rate_max} ) { - $self->{skip_prob} += $self->{step}; - $self->{skip_prob} = 1.0 if $self->{skip_prob} > 1.0; - PTDEBUG && _d('Rate max exceeded'); - $args{stats}->{throttle_rate_max_exceeded}++ if $args{stats}; - } - else { - $self->{skip_prob} -= $self->{step}; - $self->{skip_prob} = 0.0 if $self->{skip_prob} < 0.0; - $args{stats}->{throttle_rate_ok}++ if $args{stats}; - } - - PTDEBUG && _d('Skip probability:', $self->{skip_prob}); - $self->{last_check} = $time; - } - else { - my $current_rate = $self->{get_rate}->(); - push @{$self->{int_rates}}, $current_rate; - if ( $args{stats} ) { - $args{stats}->{throttle_rate_min} = min( - ($args{stats}->{throttle_rate_min} || ()), $current_rate); - $args{stats}->{throttle_rate_max} = max( - 
($args{stats}->{throttle_rate_max} || ()), $current_rate); - } - PTDEBUG && _d('Current rate:', $current_rate); - } - - if ( $args{event} ) { - $args{event}->{Skip_exec} = $self->{skip_prob} <= rand() ? 'No' : 'Yes'; - } - - return $args{event}; -} - -sub _time_to_check { - my ( $self, $time ) = @_; - if ( !$self->{last_check} ) { - $self->{last_check} = $time; - return 0; - } - return $time - $self->{last_check} >= $self->{check_int} ? 1 : 0; -} - -sub rate_avg { - my ( $self ) = @_; - return $self->{stats}->{rate_avg} || 0; -} - -sub skip_probability { - my ( $self ) = @_; - return $self->{skip_prob}; -} - -sub _save_rate_avg { - my ( $self, $rate ) = @_; - my $samples = $self->{stats}->{rate_samples}; - push @$samples, $rate; - shift @$samples if @$samples > 1_000; - $self->{stats}->{rate_avg} = sum(@$samples) / (scalar @$samples); - return $self->{stats}->{rate_avg} || 0; -} - -sub _d { - my ($package, undef, $line) = caller 0; - @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } - map { defined $_ ? $_ : 'undef' } - @_; - print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; -} - -1; -} -# ########################################################################### -# End ExecutionThrottler package -# ########################################################################### - # ########################################################################### # MasterSlave package # This package is a copy without comments from the original. The original @@ -11447,226 +11882,6 @@ sub _d { # End FileIterator package # ########################################################################### -# ########################################################################### -# ExplainAnalyzer package -# This package is a copy without comments from the original. The original -# with comments and its test file can be found in the Bazaar repository at, -# lib/ExplainAnalyzer.pm -# t/lib/ExplainAnalyzer.t -# See https://launchpad.net/percona-toolkit for more information. -# ########################################################################### -{ -package ExplainAnalyzer; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use constant PTDEBUG => $ENV{PTDEBUG} || 0; - -use Data::Dumper; -$Data::Dumper::Indent = 1; -$Data::Dumper::Sortkeys = 1; -$Data::Dumper::Quotekeys = 0; - -sub new { - my ( $class, %args ) = @_; - foreach my $arg ( qw(QueryRewriter QueryParser) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $self = { - %args, - }; - return bless $self, $class; -} - -sub explain_query { - my ( $self, %args ) = @_; - foreach my $arg ( qw(dbh query) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($query, $dbh) = @args{qw(query dbh)}; - $query = $self->{QueryRewriter}->convert_to_select($query); - if ( $query !~ m/^\s*select/i ) { - PTDEBUG && _d("Cannot EXPLAIN non-SELECT query:", - (length $query <= 100 ? $query : substr($query, 0, 100) . "...")); - return; - } - my $sql = "EXPLAIN $query"; - PTDEBUG && _d($dbh, $sql); - my $explain = $dbh->selectall_arrayref($sql, { Slice => {} }); - PTDEBUG && _d("Result of EXPLAIN:", Dumper($explain)); - return $explain; -} - -sub normalize { - my ( $self, $explain ) = @_; - my @result; # Don't modify the input. - - foreach my $row ( @$explain ) { - $row = { %$row }; # Make a copy -- don't modify the input. 
- - foreach my $col ( qw(key possible_keys key_len ref) ) { - $row->{$col} = [ split(/,/, $row->{$col} || '') ]; - } - - $row->{Extra} = { - map { - my $var = $_; - - if ( my ($key, $vals) = $var =~ m/(Using union)\(([^)]+)\)/ ) { - $key => [ split(/,/, $vals) ]; - } - - else { - $var => 1; - } - } - split(/; /, $row->{Extra} || '') # Split on semicolons. - }; - - push @result, $row; - } - - return \@result; -} - -sub get_alternate_indexes { - my ( $self, $keys, $possible_keys ) = @_; - my %used = map { $_ => 1 } @$keys; - return [ grep { !$used{$_} } @$possible_keys ]; -} - -sub get_index_usage { - my ( $self, %args ) = @_; - foreach my $arg ( qw(query explain) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($query, $explain) = @args{qw(query explain)}; - my @result; - - my $lookup = $self->{QueryParser}->get_aliases($query); - - foreach my $row ( @$explain ) { - - next if !defined $row->{table} - || $row->{table} =~ m/^<(derived|union)\d/; - - my $table = $lookup->{TABLE}->{$row->{table}} || $row->{table}; - my $db = $lookup->{DATABASE}->{$table} || $args{db}; - push @result, { - db => $db, - tbl => $table, - idx => $row->{key}, - alt => $self->get_alternate_indexes( - $row->{key}, $row->{possible_keys}), - }; - } - - PTDEBUG && _d("Index usage for", - (length $query <= 100 ? $query : substr($query, 0, 100) . "..."), - ":", Dumper(\@result)); - return \@result; -} - -sub get_usage_for { - my ( $self, $checksum, $db ) = @_; - die "I need a checksum and db" unless defined $checksum && defined $db; - my $usage; - if ( exists $self->{usage}->{$db} # Don't auto-vivify - && exists $self->{usage}->{$db}->{$checksum} ) - { - $usage = $self->{usage}->{$db}->{$checksum}; - } - PTDEBUG && _d("Usage for", - (length $checksum <= 100 ? $checksum : substr($checksum, 0, 100) . "..."), - "on", $db, ":", Dumper($usage)); - return $usage; -} - -sub save_usage_for { - my ( $self, $checksum, $db, $usage ) = @_; - die "I need a checksum and db" unless defined $checksum && defined $db; - $self->{usage}->{$db}->{$checksum} = $usage; -} - -sub fingerprint { - my ( $self, %args ) = @_; - my @required_args = qw(explain); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($explain) = @args{@required_args}; -} - -sub sparkline { - my ( $self, %args ) = @_; - my @required_args = qw(explain); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($explain) = @args{@required_args}; - PTDEBUG && _d("Making sparkline for", Dumper($explain)); - - my $access_code = { - 'ALL' => 'a', - 'const' => 'c', - 'eq_ref' => 'e', - 'fulltext' => 'f', - 'index' => 'i', - 'index_merge' => 'm', - 'range' => 'n', - 'ref_or_null' => 'o', - 'ref' => 'r', - 'system' => 's', - 'unique_subquery' => 'u', - }; - - my $sparkline = ''; - my ($T, $F); # Using temporary, Using filesort - - foreach my $tbl ( @$explain ) { - my $code; - if ( defined $tbl->{type} ) { - $code = $access_code->{$tbl->{type}} || "?"; - $code = uc $code if $tbl->{Extra}->{'Using index'}; - } - else { - $code = '-' - }; - $sparkline .= $code; - - $T = 1 if $tbl->{Extra}->{'Using temporary'}; - $F = 1 if $tbl->{Extra}->{'Using filesort'}; - } - - if ( $T || $F ) { - if ( $explain->[-1]->{Extra}->{'Using temporary'} - || $explain->[-1]->{Extra}->{'Using filesort'} ) { - $sparkline .= ">" . ($T ? "T" : "") . ($F ? "F" : ""); - } - else { - $sparkline = ($T ? "T" : "") . ($F ? "F" : "") . 
">$sparkline"; - } - } - - PTDEBUG && _d("sparkline:", $sparkline); - return $sparkline; -} - -sub _d { - my ($package, undef, $line) = caller 0; - @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } - map { defined $_ ? $_ : 'undef' } - @_; - print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; -} - -1; -} -# ########################################################################### -# End ExplainAnalyzer package -# ########################################################################### - # ########################################################################### # Runtime package # This package is a copy without comments from the original. The original @@ -11825,7 +12040,7 @@ sub new { } my $self = { - instrument => 0, + instrument => PTDEBUG, continue_on_error => 0, %args, @@ -11852,9 +12067,7 @@ sub add { push @{$self->{procs}}, $process; push @{$self->{names}}, $name; - if ( my $n = $args{retry_on_error} ) { - $self->{retries}->{$name} = $n; - } + $self->{retries}->{$name} = $args{retry_on_error} || 100; if ( $self->{instrument} ) { $self->{instrumentation}->{$name} = { time => 0, calls => 0 }; } @@ -11923,7 +12136,11 @@ sub execute { my $msg = "Pipeline process " . ($procno + 1) . " ($name) caused an error: " . $EVAL_ERROR; - if ( defined $self->{retries}->{$name} ) { + if ( !$self->{continue_on_error} ) { + die $msg . "Terminating pipeline because --continue-on-error " + . "is false.\n"; + } + elsif ( defined $self->{retries}->{$name} ) { my $n = $self->{retries}->{$name}; if ( $n ) { warn $msg . "Will retry pipeline process $procno ($name) " @@ -11935,9 +12152,6 @@ sub execute { . "($name) caused too many errors.\n"; } } - elsif ( !$self->{continue_on_error} ) { - die $msg; - } else { warn $msg; } @@ -13296,33 +13510,57 @@ sub _d { # ########################################################################### package pt_query_digest; +use strict; +use warnings FATAL => 'all'; use English qw(-no_match_vars); -use Time::Local qw(timelocal); -use Time::HiRes qw(time usleep); -use List::Util qw(max); -use POSIX qw(signal_h); -use Data::Dumper; -$Data::Dumper::Indent = 1; -$OUTPUT_AUTOFLUSH = 1; +use constant PTDEBUG => $ENV{PTDEBUG} || 0; -Transformers->import(qw(shorten micro_t percentage_of ts make_checksum - any_unix_timestamp parse_timestamp unix_timestamp crc32)); +use Time::Local qw(timelocal); +use Time::HiRes qw(time usleep); +use List::Util qw(max); +use Scalar::Util qw(looks_like_number); +use POSIX qw(signal_h); +use Data::Dumper; use Percona::Toolkit; -use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +$OUTPUT_AUTOFLUSH = 1; + +Transformers->import(qw( + shorten + micro_t + percentage_of + ts + make_checksum + any_unix_timestamp + parse_timestamp + unix_timestamp + crc32 +)); use sigtrap 'handler', \&sig_int, 'normal-signals'; # Global variables. Only really essential variables should be here. my $oktorun = 1; -my $ex_dbh; # For --execute my $ep_dbh; # For --explain my $ps_dbh; # For Processlist my $aux_dbh; # For --aux-dsn (--since/--until "MySQL expression") +my $resume_file; +my $offset; + +(my $tool = __PACKAGE__) =~ tr/_/-/; + sub main { - local @ARGV = @_; # set global ARGV for this package - $oktorun = 1; # reset between tests else pipeline won't run + # Reset global vars, else tests will fail. 
+ local @ARGV = @_; + $oktorun = 1; + $resume_file = undef; + $offset = undef; # ########################################################################## # Get configuration information. @@ -13347,31 +13585,11 @@ sub main { } if ( !$o->get('help') ) { - if ( $review_dsn - && (!defined $review_dsn->{D} || !defined $review_dsn->{t}) ) { - $o->save_error('The --review DSN requires a D (database) and t' - . ' (table) part specifying the query review table'); - } - if ( $o->get('mirror') - && (!$o->get('execute') || !$o->get('processlist')) ) { - $o->save_error('--mirror requires --execute and --processlist'); - } if ( $o->get('outliers') && grep { $_ !~ m/^\w+:[0-9.]+(?::[0-9.]+)?$/ } @{$o->get('outliers')} ) { $o->save_error('--outliers requires two or three colon-separated fields'); } - if ( $o->get('execute-throttle') ) { - my ($rate_max, $int, $step) = @{$o->get('execute-throttle')}; - $o->save_error("--execute-throttle max time must be between 1 and 100") - unless $rate_max && $rate_max > 0 && $rate_max <= 100; - $o->save_error("No check interval value for --execute-throttle") - unless $int; - $o->save_error("--execute-throttle check interval must be an integer") - if $int =~ m/[^\d]/; - $o->save_error("--execute-throttle step must be between 1 and 100") - if $step && ($step < 1 || $step > 100); - } if ( $o->get('progress') ) { eval { Progress->validate_spec($o->get('progress')) }; if ( $EVAL_ERROR ) { @@ -13380,9 +13598,20 @@ sub main { } } - if ( $o->get('apdex-threshold') <= 0 ) { - $o->save_error("Apdex threshold must be a positive decimal value"); + if ( my $review_dsn = $o->get('review') ) { + $o->save_error('--review does not accept a t option. Perhaps you meant ' + . 'to use --review-table or --history-table?') + if defined $review_dsn->{t}; } + + for my $tables ('review-table', 'history-table') { + my $got = $o->get($tables); + if ( grep !defined, Quoter->split_unquote($got) ) { + $o->save_error("--$tables should be passed a " + . "fully-qualified table name, got $got"); + } + } + if ( my $patterns = $o->get('embedded-attributes') ) { $o->save_error("--embedded-attributes should be passed two " . "comma-separated patterns, got " . scalar(@$patterns) ) @@ -13440,7 +13669,6 @@ sub main { # ######################################################################## # Set up for --explain # ######################################################################## - my $exa; if ( my $ep_dsn = $o->get('explain') ) { $ep_dbh = get_cxn( for => '--explain', @@ -13450,19 +13678,13 @@ sub main { opts => { AutoCommit => 1 }, ); $ep_dbh->{InactiveDestroy} = 1; # Don't die on fork(). - - $exa = new ExplainAnalyzer( - QueryRewriter => $qr, - QueryParser => $qp, - ); } # ######################################################################## - # Set up for --review and --review-history. + # Set up for --review. # ######################################################################## my $qv; # QueryReview my $qv_dbh; # For QueryReview - my $qv_dbh2; # For QueryReview and --review-history if ( $review_dsn ) { my $tp = new TableParser(Quoter => $q); $qv_dbh = get_cxn( @@ -13473,28 +13695,33 @@ sub main { opts => { AutoCommit => 1 }, ); $qv_dbh->{InactiveDestroy} = 1; # Don't die on fork(). 
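+      # The review and history tables are now named by --review-table and
+      # --history-table (full db.tbl names); the --review DSN's t part is
+      # rejected during option validation above.  create_review_tables()
+      # below creates each table unless --no-create-review-tables is given.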
- my @db_tbl = @{$review_dsn}{qw(D t)}; - my $db_tbl = $q->quote(@db_tbl); - # Create the review table if desired - if ( $o->get('create-review-table') ) { - my $sql = $o->read_para_after( - __FILE__, qr/MAGIC_create_review/); - $sql =~ s/query_review/IF NOT EXISTS $db_tbl/; - PTDEBUG && _d($sql); - $qv_dbh->do($sql); - } + my @db_tbl = Quoter->split_unquote($o->get('review-table')); + my @hdb_tbl = Quoter->split_unquote($o->get('history-table')); - # Check for the existence of the table. - if ( !$tp->check_table( - dbh => $qv_dbh, - db => $db_tbl[0], - tbl => $db_tbl[1]) ) - { - die "The query review table $db_tbl does not exist. " - . "Specify --create-review-table to create it, " - . "and ensure that the MySQL user has privileges to create " - . "and update the table.\n"; + my $db_tbl = $q->quote(@db_tbl); + my $hdb_tbl = $q->quote(@hdb_tbl); + + my $create_review_sql = $o->read_para_after( + __FILE__, qr/MAGIC_create_review/); + $create_review_sql =~ s/query_review/IF NOT EXISTS $db_tbl/; + + my $create_history_sql = $o->read_para_after( + __FILE__, qr/MAGIC_create_review_history/); + $create_history_sql =~ s/query_review_history/IF NOT EXISTS $hdb_tbl/; + + for my $create ( + [ $db_tbl, $create_review_sql ], + [ $hdb_tbl, $create_history_sql ], + ) { + my ($tbl_name, $sql) = @$create; + create_review_tables( + dbh => $qv_dbh, + full_table => $tbl_name, + create_table_sql => $sql, + create_table => $o->get('create-review-tables'), + TableParser => $tp, + ); } # Set up the new QueryReview object. @@ -13506,79 +13733,43 @@ sub main { quoter => $q, ); - # Set up the review-history table - if ( my $review_history_dsn = $o->get('review-history') ) { - $qv_dbh2 = get_cxn( - for => '--review-history', - dsn => $review_history_dsn, - OptionParser => $o, - DSNParser => $dp, - opts => { AutoCommit => 1 }, - ); - $qv_dbh2->{InactiveDestroy} = 1; # Don't die on fork(). - my @hdb_tbl = @{$o->get('review-history')}{qw(D t)}; - my $hdb_tbl = $q->quote(@hdb_tbl); - - # Create the review-history table if desired - if ( $o->get('create-review-history-table') ) { - my $sql = $o->read_para_after( - __FILE__, qr/MAGIC_create_review_history/); - $sql =~ s/query_review_history/IF NOT EXISTS $hdb_tbl/; - PTDEBUG && _d($sql); - $qv_dbh2->do($sql); - } - - # Check for the existence of the table. - if ( !$tp->check_table( - dbh => $qv_dbh2, - db => $hdb_tbl[0], - tbl => $hdb_tbl[1]) ) - { - die "The query review history table $hdb_tbl does not exist. " - . "Specify --create-review-history-table to create it, " - . "and ensure that the MySQL user has privileges to create " - . "and update the table.\n"; - } - - # Inspect for MAGIC_history_cols. Add them to the --select list - # only if an explicit --select list was given. Otherwise, leave - # --select undef which will cause EventAggregator to aggregate every - # attribute available which will include the history columns. - # If no --select list was given and we make one by adding the history - # columsn to it, then EventAggregator will only aggregate the - # history columns and nothing else--we don't want this. - my $tbl = $tp->parse($tp->get_create_table($qv_dbh2, @hdb_tbl)); - my $pat = $o->read_para_after(__FILE__, qr/MAGIC_history_cols/); - $pat =~ s/\s+//g; - $pat = qr/^(.*?)_($pat)$/; - # Get original --select values. 
- my %select = map { $_ => 1 } @{$o->get('select')}; - foreach my $col ( @{$tbl->{cols}} ) { - my ( $attr, $metric ) = $col =~ m/$pat/; - next unless $attr && $metric; - $attr = ucfirst $attr if $attr =~ m/_/; # TableParser lowercases - # Add history table values to original select values. - $select{$attr}++; - } - - if ( $o->got('select') ) { - # Re-set --select with its original values plus the history - # table values. - $o->set('select', [keys %select]); - PTDEBUG && _d("--select after parsing --review-history table:", - @{$o->get('select')}); - } - - # And tell the QueryReview that it has more work to do. - $qv->set_history_options( - table => $hdb_tbl, - dbh => $qv_dbh2, - tbl_struct => $tbl, - col_pat => $pat, - ); + # Inspect for MAGIC_history_cols. Add them to the --select list + # only if an explicit --select list was given. Otherwise, leave + # --select undef which will cause EventAggregator to aggregate every + # attribute available which will include the history columns. + # If no --select list was given and we make one by adding the history + # columsn to it, then EventAggregator will only aggregate the + # history columns and nothing else--we don't want this. + my $tbl = $tp->parse($tp->get_create_table($qv_dbh, @hdb_tbl)); + my $pat = $o->read_para_after(__FILE__, qr/MAGIC_history_cols/); + $pat =~ s/\s+//g; + $pat = qr/^(.*?)_($pat)$/; + # Get original --select values. + my %select = map { $_ => 1 } @{$o->get('select')}; + foreach my $col ( @{$tbl->{cols}} ) { + my ( $attr, $metric ) = $col =~ $pat; + next unless $attr && $metric; + $attr = ucfirst $attr if $attr =~ m/_/; # TableParser lowercases + # Add history table values to original select values. + $select{$attr}++; } + + if ( $o->got('select') ) { + # Re-set --select with its original values plus the history + # table values. + $o->set('select', [sort keys %select]); + PTDEBUG && _d("--select after parsing the history table:", + @{$o->get('select')}); + } + + # And tell the QueryReview that it has more work to do. + $qv->set_history_options( + table => $hdb_tbl, + tbl_struct => $tbl, + col_pat => $pat, + ); } - + # ######################################################################## # Create all the pipeline processes that do all the work: get input, # parse events, manage runtime, switch iterations, aggregate, etc. @@ -13599,13 +13790,7 @@ sub main { stats => \%stats, }; - # Enable timings to instrument code for either of these two opts. - # Else, don't instrument to avoid cost of measurement. - my $instrument = $o->get('pipeline-profile') || $o->get('execute-throttle'); - PTDEBUG && _d('Instrument:', $instrument); - my $pipeline = new Pipeline( - instrument => $instrument, continue_on_error => $o->get('continue-on-error'), ); @@ -13633,7 +13818,7 @@ sub main { } # prep { # input - my $fi = new FileIterator(); + my $fi = FileIterator->new(); my $next_file = $fi->get_file_itr(@ARGV); my $input_fh; # the current input fh my $pr; # Progress obj for ^ @@ -13642,20 +13827,51 @@ sub main { name => 'input', process => sub { my ( $args ) = @_; + # Only get the next file when there's no fh or no more events in # the current fh. This allows us to do collect-and-report cycles # (i.e. iterations) on huge files. This doesn't apply to infinite # inputs because they don't set more_events false. if ( !$args->{input_fh} || !$args->{more_events} ) { + + # Close the current file. if ( $args->{input_fh} ) { close $args->{input_fh} or die "Cannot close input fh: $OS_ERROR"; } + + # Open the next file. 
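+               # The --resume file is expected to hold a single line: the byte
+               # offset written by save_resume_offset() at the end of the
+               # previous run.  Once the next file is opened below, that offset
+               # is validated with looks_like_number() and passed to seek() so
+               # parsing continues where the previous run stopped.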
my ($fh, $filename, $filesize) = $next_file->(); if ( $fh ) { PTDEBUG && _d('Reading', $filename); + PTDEBUG && _d('File size:', $filesize); push @read_files, $filename || "STDIN"; + # Read the file offset for --resume. + if ( ($resume_file = $o->get('resume')) && $filename ) { + if ( -s $resume_file ) { + open my $resume_fh, '<', $resume_file + or die "Error opening $resume_file: $OS_ERROR"; + chomp(my $resume_offset = <$resume_fh>); + close $resume_fh + or die "Error close $resume_file: $OS_ERROR"; + if ( !looks_like_number($resume_offset) ) { + die "Offset $resume_offset in $resume_file " + . "does not look like a number.\n"; + } + PTDEBUG && _d('Resuming at offset', $resume_offset); + seek $fh, $resume_offset, 0 + or die "Error seeking to $resume_offset in " + . "$resume_file: $OS_ERROR"; + warn "Resuming $filename from offset $resume_offset " + . "(file size: $filesize)...\n"; + } + else { + PTDEBUG && _d('Not resuming', $filename, 'because', + $resume_file, 'does not exist'); + } + } + # Create callback to read next event. Some inputs, like # Processlist, may use something else but most next_event. if ( my $read_time = $o->get('read-timeout') ) { @@ -13665,8 +13881,15 @@ sub main { else { $args->{next_event} = sub { return <$fh>; }; } + $args->{filename} = $filename; $args->{input_fh} = $fh; - $args->{tell} = sub { return tell $fh; }; + $args->{tell} = sub { + $offset = tell $fh; # update global $offset + if ( $args->{filename} ) { + $args->{pos_for}->{$args->{filename}} = $offset; + } + return $offset; # legacy: return global $offset + }; $args->{more_events} = 1; # Reset in case we read two logs out of order by time. @@ -13725,14 +13948,12 @@ sub main { $err = $EVAL_ERROR; if ( $err ) { # Try to reconnect when there's an error. eval { - ($cur_server, $ps_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ps_dbh, - current => $cur_server, - read_only => 0, - comment => 'for --processlist' - ); + if ( !$ps_dbh || !$ps_dbh->ping ) { + PTDEBUG && _d('Getting a dbh from', $cur_server); + $ps_dbh = $dp->get_dbh( + $dp->get_cxn_params($o->get($cur_server)), {AutoCommit => 1}); + $ps_dbh->{InactiveDestroy} = 1; # Don't die on fork(). + } $cur_time = time(); $sth = $ps_dbh->prepare('SHOW FULL PROCESSLIST'); $cxn = $ps_dbh->{mysql_thread_id}; @@ -13745,18 +13966,6 @@ sub main { } } } until ( $sth && !$err ); - if ( $o->get('mirror') - && time() - $cur_time > $o->get('mirror')) { - ($cur_server, $ps_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ps_dbh, - current => $cur_server, - read_only => 0, - comment => 'for --processlist' - ); - $cur_time = time(); - } return [ grep { $_->[0] != $cxn } @{ $sth->fetchall_arrayref(); } ]; }; @@ -13949,7 +14158,7 @@ sub main { ); $aux_dbh->{InactiveDestroy} = 1; # Don't die on fork(). } - $aux_dbh ||= $qv_dbh || $qv_dbh2 || $ex_dbh || $ps_dbh || $ep_dbh; + $aux_dbh ||= $qv_dbh || $ps_dbh || $ep_dbh; PTDEBUG && _d('aux dbh:', $aux_dbh); my $time_callback = sub { @@ -14081,7 +14290,6 @@ sub main { files => \@read_files, Pipeline => $pipeline, QueryReview => $qv, - ExplainAnalyzer => $exa, %common_modules, ); } @@ -14089,7 +14297,38 @@ sub main { print "\n# No events processed.\n"; } - if ( $o->get('statistics') ) { + if ( PTDEBUG ) { + # Print statistics about internal counters. This option is mostly for + # development and debugging. The statistics report is printed for each + # iteration after all other reports, even if no events are processed or + # C<--no-report> is specified. 
The statistics report looks like: + + # No events processed. + + # Statistic Count %/Events + # ================================================ ====== ======== + # events_read 142030 100.00 + # events_parsed 50430 35.51 + # events_aggregated 0 0.00 + # ignored_midstream_server_response 18111 12.75 + # no_tcp_data 91600 64.49 + # pipeline_restarted_after_MemcachedProtocolParser 142030 100.00 + # pipeline_restarted_after_TcpdumpParser 1 0.00 + # unknown_client_command 1 0.00 + # unknown_client_data 32318 22.75 + + # The first column is the internal counter name; the second column is counter's + # count; and the third column is the count as a percentage of C. + + # In this case, it shows why no events were processed/aggregated: 100% of events + # were rejected by the C. Of those, 35.51% were data + # packets, but of these 12.75% of ignored mid-stream server response, one was + # an unknown client command, and 22.75% were unknown client data. The other + # 64.49% were TCP control packets (probably most ACKs). + + # Since pt-query-digest is complex, you will probably need someone familiar + # with its code to decipher the statistics report. + if ( keys %stats ) { my $report = new ReportFormatter( line_width => 74, @@ -14395,139 +14634,6 @@ sub main { } } # sample - my $ex_dsn; - { # execute throttle and execute - my $et; - if ( my $et_args = $o->get('execute-throttle') ) { - # These were check earlier; no need to check them again. - my ($rate_max, $int, $step) = @{$o->get('execute-throttle')}; - $step ||= 5; - $step /= 100; # step specified as percent but $et expect 0.1=10%, etc. - PTDEBUG && _d('Execute throttle:', $rate_max, $int, $step); - - my $get_rate = sub { - my $instrument = $pipeline->instrumentation; - return percentage_of( - $instrument->{execute}->{time} || 0, - $instrument->{Pipeline}->{time} || 0, - ); - }; - - $et = new ExecutionThrottler( - rate_max => $rate_max, - get_rate => $get_rate, - check_int => $int, - step => $step, - ); - - $pipeline->add( - name => 'execute throttle', - process => sub { - my ( $args ) = @_; - $args->{event} = $et->throttle( - event => $args->{event}, - stats => \%stats, - misc => $args->{misc}, - ); - return $args; - }, - ); - } # execute throttle - - if ( $ex_dsn = $o->get('execute') ) { - if ( $o->get('ask-pass') ) { - $ex_dsn->{p} = OptionParser::prompt_noecho("Enter password for " - . "--execute: "); - $o->set('execute', $ex_dsn); - } - - my $cur_server = 'execute'; - ($cur_server, $ex_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ex_dbh, - current => $cur_server, - read_only => 1, - comment => 'for --execute' - ); - my $cur_time = time(); - my $curdb; - my $default_db = $o->get('execute')->{D}; - PTDEBUG && _d('Default db:', $default_db); - - $pipeline->add( - name => 'execute', - process => sub { - my ( $args ) = @_; - my $event = $args->{event}; - $event->{Exec_orig_time} = $event->{Query_time}; - if ( ($event->{Skip_exec} || '') eq 'Yes' ) { - PTDEBUG && _d('Not executing event because of ', - '--execute-throttle'); - # Zero Query_time to 'Exec time' will show the real time - # spent executing queries. 
- $event->{Query_time} = 0; - $stats{execute_skipped}++; - return $args; - } - $stats{execute_executed}++; - my $db = $event->{db} || $default_db; - eval { - if ( $db && (!$curdb || $db ne $curdb) ) { - $ex_dbh->do("USE $db"); - $curdb = $db; - } - my $start = time(); - $ex_dbh->do($event->{arg}); - my $end = time(); - $event->{Query_time} = $end - $start; - $event->{Exec_diff_time} - = $event->{Query_time} - $event->{Exec_orig_time}; - if ($o->get('mirror') && $end-$cur_time > $o->get('mirror')) { - ($cur_server, $ex_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ex_dbh, - current => $cur_server, - read_only => 1, - comment => 'for --execute' - ); - $cur_time = $end; - } - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d($EVAL_ERROR); - $stats{execute_error}++; - # Don't try to re-execute the statement. Just skip it. - if ( $EVAL_ERROR =~ m/server has gone away/ ) { - print STDERR $EVAL_ERROR; - eval { - ($cur_server, $ex_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ex_dbh, - current => $cur_server, - read_only => 1, - comment => 'for --execute' - ); - $cur_time = time(); - }; - if ( $EVAL_ERROR ) { - print STDERR $EVAL_ERROR; - sleep 1; - } - return; - } - if ( $EVAL_ERROR =~ m/No database/ ) { - $stats{execute_no_database}++; - } - } - return $args; - }, - ); - } # execute - } # execute throttle and execute - if ( $o->get('print') ) { my $w = new SlowLogWriter(); $pipeline->add( @@ -14636,7 +14742,6 @@ sub main { instances => [ ($qv_dbh ? { dbh => $qv_dbh, dsn => $review_dsn } : ()), ($ps_dbh ? { dbh => $ps_dbh, dsn => $ps_dsn } : ()), - ($ex_dbh ? { dbh => $ex_dbh, dsn => $ex_dsn } : ()) ], protocol => $o->get('version-check'), ); @@ -14661,13 +14766,15 @@ sub main { } PTDEBUG && _d("Pipeline data:", Dumper($pipeline_data)); + save_resume_offset(); + # Disconnect all open $dbh's map { $dp->disconnect($_); PTDEBUG && _d('Disconnected dbh', $_); } grep { $_ } - ($qv_dbh, $qv_dbh2, $ex_dbh, $ps_dbh, $ep_dbh, $aux_dbh); + ($qv_dbh, $ps_dbh, $ep_dbh, $aux_dbh); return 0; } # End main() @@ -14676,6 +14783,77 @@ sub main { # Subroutines. # ############################################################################ +sub create_review_tables { + my ( %args ) = @_; + my @required_args = qw(dbh full_table TableParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $create_table_sql = $args{create_table_sql}; + my ($dbh, $full_table, $tp) = @args{@required_args}; + + PTDEBUG && _d('Checking --review table', $full_table); + + # If the repl db doesn't exit, auto-create it, maybe. + my ($db, $tbl) = Quoter->split_unquote($full_table); + my $show_db_sql = qq{SHOW DATABASES LIKE '$db'}; + PTDEBUG && _d($show_db_sql); + my @db_exists = $dbh->selectrow_array($show_db_sql); + if ( !@db_exists && !$args{create_table} ) { + die "--review database $db does not exist and " + . "--no-create-review-tables was specified. You need " + . "to create the database.\n"; + } + else { + # Even if the db already exists, do this in case it does not exist + # on a slave. + my $create_db_sql + = "CREATE DATABASE IF NOT EXISTS " + . Quoter->quote($db) + . " /* $tool */"; + PTDEBUG && _d($create_db_sql); + eval { + $dbh->do($create_db_sql); + }; + if ( $EVAL_ERROR && !@db_exists ) { + warn $EVAL_ERROR; + die "--review database $db does not exist and it cannot be " + . "created automatically. You need to create the database.\n"; + } + } + + # USE the correct db + my $sql = "USE " . 
Quoter->quote($db); + PTDEBUG && _d($sql); + $dbh->do($sql); + + # Check if the table exists; if not, create it, maybe. + my $tbl_exists = $tp->check_table( + dbh => $dbh, + db => $db, + tbl => $tbl, + ); + + PTDEBUG && _d('Table exists: ', $tbl_exists ? 'yes' : 'no'); + + if ( !$tbl_exists && !$args{create_table} ) { + die "Table $full_table does not exist and " + . "--no-create-review-tables was specified. " + . "You need to create the table.\n"; + } + else { + PTDEBUG && _d($dbh, $create_table_sql); + eval { + $dbh->do($create_table_sql); + }; + if ( $EVAL_ERROR && !$args{create_table} ) { + warn $EVAL_ERROR; + die "--review history table $full_table does not exist and it cannot be " + . "created automatically. You need to create the table.\n" + } + } +} + # TODO: This sub is poorly named since it does more than print reports: # it aggregates, reports, does QueryReview stuff, etc. sub print_reports { @@ -14694,9 +14872,7 @@ sub print_reports { for my $i ( 0..$#groupby ) { if ( $o->get('report') || $qv ) { - $eas->[$i]->calculate_statistical_metrics( - apdex_t => $o->get('apdex-threshold'), - ); + $eas->[$i]->calculate_statistical_metrics(); } my ($orderby_attrib, $orderby_func) = split(/:/, $orderby[$i]); @@ -14734,19 +14910,18 @@ sub print_reports { $print_header = 1; } - my $qrf = new QueryReportFormatter( - dbh => $ep_dbh, - %args, - ); - # http://code.google.com/p/maatkit/issues/detail?id=1141 - $qrf->set_report_formatter( - report => 'profile', - formatter => new ReportFormatter ( - line_width => $o->get('explain') ? 82 : 74, - long_last_column => 1, - extend_right => 1, - ), + my $report_class = $o->get('output') =~ m/\Ajson\z/i + ? 'JSONReportFormatter' + : 'QueryReportFormatter'; + my $qrf = $report_class->new( + dbh => $ep_dbh, + QueryReview => $args{QueryReview}, + QueryRewriter => $args{QueryRewriter}, + OptionParser => $args{OptionParser}, + QueryParser => $args{QueryParser}, + Quoter => $args{Quoter}, ); + $qrf->print_reports( reports => \@reports, ea => $eas->[$i], @@ -14777,14 +14952,6 @@ sub print_reports { $tls->[$i]->reset_aggregated_data(); } - if ( $o->get('table-access') ) { # --table-access - print_table_access_report( - ea => $eas->[$i], - worst => $worst, - %args, - ); - } - $eas->[$i]->reset_aggregated_data(); # Reset for next iteration. # Print header report only once. So remove it from the @@ -14795,7 +14962,7 @@ sub print_reports { } # Each groupby - if ( $o->get('pipeline-profile') ) { + if ( PTDEBUG ) { my $report = new ReportFormatter( line_width => 74, ); @@ -14804,7 +14971,7 @@ sub print_reports { { name => 'Time', right_justify => 1 }, { name => 'Count', right_justify => 1 }, ); - $report->set_title('Pipeline profile'); + $report->title('Pipeline profile'); my $instrument = $pipeline->instrumentation; my $total_time = $instrument->{Pipeline}; foreach my $process_name ( $pipeline->processes() ) { @@ -14816,51 +14983,12 @@ sub print_reports { # Reset profile for next iteration. $pipeline->reset(); - print "\n" . $report->get_report(); + _d($report->get_report()); } return; } -# Pass in the currently open $dbh (if any), where $current points to ('execute' -# or 'processlist') and whether you want to be connected to the read_only -# server. Get back which server you're looking at, and the $dbh. Assumes that -# one of the servers is ALWAYS read only and the other is ALWAYS not! If -# there's some transition period where this isn't true, maybe both will end up -# pointing to the same place, but that should resolve shortly. 
-# The magic switching functionality only works if --mirror is given! Otherwise -# it just returns the correct $dbh. $comment is some descriptive text for -# debuggin, like 'for --execute'. -sub find_role { - my ( %args ) = @_; - my $o = $args{OptionParser}; - my $dp = $args{DSNParser}; - my $dbh = $args{dbh}; - my $current = $args{current}; - my $read_only = $args{read_only}; - my $comment = $args{comment}; - - if ( !$dbh || !$dbh->ping ) { - PTDEBUG && _d('Getting a dbh from', $current, $comment); - $dbh = $dp->get_dbh( - $dp->get_cxn_params($o->get($current)), {AutoCommit => 1}); - $dbh->{InactiveDestroy} = 1; # Don't die on fork(). - } - if ( $o->get('mirror') ) { - my ( $is_read_only ) = $dbh->selectrow_array('SELECT @@global.read_only'); - PTDEBUG && _d("read_only on", $current, $comment, ':', - $is_read_only, '(want', $read_only, ')'); - if ( $is_read_only != $read_only ) { - $current = $current eq 'execute' ? 'processlist' : 'execute'; - PTDEBUG && _d("read_only wrong", $comment, "getting a dbh from", $current); - $dbh = $dp->get_dbh( - $dp->get_cxn_params($o->get($current)), {AutoCommit => 1}); - $dbh->{InactiveDestroy} = 1; # Don't die on fork(). - } - } - return ($current, $dbh); -} - # Catches signals so we can exit gracefully. sub sig_int { my ( $signal ) = @_; @@ -14870,6 +14998,7 @@ sub sig_int { } else { print STDERR "# Exiting on SIG$signal.\n"; + save_resume_offset(); exit(1); } } @@ -15009,54 +15138,6 @@ sub get_worst_queries { return $ea->top_events(%top_spec); } -sub print_table_access_report { - my ( %args ) = @_; - my @required_args = qw(ea worst QueryParser QueryRewriter OptionParser Quoter); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my ($ea, $worst, $qp, $qr, $o, $q) = @args{@required_args}; - - my %seen; - PTDEBUG && _d('Doing table access report'); - - foreach my $worst_info ( @$worst ) { - my $item = $worst_info->[0]; - my $stats = $ea->results->{classes}->{$item}; - my $sample = $ea->results->{samples}->{$item}; - my $samp_query = $sample->{arg} || ''; - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} - : undef; - eval { - QUERY: - foreach my $query ( $qp->split($samp_query) ) { - my $rw = $qp->query_type($query, $qr)->{rw}; - next QUERY unless $rw; - my @tables = $qp->extract_tables( - query => $query, - default_db => $default_db, - Quoter => $args{Quoter}, - ); - next QUERY unless scalar @tables; - DB_TBL: - foreach my $tbl_info ( @tables ) { - my ($db, $tbl) = @$tbl_info; - $db = $db ? "`$db`." : ''; - next DB_TBL if $seen{"$db$tbl"}++; # Unique-ify for issue 337. 
- print "$rw $db`$tbl`\n"; - } - } - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d($EVAL_ERROR); - warn "Cannot get table access for query $_"; - } - } - - return; -} - sub update_query_review_tables { my ( %args ) = @_; foreach my $arg ( qw(ea worst QueryReview OptionParser) ) { @@ -15082,17 +15163,15 @@ sub update_query_review_tables { first_seen => $stats->{ts}->{min}, last_seen => $stats->{ts}->{max} ); - if ( $o->get('review-history') ) { - my %history; - foreach my $attrib ( @$attribs ) { - $history{$attrib} = $ea->metrics( - attrib => $attrib, - where => $item, - ); - } - $qv->set_review_history( - $item, $sample->{arg} || '', %history); + my %history; + foreach my $attrib ( @$attribs ) { + $history{$attrib} = $ea->metrics( + attrib => $attrib, + where => $item, + ); } + $qv->set_review_history( + $item, $sample->{arg} || '', %history); } return; @@ -15163,6 +15242,23 @@ sub verify_run_time { return $boundary; } +sub save_resume_offset { + if ( !$resume_file || !$offset ) { + PTDEBUG && _d('Not saving resume offset because there is no ' + . 'resume file or offset:', $resume_file, $offset); + return; + } + + PTDEBUG && _d('Saving resume at offset', $offset, 'to', $resume_file); + open my $resume_fh, '>', $resume_file + or die "Error opening $resume_file: $OS_ERROR"; + print { $resume_fh } $offset, "\n"; + close $resume_fh + or die "Error close $resume_file: $OS_ERROR"; + warn "\n# Saved resume file offset $offset to $resume_file\n"; + return; +} + sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } @@ -15202,13 +15298,8 @@ Analyze, aggregate, and report on a slow query log: Review a slow log, saving results to the test.query_review table in a MySQL server running on host1. See L<"--review"> for more on reviewing queries: - pt-query-digest --review h=host1,D=test,t=query_review /path/to/slow.log - -Filter out everything but SELECT queries, replay the queries against another -server, then use the timings from replaying them to analyze their performance: - - pt-query-digest /path/to/slow.log --execute h=another_server \ - --filter '$event->{fingerprint} =~ m/^select/' + pt-query-digest --review h=host1 --review-table test.query_review + --history-table test.query_history /path/to/slow.log Print the structure of events so you can construct a complex L<"--filter">: @@ -15238,8 +15329,7 @@ is safe to run even on production systems, but you might want to monitor it until you are satisfied that the input you give it does not cause undue load. Various options will cause pt-query-digest to insert data into tables, execute -SQL queries, and so on. These include the L<"--execute"> option and -L<"--review">. +SQL queries, and so on. These include the L<"--review"> option. At the time of this release, we know of no bugs that could cause serious harm to users. @@ -15348,9 +15438,7 @@ that follows. It contains the following columns: Response time The total response time, and percentage of overall total Calls The number of times this query was executed R/Call The mean response time per execution - Apdx The Apdex score; see --apdex-threshold for details V/M The Variance-to-mean ratio of response time - EXPLAIN If --explain was specified, a sparkline; see --explain Item The distilled query A final line whose rank is shown as MISC contains aggregate statistics on the @@ -15464,12 +15552,6 @@ above, and something like the following: See also L<"--report-format">. -=head2 SPARKLINES - -The output also contains sparklines. 
Sparklines are "data-intense, -design-simple, word-sized graphics" (L).There is a sparkline for L<"--report-histogram"> and for L<"--explain">. -See each of those options for details about interpreting their sparklines. - =head1 QUERY REVIEWS A "query review" is the process of storing all the query fingerprints analyzed. @@ -15534,9 +15616,9 @@ example, You can see how useful this meta-data is -- as you analyze your queries, you get your comments integrated right into the report. -If you add the L<"--review-history"> option, it will also store information into -a separate database table, so you can keep historical trending information on -classes of queries. +The tool will also store information into a separate database table specified +by the L<"--history-table"> option, so you can keep historical trending information +on classes of queries. =back @@ -15640,27 +15722,11 @@ Collapse multiple identical UNION queries into a single one. =head1 OPTIONS -DSN values in L<"--review-history"> default to values in L<"--review"> if COPY -is yes. - This tool accepts additional command-line arguments. Refer to the L<"SYNOPSIS"> and usage information for details. =over -=item --apdex-threshold - -type: float; default: 1.0 - -Set Apdex target threshold (T) for query response time. The Application -Performance Index (Apdex) Technical Specification V1.1 defines T as "a -positive decimal value in seconds, having no more than two significant digits -of granularity." This value only applies to query response time (Query_time). - -Options can be abbreviated so specifying C<--apdex-t> also works. - -See L. - =item --ask-pass Prompt for a password when connecting to MySQL. @@ -15740,21 +15806,19 @@ first option on the command line. default: yes -Continue parsing even if there is an error. +Continue parsing even if there is an error. The tool will not continue +forever: it stops once any process causes 100 errors, in which case there +is probably a bug in the tool or the input is invalid. -=item --create-review-history-table +=item --[no]create-review-tables -Create the L<"--review-history"> table if it does not exist. +default: yes -This option causes the table specified by L<"--review-history"> to be created -with the default structure shown in the documentation for that option. +Create the L<"--review"> tables if they do not exist. -=item --create-review-table - -Create the L<"--review"> table if it does not exist. - -This option causes the table specified by L<"--review"> to be created with the -default structure shown in the documentation for that option. +This option causes the tables specified by L<"--review-table"> and +L<"--history-table"> to be created with the default structures shown +in the documentation for L<"--review">. =item --daemonize @@ -15800,50 +15864,6 @@ The second one splits it into attribute-value pairs and adds them to the event: B: All commas in the regex patterns must be escaped with \ otherwise the pattern will break. -=item --execute - -type: DSN - -Execute queries on this DSN. - -Adds a callback into the chain, after filters but before the reports. Events -are executed on this DSN. If they are successful, the time they take to execute -overwrites the event's Query_time attribute and the original Query_time value -(from the log) is saved as the Exec_orig_time attribute. If unsuccessful, -the callback returns false and terminates the chain. - -If the connection fails, pt-query-digest tries to reconnect once per second. - -See also L<"--mirror"> and L<"--execute-throttle">. 
- -=item --execute-throttle - -type: array - -Throttle values for L<"--execute">. - -By default L<"--execute"> runs without any limitations or concerns for the -amount of time that it takes to execute the events. The L<"--execute-throttle"> -allows you to limit the amount of time spent doing L<"--execute"> relative -to the other processes that handle events. This works by marking some events -with a C attribute when L<"--execute"> begins to take too much time. -L<"--execute"> will not execute an event if this attribute is true. This -indirectly decreases the time spent doing L<"--execute">. - -The L<"--execute-throttle"> option takes at least two comma-separated values: -max allowed L<"--execute"> time as a percentage and a check interval time. An -optional third value is a percentage step for increasing and decreasing the -probability that an event will be marked C true. 5 (percent) is -the default step. - -For example: L<"--execute-throttle"> C<70,60,10>. This will limit -L<"--execute"> to 70% of total event processing time, checked every minute -(60 seconds) and probability stepped up and down by 10%. When L<"--execute"> -exceeds 70%, the probability that events will be marked C true -increases by 10%. L<"--execute"> time is checked again after another minute. -If it's still above 70%, then the probability will increase another 10%. -Or, if it's dropped below 70%, then the probability will decrease by 10%. - =item --expected-range type: array; default: 5,10 @@ -15868,41 +15888,10 @@ be EXPLAINed. Those are typically "derived table" queries of the form select ... from ( select .... ) der; -The EXPLAIN results are printed in three places: a sparkline in the event -header, a full vertical format in the event report, and a sparkline in the -profile. - -The full format appears at the end of each event report in vertical style +The EXPLAIN results are printed as a full vertical format in the event report, +which appears at the end of each event report in vertical style (C<\G>) just like MySQL prints it. -The sparklines (see L<"SPARKLINES">) are compact representations of the -access type for each table and whether or not "Using temporary" or "Using -filesort" appear in EXPLAIN. The sparklines look like: - - nr>TF - -That sparkline means that there are two tables, the first uses a range (n) -access, the second uses a ref access, and both "Using temporary" (T) and -"Using filesort" (F) appear. The greater-than character just separates table -access codes from T and/or F. - -The abbreviated table access codes are: - - a ALL - c const - e eq_ref - f fulltext - i index - m index_merge - n range - o ref_or_null - r ref - s system - u unique_subquery - -A capitalized access code means that "Using index" appears in EXPLAIN for -that table. - =item --filter type: string @@ -15992,22 +15981,6 @@ check both. Since L<"--filter"> allows you to alter C<$event>, you can use it to do other things, like create new attributes. See L<"ATTRIBUTES"> for an example. -=item --fingerprints - -Add query fingerprints to the standard query analysis report. This is mostly -useful for debugging purposes. - -=item --[no]for-explain - -default: yes - -Print extra information to make analysis easy. - -This option adds code snippets to make it easy to run SHOW CREATE TABLE and SHOW -TABLE STATUS for the query's tables. It also rewrites non-SELECT queries into a -SELECT that might be helpful for determining the non-SELECT statement's index -usage. 
- =item --group-by type: Array; default: fingerprint @@ -16063,6 +16036,12 @@ L<"ATTRIBUTES">). Show help and exit. +=item --history-table + +type: string; default: percona_schema.query_history + +Where to save the historical data produced by L<"--review">. + =item --host short form: -h; type: string @@ -16092,10 +16071,6 @@ which do not have them. For example, if one event has the db attribute equal to "foo", but the next event doesn't have the db attribute, then it inherits "foo" for its db attribute. -Inheritance is usually desirable, but in some cases it might confuse things. -If a query inherits a database that it doesn't actually use, then this could -confuse L<"--execute">. - =item --interval type: float; default: .1 @@ -16138,20 +16113,6 @@ type: string Print all output to this file when daemonized. -=item --mirror - -type: float - -How often to check whether connections should be moved, depending on -C. Requires L<"--processlist"> and L<"--execute">. - -This option causes pt-query-digest to check every N seconds whether it is reading -from a read-write server and executing against a read-only server, which is a -sensible way to set up two servers if you're doing something like master-master -replication. The L master-master -toolkit does this. The aim is to keep the passive server ready for failover, -which is impossible without putting it under a realistic workload. - =item --order-by type: Array; default: Query_time:sum @@ -16211,6 +16172,13 @@ seconds and which are seen at least 5 times, use the following argument: You can specify an --outliers option for each value in L<"--group-by">. + +=item --output + +type: string; default: query + +Type of report to use. Accepted values are C<"query"> and C<"json">. + =item --password short form: -p; type: string @@ -16227,10 +16195,6 @@ daemonized instance exits. The program checks for the existence of the PID file when starting; if it exists and the process with the matching PID exists, the program exits. -=item --pipeline-profile - -Print a profile of the pipeline processes. - =item --port short form: -P; type: int @@ -16259,8 +16223,7 @@ type: DSN Poll this DSN's processlist for queries, with L<"--interval"> sleep between. -If the connection fails, pt-query-digest tries to reopen it once per second. See -also L<"--mirror">. +If the connection fails, pt-query-digest tries to reopen it once per second. =item --progress @@ -16341,36 +16304,35 @@ like: # 1s ######## # 10s+ -A sparkline (see L<"SPARKLINES">) of the full chart is also printed in the -header for each query event. The sparkline of that full chart is: - - # Query_time sparkline: | .^_ | - -The sparkline itself is the 8 characters between the pipes (C<|>), one character -for each of the 8 buckets (1us, 10us, etc.) Four character codes are used -to represent the approximate relation between each bucket's value: - - _ . - ^ - -The caret C<^> represents peaks (buckets with the most values), and -the underscore C<_> represents lows (buckets with the least or at least -one value). The period C<.> and the hyphen C<-> represent buckets with values -between these two extremes. If a bucket has no values, a space is printed. -So in the example above, the period represents the 10ms bucket, the caret -the 100ms bucket, and the underscore the 1s bucket. - See L<"OUTPUT"> for more information. +=item --resume + +type: string + +If specified, the tool writes the last file offset, if there is one, +to the given filename. 
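+For example (the offset file name here is arbitrary), running the same
+command twice parses the log in two passes, with the second run picking up
+where the first one stopped:
+
+  pt-query-digest --resume /tmp/slow.log.offset /path/to/slow.log
+  pt-query-digest --resume /tmp/slow.log.offset /path/to/slow.log
+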
When run again with the same value for this option,
+the tool reads the last file offset from the file, seeks to that position
+in the log, and resumes parsing events from that point onward.
+ =item --review type: DSN
-Store a sample of each class of query in this DSN.
+Store a sample of each class of query in this DSN, plus historical values
+for review trend analysis.
-The argument specifies a table to store all unique query fingerprints in. The
-table must have at least the following columns. You can add more columns for
-your own special purposes, but they won't be used by pt-query-digest. The
-following CREATE TABLE definition is also used for L<"--create-review-table">.
+The argument specifies a host to store all unique query fingerprints in; the
+databases and tables where this data is stored can be specified with the
+L<"--review-table"> and L<"--history-table"> options.
+By default, if the table doesn't exist, the tool tries creating it; this
+behavior can be controlled with the L<"--[no]create-review-tables"> option.
+If the table was created manually, it must have at least the following columns.
+You can add more columns for your own special purposes, but they won't be used
+by pt-query-digest. The following CREATE TABLE definition is also used by
+L<"--[no]create-review-tables">.
+ +=for comment ignore-pt-internal-value MAGIC_create_review:    CREATE TABLE query_review ( @@ -16405,23 +16367,12 @@ After parsing and aggregating events, your table should contain a row for each fingerprint. This option depends on C<--group-by fingerprint> (which is the default). It will not work otherwise.
-=item --review-history
-type: DSN
+Additionally, pt-query-digest will save historical information into a separate history table,
+so you can see how classes of queries have changed over time. You can
+change the destination table with the L<"--history-table"> option.
-The table in which to store historical values for review trend analysis.
-
-Each time you review queries with L<"--review">, pt-query-digest will save
-information into this table so you can see how classes of queries have changed
-over time.
-
-This DSN inherits unspecified values from L<"--review">. It should mention a
-table in which to store statistics about each class of queries. pt-query-digest
-verifies the existence of the table, and your privileges to insert, delete and
-update on that table.
-
-pt-query-digest then inspects the columns in the table. The table must have at
-least the following columns:
+The table must have at least the following columns:    CREATE TABLE query_review_history (     checksum     BIGINT UNSIGNED NOT NULL, @@ -16430,7 +16381,10 @@ least the following columns:   Any columns not mentioned above are inspected to see if they follow a certain  naming convention.  The column is special if the name ends with an underscore
-followed by any of these MAGIC_history_cols values:
+followed by any of these values:
+
+=for comment ignore-pt-internal-value
+MAGIC_history_cols     pct|avt|cnt|sum|min|max|pct_95|stddev|median|rank @@ -16446,8 +16400,11 @@ columns and making them part of the primary key along with the checksum.  But you could also just add a ts_min column and make it a DATE type, so you'd get one row per class of queries per day.
-The default table structure follows. The following MAGIC_create_review_history
-table definition is used for L<"--create-review-history-table">:
+The default table structure follows. 
The following table definition is used +for L<"--[no]create-review-tables">: + +=for comment ignore-pt-internal-value +MAGIC_create_review_history CREATE TABLE query_review_history ( checksum BIGINT UNSIGNED NOT NULL, @@ -16553,6 +16510,12 @@ table definition is used for L<"--create-review-history-table">: Note that we store the count (cnt) for the ts attribute only; it will be redundant to store this for other attributes. +=item --review-table + +type: string; default: percona_schema.query_review + +Where to save the samples produced by L<"--review">. + =item --run-time type: time @@ -16655,7 +16618,7 @@ Previously, pt-query-digest only aggregated these attributes: Query_time,Lock_time,Rows_sent,Rows_examined,user,db:Schema,ts -Attributes specified in the L<"--review-history"> table will always be selected +Attributes in the table specified by L<"--history-table"> will always be selected even if you do not specify L<"--select">. See also L<"--ignore-attributes"> and L<"ATTRIBUTES">. @@ -16717,9 +16680,9 @@ several types: CURRENT_DATE - INTERVAL 7 DAY If you give a MySQL time expression, then you must also specify a DSN -so that pt-query-digest can connect to MySQL to evaluate the expression. If you -specify L<"--execute">, L<"--explain">, L<"--processlist">, L<"--review"> -or L<"--review-history">, then one of these DSNs will be used automatically. +so that pt-query-digest can connect to MySQL to evaluate the expression. +If you specify L<"--explain">, L<"--processlist">, L<"--review">, then +one of these DSNs will be used automatically. Otherwise, you must specify an L<"--aux-dsn"> or pt-query-digest will die saying that the value is invalid. @@ -16742,59 +16705,6 @@ short form: -S; type: string Socket file to use for connection. -=item --statistics - -Print statistics about internal counters. This option is mostly for -development and debugging. The statistics report is printed for each -iteration after all other reports, even if no events are processed or -C<--no-report> is specified. The statistics report looks like: - - # No events processed. - - # Statistic Count %/Events - # ================================================ ====== ======== - # events_read 142030 100.00 - # events_parsed 50430 35.51 - # events_aggregated 0 0.00 - # ignored_midstream_server_response 18111 12.75 - # no_tcp_data 91600 64.49 - # pipeline_restarted_after_MemcachedProtocolParser 142030 100.00 - # pipeline_restarted_after_TcpdumpParser 1 0.00 - # unknown_client_command 1 0.00 - # unknown_client_data 32318 22.75 - -The first column is the internal counter name; the second column is counter's -count; and the third column is the count as a percentage of C. - -In this case, it shows why no events were processed/aggregated: 100% of events -were rejected by the C. Of those, 35.51% were data -packets, but of these 12.75% of ignored mid-stream server response, one was -an unknown client command, and 22.75% were unknown client data. The other -64.49% were TCP control packets (probably most ACKs). - -Since pt-query-digest is complex, you will probably need someone familiar -with its code to decipher the statistics report. - -=item --table-access - -Print a table access report. - -The table access report shows which tables are accessed by all the queries -and if the access is a read or write. 
The report looks like: - - write `baz`.`tbl` - read `baz`.`new_tbl` - write `baz`.`tbl3` - write `db6`.`tbl6` - -If you pipe the output to L, the read and write tables will be grouped -together and sorted alphabetically: - - read `baz`.`new_tbl` - write `baz`.`tbl` - write `baz`.`tbl3` - write `db6`.`tbl6` - =item --tcpdump-errors type: string @@ -17092,7 +17002,8 @@ Default character set. dsn: database; copy: yes -Database that contains the query review table. +Default database for the review option. Only useful if there are replication +filters set up. =item * F @@ -17126,7 +17037,7 @@ Socket file to use for connection. =item * t -Table to use as the query review table. +Not used. =item * u diff --git a/bin/pt-table-usage b/bin/pt-table-usage index 0037eda7..3a9767a3 100755 --- a/bin/pt-table-usage +++ b/bin/pt-table-usage @@ -5345,7 +5345,7 @@ sub new { } my $self = { - instrument => 0, + instrument => PTDEBUG, continue_on_error => 0, %args, @@ -5372,9 +5372,7 @@ sub add { push @{$self->{procs}}, $process; push @{$self->{names}}, $name; - if ( my $n = $args{retry_on_error} ) { - $self->{retries}->{$name} = $n; - } + $self->{retries}->{$name} = $args{retry_on_error} || 100; if ( $self->{instrument} ) { $self->{instrumentation}->{$name} = { time => 0, calls => 0 }; } @@ -5443,7 +5441,11 @@ sub execute { my $msg = "Pipeline process " . ($procno + 1) . " ($name) caused an error: " . $EVAL_ERROR; - if ( defined $self->{retries}->{$name} ) { + if ( !$self->{continue_on_error} ) { + die $msg . "Terminating pipeline because --continue-on-error " + . "is false.\n"; + } + elsif ( defined $self->{retries}->{$name} ) { my $n = $self->{retries}->{$name}; if ( $n ) { warn $msg . "Will retry pipeline process $procno ($name) " @@ -5455,9 +5457,6 @@ sub execute { . "($name) caused too many errors.\n"; } } - elsif ( !$self->{continue_on_error} ) { - die $msg; - } else { warn $msg; } diff --git a/lib/EventAggregator.pm b/lib/EventAggregator.pm index cf5f51e7..9c922b15 100644 --- a/lib/EventAggregator.pm +++ b/lib/EventAggregator.pm @@ -619,16 +619,6 @@ sub calculate_statistical_metrics { $classes->{$class}->{$attrib}->{all}, $classes->{$class}->{$attrib} ); - - # Apdex (http://code.google.com/p/maatkit/issues/detail?id=1054) - if ( $args{apdex_t} && $attrib eq 'Query_time' ) { - $class_metrics->{$class}->{$attrib}->{apdex_t} = $args{apdex_t}; - $class_metrics->{$class}->{$attrib}->{apdex} - = $self->calculate_apdex( - t => $args{apdex_t}, - samples => $classes->{$class}->{$attrib}->{all}, - ); - } } } } @@ -784,9 +774,6 @@ sub metrics { median => $metrics->{classes}->{$where}->{$attrib}->{median} || 0, pct_95 => $metrics->{classes}->{$where}->{$attrib}->{pct_95} || 0, stddev => $metrics->{classes}->{$where}->{$attrib}->{stddev} || 0, - - apdex_t => $metrics->{classes}->{$where}->{$attrib}->{apdex_t}, - apdex => $metrics->{classes}->{$where}->{$attrib}->{apdex}, }; } @@ -1164,70 +1151,6 @@ sub _deep_copy_attrib_vals { return $copy; } -# Sub: calculate_apdex -# Calculate the Apdex score for the given T and response times. -# -# -# Parameters: -# %args - Arguments -# -# Required Arguments: -# t - Target threshold -# samples - Hashref with bucketized response time values, -# i.e. 
{ bucket_number => n_responses, } -# -# Returns: -# Apdex score -sub calculate_apdex { - my ( $self, %args ) = @_; - my @required_args = qw(t samples); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my ($t, $samples) = @args{@required_args}; - - if ( $t <= 0 ) { - die "Invalid target threshold (T): $t. T must be greater than zero"; - } - - my $f = 4 * $t; - PTDEBUG && _d("Apdex T =", $t, "F =", $f); - - my $satisfied = 0; - my $tolerating = 0; - my $frustrated = 0; # just for debug output - my $n_samples = 0; - BUCKET: - for my $bucket ( keys %$samples ) { - my $n_responses = $samples->{$bucket}; - my $response_time = $buck_vals[$bucket]; - - # Response time increases from 0 to F. - # 0 --- T --- F - # ^ ^-- tolerating zone - # | - # +-------- satisfied zone - if ( $response_time <= $t ) { - $satisfied += $n_responses; - } - elsif ( $response_time <= $f ) { - $tolerating += $n_responses; - } - else { - $frustrated += $n_responses; - } - - $n_samples += $n_responses; - } - - my $apdex = sprintf('%.2f', ($satisfied + ($tolerating / 2)) / $n_samples); - PTDEBUG && _d($n_samples, "samples,", $satisfied, "satisfied,", - $tolerating, "tolerating,", $frustrated, "frustrated, Apdex score:", - $apdex); - - return $apdex; -} - # Sub: _get_value # Get the value of the attribute (or one of its alternatives) from the event. # Undef is a valid value. If the attrib or none of its alternatives exist diff --git a/lib/ExplainAnalyzer.pm b/lib/ExplainAnalyzer.pm index dd2b96c7..b685ca9e 100644 --- a/lib/ExplainAnalyzer.pm +++ b/lib/ExplainAnalyzer.pm @@ -215,7 +215,7 @@ sub save_usage_for { # explain - Hashref of normalized EXPLAIN data # # Returns: -# Fingerprint/sparkline string +# Fingerprint string sub fingerprint { my ( $self, %args ) = @_; my @required_args = qw(explain); @@ -225,92 +225,6 @@ sub fingerprint { my ($explain) = @args{@required_args}; } -# Sub: sparkline -# Create a sparkline of EXPLAIN data from . A spark line -# is a very compact, terse fingerprint that represents just the following. -# See . 
-# -# access (for each table): -# - a: ALL -# - c: const -# - e: eq_ref -# - f: fulltext -# - i: index -# - m: index_merge -# - n: range -# - o: ref_or_null -# - r: ref -# - s: system -# - u: unique_subquery -# -# Extra: -# - uppsercaes access code: Using extra -# - T: Using temprary -# - F: Using filesort -# -# Parameters: -# %args - Arguments -# -# Required Arguments: -# explain - Hashref of normalized EXPLAIN data -# -# Returns: -# Sparkline string like (start code)TF>Ree(end code) -sub sparkline { - my ( $self, %args ) = @_; - my @required_args = qw(explain); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($explain) = @args{@required_args}; - PTDEBUG && _d("Making sparkline for", Dumper($explain)); - - my $access_code = { - 'ALL' => 'a', - 'const' => 'c', - 'eq_ref' => 'e', - 'fulltext' => 'f', - 'index' => 'i', - 'index_merge' => 'm', - 'range' => 'n', - 'ref_or_null' => 'o', - 'ref' => 'r', - 'system' => 's', - 'unique_subquery' => 'u', - }; - - my $sparkline = ''; - my ($T, $F); # Using temporary, Using filesort - - foreach my $tbl ( @$explain ) { - my $code; - if ( defined $tbl->{type} ) { - $code = $access_code->{$tbl->{type}} || "?"; - $code = uc $code if $tbl->{Extra}->{'Using index'}; - } - else { - $code = '-' - }; - $sparkline .= $code; - - $T = 1 if $tbl->{Extra}->{'Using temporary'}; - $F = 1 if $tbl->{Extra}->{'Using filesort'}; - } - - if ( $T || $F ) { - if ( $explain->[-1]->{Extra}->{'Using temporary'} - || $explain->[-1]->{Extra}->{'Using filesort'} ) { - $sparkline .= ">" . ($T ? "T" : "") . ($F ? "F" : ""); - } - else { - $sparkline = ($T ? "T" : "") . ($F ? "F" : "") . ">$sparkline"; - } - } - - PTDEBUG && _d("sparkline:", $sparkline); - return $sparkline; -} - sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm new file mode 100644 index 00000000..be0e534f --- /dev/null +++ b/lib/JSONReportFormatter.pm @@ -0,0 +1,124 @@ +{ +package JSONReportFormatter; +use Mo; + +use List::Util qw(sum); +use Transformers qw(make_checksum parse_timestamp); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +my $have_json = eval { require JSON }; + +our $pretty_json = undef; +our $sorted_json = undef; + +extends qw(QueryReportFormatter); + +has _json => ( + is => 'ro', + init_arg => undef, + builder => '_build_json', +); + +sub _build_json { + return unless $have_json; + return JSON->new->utf8 + ->pretty($pretty_json) + ->canonical($sorted_json); +} + +sub encode_json { + my ($self, $encode) = @_; + if ( my $json = $self->_json ) { + return $json->encode($encode); + } + else { + return Transformers::encode_json($encode); + } +} + +override [qw(rusage date hostname files header profile prepared)] => sub { + return; +}; + +override event_report => sub { + my ($self, %args) = @_; + return $self->event_report_values(%args); +}; + +override query_report => sub { + my ($self, %args) = @_; + foreach my $arg ( qw(ea worst orderby groupby) ) { + die "I need a $arg argument" unless defined $arg; + } + + my $ea = $args{ea}; + my $worst = $args{worst}; + + my @attribs = @{$ea->get_attributes()}; + + my @queries; + foreach my $worst_info ( @$worst ) { + my $item = $worst_info->[0]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; + + my $all_log_pos = $ea->{result_classes}->{$item}->{pos_in_log}->{all}; + my $times_seen = sum values %$all_log_pos; + + my %class = ( + sample 
=> $sample->{arg}, + fingerprint => $item, + checksum => make_checksum($item), + cnt => $times_seen, + ); + + my %metrics; + foreach my $attrib ( @attribs ) { + $metrics{$attrib} = $ea->metrics( + attrib => $attrib, + where => $item, + ); + } + + foreach my $attrib ( keys %metrics ) { + if ( ! grep { $_ } values %{$metrics{$attrib}} ) { + delete $metrics{$attrib}; + next; + } + + if ($attrib eq 'ts') { + my $ts = delete $metrics{ts}; + foreach my $thing ( qw(min max) ) { + next unless defined $ts && defined $ts->{$thing}; + $ts->{$thing} = parse_timestamp($ts->{$thing}); + } + $class{ts_min} = $ts->{min}; + $class{ts_max} = $ts->{max}; + } + elsif ( ($ea->{type_for}->{$attrib} || '') eq 'num' ) { + # Avoid scientific notation in the metrics by forcing it to use + # six decimal places. + for my $value ( values %{$metrics{$attrib}} ) { + next unless $value; + $value = sprintf '%.6f', $value; + } + # ..except for the percentage, which only needs two + if ( my $pct = $metrics{$attrib}->{pct} ) { + $metrics{$attrib}->{pct} = sprintf('%.2f', $pct); + } + } + } + push @queries, { + class => \%class, + attributes => \%metrics, + }; + } + + my $json = $self->encode_json(\@queries); + $json .= "\n" if $json !~ /\n\Z/; + return $json . "\n"; +}; + +1; +} diff --git a/lib/Mo.pm b/lib/Mo.pm index 6d2e4f71..fa43dd21 100644 --- a/lib/Mo.pm +++ b/lib/Mo.pm @@ -177,6 +177,7 @@ sub Mo::import { _set_package_isa($caller, @_); _set_inherited_metadata($caller); }, + override => \&override, has => sub { my $names = shift; for my $attribute ( ref $names ? @$names : $names ) { @@ -512,6 +513,16 @@ BEGIN { } } +sub override { + my ($methods, $code) = @_; + my $caller = scalar caller; + + for my $method ( ref($methods) ? @$methods : $methods ) { + my $full_method = "${caller}::${method}"; + *{_glob_for $full_method} = $code; + } +} + } 1; # ########################################################################### diff --git a/lib/Pipeline.pm b/lib/Pipeline.pm index 2e288824..1f50afbf 100644 --- a/lib/Pipeline.pm +++ b/lib/Pipeline.pm @@ -42,7 +42,7 @@ sub new { my $self = { # default values for optional args - instrument => 0, + instrument => PTDEBUG, continue_on_error => 0, # specified arg values override defaults @@ -71,9 +71,7 @@ sub add { push @{$self->{procs}}, $process; push @{$self->{names}}, $name; - if ( my $n = $args{retry_on_error} ) { - $self->{retries}->{$name} = $n; - } + $self->{retries}->{$name} = $args{retry_on_error} || 100; if ( $self->{instrument} ) { $self->{instrumentation}->{$name} = { time => 0, calls => 0 }; } @@ -163,7 +161,11 @@ sub execute { my $msg = "Pipeline process " . ($procno + 1) . " ($name) caused an error: " . $EVAL_ERROR; - if ( defined $self->{retries}->{$name} ) { + if ( !$self->{continue_on_error} ) { + die $msg . "Terminating pipeline because --continue-on-error " + . "is false.\n"; + } + elsif ( defined $self->{retries}->{$name} ) { my $n = $self->{retries}->{$name}; if ( $n ) { warn $msg . "Will retry pipeline process $procno ($name) " @@ -175,9 +177,6 @@ sub execute { . "($name) caused too many errors.\n"; } } - elsif ( !$self->{continue_on_error} ) { - die $msg; - } else { warn $msg; } diff --git a/lib/QueryReportFormatter.pm b/lib/QueryReportFormatter.pm index 4a5b5c5a..ac45a51c 100644 --- a/lib/QueryReportFormatter.pm +++ b/lib/QueryReportFormatter.pm @@ -29,8 +29,7 @@ # which is also in mk-query-digest. 
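# The new JSONReportFormatter above leans on the "override" helper that this
# patch also adds to lib/Mo.pm: given one method name or an arrayref of names,
# it installs the same coderef for each of them in the caller's package. That
# is how the JSON formatter silences the text-only sections (rusage, date,
# hostname, files, header, profile, prepared) and swaps query_report for a
# JSON dump of {class, attributes} records. Below is a minimal, self-contained
# sketch of the pattern with hypothetical package names; it assumes only that
# it is run from the branch root so that the forked Mo in lib/ is on @INC.

use strict;
use warnings;
use lib 'lib';

{
    package My::Base;
    sub new      { return bless {}, shift }        # plain constructor
    sub rusage   { return "rusage section\n" }
    sub hostname { return "hostname section\n" }
}

{
    package My::Quiet;
    use Mo;
    extends 'My::Base';
    # Both methods become no-ops, just as JSONReportFormatter does for the
    # report sections it does not want in its output.
    override [qw(rusage hostname)] => sub { return };
}

my $section = My::Quiet->new->rusage;
print defined $section ? $section : "suppressed\n";    # prints "suppressed"
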
package QueryReportFormatter; -use strict; -use warnings FATAL => 'all'; +use Mo; use English qw(-no_match_vars); use POSIX qw(floor); @@ -43,6 +42,9 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0; use constant LINE_LENGTH => 74; use constant MAX_STRING_LENGTH => 10; +{ local $EVAL_ERROR; eval { require Quoter } }; +{ local $EVAL_ERROR; eval { require ReportFormatter } }; + # Sub: new # # Parameters: @@ -56,31 +58,69 @@ use constant MAX_STRING_LENGTH => 10; # Optional arguments: # QueryReview - object used in # dbh - dbh used in -# ExplainAnalyzer - object used in . -# This causes a sparkline to be printed (issue 1141). # # Returns: # QueryReportFormatter object -sub new { - my ( $class, %args ) = @_; - foreach my $arg ( qw(OptionParser QueryRewriter Quoter) ) { - die "I need a $arg argument" unless $args{$arg}; +has Quoter => ( + is => 'ro', + isa => 'Quoter', + default => sub { Quoter->new() }, +); + +has label_width => ( + is => 'ro', + isa => 'Int', +); + +has global_headers => ( + is => 'ro', + isa => 'ArrayRef', + default => sub { [qw( total min max avg 95% stddev median)] }, +); + +has event_headers => ( + is => 'ro', + isa => 'ArrayRef', + default => sub { [qw(pct total min max avg 95% stddev median)] }, +); + +has ReportFormatter => ( + is => 'ro', + isa => 'ReportFormatter', + builder => '_build_report_formatter', +); + +sub _build_report_formatter { + return ReportFormatter->new( + line_width => LINE_LENGTH, + extend_right => 1, + ); +} + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); + + foreach my $arg ( qw(OptionParser QueryRewriter) ) { + die "I need a $arg argument" unless $args->{$arg}; } # If ever someone wishes for a wider label width. - my $label_width = $args{label_width} || 12; + my $label_width = $args->{label_width} ||= 12; PTDEBUG && _d('Label width:', $label_width); - my $cheat_width = $label_width + 1; - + my $o = delete $args->{OptionParser}; my $self = { - %args, - label_width => $label_width, + %$args, + options => { + show_all => $o->get('show-all'), + shorten => $o->get('shorten'), + report_all => $o->get('report-all'), + report_histogram => $o->get('report-histogram'), + }, num_format => "# %-${label_width}s %3s %7s %7s %7s %7s %7s %7s %7s", bool_format => "# %-${label_width}s %3d%% yes, %3d%% no", string_format => "# %-${label_width}s %s", - global_headers => [qw( total min max avg 95% stddev median)], - event_headers => [qw(pct total min max avg 95% stddev median)], hidden_attrib => { # Don't sort/print these attribs in the reports. arg => 1, # They're usually handled specially, or not fingerprint => 1, # printed at all. @@ -88,32 +128,7 @@ sub new { ts => 1, }, }; - return bless $self, $class; -} - -# Sub: set_report_formatter -# Set a report formatter object for a report. By default this package will -# instantiate ReportFormatter objects to format columnized reports (e.g. -# for profile and prepared reports). Setting a caller-created formatter -# object (usually a obj) is used for tested and also by -# to extend the profile report line width to 82 for -# the --explain sparkline. -# -# Parameters: -# %args - Arguments -# -# Required Arguments: -# report - Report name, e.g. profile, prepared, etc. 
-# formatter - Formatter object, usually a obj -sub set_report_formatter { - my ( $self, %args ) = @_; - my @required_args = qw(report formatter); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless exists $args{$arg}; - } - my ($report, $formatter) = @args{@required_args}; - $self->{formatter_for}->{$report} = $formatter; - return; + return $self; } # Arguments: @@ -243,7 +258,7 @@ sub header { shorten(scalar keys %{$results->{classes}}, d=>1_000), shorten($qps || 0, d=>1_000), shorten($conc || 0, d=>1_000)); - $line .= ('_' x (LINE_LENGTH - length($line) + $self->{label_width} - 12)); + $line .= ('_' x (LINE_LENGTH - length($line) + $self->label_width() - 12)); push @result, $line; # Second line: time range @@ -308,6 +323,70 @@ sub header { return join("\n", map { s/\s+$//; $_ } @result) . "\n"; } +sub query_report_values { + my ($self, %args) = @_; + foreach my $arg ( qw(ea worst orderby groupby) ) { + die "I need a $arg argument" unless defined $arg; + } + my $ea = $args{ea}; + my $groupby = $args{groupby}; + my $worst = $args{worst}; + + my $q = $self->Quoter; + my $qv = $self->{QueryReview}; + my $qr = $self->{QueryRewriter}; + + my @values; + # Print each worst item: its stats/metrics (sum/min/max/95%/etc.), + # Query_time distro chart, tables, EXPLAIN, fingerprint, etc. + # Items are usually unique queries/fingerprints--depends on how + # the events were grouped. + ITEM: + foreach my $top_event ( @$worst ) { + my $item = $top_event->[0]; + my $reason = $args{explain_why} ? $top_event->[1] : ''; + my $rank = $top_event->[2]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; + my $samp_query = $sample->{arg} || ''; + + my %item_vals = ( + item => $item, + samp_query => $samp_query, + rank => ($rank || 0), + reason => $reason, + ); + + # ############################################################### + # Possibly skip item for --review. + # ############################################################### + my $review_vals; + if ( $qv ) { + $review_vals = $qv->get_review_info($item); + next ITEM if $review_vals->{reviewed_by} && !$self->{options}->{report_all}; + for my $col ( $qv->review_cols() ) { + push @{$item_vals{review_vals}}, [$col, $review_vals->{$col}]; + } + } + + $item_vals{default_db} = $sample->{db} ? $sample->{db} + : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} + : undef; + $item_vals{tables} = [$self->{QueryParser}->extract_tables( + query => $samp_query, + default_db => $item_vals{default_db}, + Quoter => $self->Quoter, + )]; + + if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { + $item_vals{crc} = crc32($samp_query); + } + + push @values, \%item_vals; + } + return \@values; +} + # Arguments: # * ea obj: EventAggregator # * worst arrayref: worst items @@ -319,16 +398,11 @@ sub header { # * print_header bool: "Report grouped by" header sub query_report { my ( $self, %args ) = @_; - foreach my $arg ( qw(ea worst orderby groupby) ) { - die "I need a $arg argument" unless defined $arg; - } + my $ea = $args{ea}; my $groupby = $args{groupby}; - my $worst = $args{worst}; + my $report_values = $self->query_report_values(%args); - my $o = $self->{OptionParser}; - my $q = $self->{Quoter}; - my $qv = $self->{QueryReview}; my $qr = $self->{QueryRewriter}; my $report = ''; @@ -350,66 +424,36 @@ sub query_report { # Items are usually unique queries/fingerprints--depends on how # the events were grouped. 
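# For reference, each element returned by query_report_values() is a hashref
# shaped roughly as follows (keys taken from the code above; the values shown
# are purely illustrative):
#
#   {
#       item        => 'select c from t where id=?',     # group-by value
#       samp_query  => 'select c from t where id=1',     # worst sample SQL
#       rank        => 1,
#       reason      => 'top',              # or 'outliers'; '' unless explain_why
#       default_db  => 'test',
#       tables      => [ ['test', 't'] ],                 # [db, tbl] pairs
#       review_vals => [ [ reviewed_by => undef ], ... ], # only with a QueryReview
#       crc         => ...,           # crc32 of the sample, only with variations
#   }
#
# The loop below only renders these values, which is what lets the new
# JSONReportFormatter reuse the same extraction without duplicating it.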
ITEM: - foreach my $top_event ( @$worst ) { - my $item = $top_event->[0]; - my $reason = $args{explain_why} ? $top_event->[1] : ''; - my $rank = $top_event->[2]; - my $stats = $ea->results->{classes}->{$item}; - my $sample = $ea->results->{samples}->{$item}; - my $samp_query = $sample->{arg} || ''; - - # ############################################################### - # Possibly skip item for --review. - # ############################################################### - my $review_vals; - if ( $qv ) { - $review_vals = $qv->get_review_info($item); - next ITEM if $review_vals->{reviewed_by} && !$o->get('report-all'); - } - - # ############################################################### - # Get tables for --for-explain. - # ############################################################### - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} - : undef; - my @tables; - if ( $o->get('for-explain') ) { - @tables = $self->{QueryParser}->extract_tables( - query => $samp_query, - default_db => $default_db, - Quoter => $self->{Quoter}, - ); - } - + foreach my $vals ( @$report_values ) { + my $item = $vals->{item}; # ############################################################### # Print the standard query analysis report. # ############################################################### - $report .= "\n" if $rank > 1; # space between each event report + $report .= "\n" if $vals->{rank} > 1; # space between each event report $report .= $self->event_report( %args, item => $item, - sample => $sample, - rank => $rank, - reason => $reason, + sample => $ea->results->{samples}->{$item}, + rank => $vals->{rank}, + reason => $vals->{reason}, attribs => $attribs, - db => $default_db, + db => $vals->{default_db}, ); - if ( $o->get('report-histogram') ) { + if ( $self->{options}->{report_histogram} ) { $report .= $self->chart_distro( %args, - attrib => $o->get('report-histogram'), - item => $item, + attrib => $self->{options}->{report_histogram}, + item => $vals->{item}, ); } - if ( $qv && $review_vals ) { + if ( $vals->{review_vals} ) { # Print the review information that is already in the table # before putting anything new into the table. $report .= "# Review information\n"; - foreach my $col ( $qv->review_cols() ) { - my $val = $review_vals->{$col}; + foreach my $elem ( @{$vals->{review_vals}} ) { + my ($col, $val) = @$elem; if ( !$val || $val ne '0000-00-00 00:00:00' ) { # issue 202 $report .= sprintf "# %13s: %-s\n", $col, ($val ? $val : ''); } @@ -418,25 +462,22 @@ sub query_report { if ( $groupby eq 'fingerprint' ) { # Shorten it if necessary (issue 216 and 292). - $samp_query = $qr->shorten($samp_query, $o->get('shorten')) - if $o->get('shorten'); + my $samp_query = $qr->shorten($vals->{samp_query}, $self->{options}->{shorten}) + if $self->{options}->{shorten}; # Print query fingerprint. - $report .= "# Fingerprint\n# $item\n" - if $o->get('fingerprints'); + PTDEBUG && _d("Fingerprint\n# $vals->{item}\n"); # Print tables used by query. - $report .= $self->tables_report(@tables) - if $o->get('for-explain'); + $report .= $self->tables_report(@{$vals->{tables}}); # Print sample (worst) query's CRC % 1_000. We mod 1_000 because # that's actually the value stored in the ea, not the full checksum. # So the report will print something like, # # arg crc 685 (2/66%), 159 (1/33%) # Thus we want our "CRC" line to be 685 and not 18547302820. 
- if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { - my $crc = crc32($samp_query); - $report.= "# CRC " . ($crc ? $crc % 1_000 : "") . "\n"; + if ( $vals->{crc} ) { + $report.= "# CRC " . ($vals->{crc} % 1_000) . "\n"; } my $log_type = $args{log_type} || ''; @@ -450,14 +491,13 @@ sub query_report { } else { $report .= "# EXPLAIN /*!50100 PARTITIONS*/\n$samp_query${mark}\n"; - $report .= $self->explain_report($samp_query, $default_db); + $report .= $self->explain_report($samp_query, $vals->{default_db}); } } else { $report .= "$samp_query${mark}\n"; my $converted = $qr->convert_to_select($samp_query); - if ( $o->get('for-explain') - && $converted + if ( $converted && $converted =~ m/^[\(\s]*select/i ) { # It converted OK to a SELECT $report .= "# Converted for EXPLAIN\n# EXPLAIN /*!50100 PARTITIONS*/\n$converted${mark}\n"; @@ -466,7 +506,7 @@ sub query_report { } else { if ( $groupby eq 'tables' ) { - my ( $db, $tbl ) = $q->split_unquote($item); + my ( $db, $tbl ) = $self->Quoter->split_unquote($item); $report .= $self->tables_report([$db, $tbl]); } $report .= "$item\n"; @@ -486,21 +526,20 @@ sub query_report { # * rank scalar: item rank among the worst # Print a report about the statistics in the EventAggregator. # Called by query_report(). -sub event_report { - my ( $self, %args ) = @_; - foreach my $arg ( qw(ea item orderby) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $ea = $args{ea}; - my $item = $args{item}; +sub event_report_values { + my ($self, %args) = @_; + + my $ea = $args{ea}; + my $item = $args{item}; my $orderby = $args{orderby}; my $results = $ea->results(); - my $o = $self->{OptionParser}; - my @result; + + my %vals; # Return unless the item exists in the results (it should). my $store = $results->{classes}->{$item}; - return "# No such event $item\n" unless $store; + + return unless $store; # Pick the first attribute to get counts my $global_cnt = $results->{globals}->{$orderby}->{cnt}; @@ -521,80 +560,26 @@ sub event_report { }; } - # First line like: - # Query 1: 9 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 5 ________ - my $line = sprintf( - '# %s %d: %s QPS, %sx concurrency, ID 0x%s at byte %.f ', - ($ea->{groupby} eq 'fingerprint' ? 'Query' : 'Item'), - $args{rank} || 0, - shorten($qps || 0, d=>1_000), - shorten($conc || 0, d=>1_000), - make_checksum($item), - $results->{samples}->{$item}->{pos_in_log} || 0, - ); - $line .= ('_' x (LINE_LENGTH - length($line) + $self->{label_width} - 12)); - push @result, $line; - - # Second line: reason why this class is being reported. - if ( $args{reason} ) { - push @result, - "# This item is included in the report because it matches " - . ($args{reason} eq 'top' ? '--limit.' : '--outliers.'); - } - - # Third line: Apdex and variance-to-mean (V/M) ratio, like: - # Scores: Apdex = 0.93 [1.0], V/M = 1.5 - { + $vals{groupby} = $ea->{groupby}; + $vals{qps} = $qps || 0; + $vals{concurrency} = $conc || 0; + $vals{checksum} = make_checksum($item); + $vals{pos_in_log} = $results->{samples}->{$item}->{pos_in_log} || 0; + $vals{reason} = $args{reason}; + $vals{variance_to_mean} = do { my $query_time = $ea->metrics(where => $item, attrib => 'Query_time'); - push @result, - sprintf("# Scores: Apdex = %s [%3.1f]%s, V/M = %.2f", - (defined $query_time->{apdex} ? "$query_time->{apdex}" : "NS"), - ($query_time->{apdex_t} || 0), - ($query_time->{cnt} < 100 ? 
"*" : ""), - ($query_time->{stddev}**2 / ($query_time->{avg} || 1)), - ); + $query_time->{stddev}**2 / ($query_time->{avg} || 1) + }; + + $vals{counts} = { + class_cnt => $class_cnt, + global_cnt => $global_cnt, + }; + + if ( my $ts = $store->{ts}) { + $vals{time_range} = $self->format_time_range($ts) || "unknown"; } - # Fourth line: EXPLAIN sparkline if --explain. - if ( $o->get('explain') && $results->{samples}->{$item}->{arg} ) { - eval { - my $sparkline = $self->explain_sparkline( - $results->{samples}->{$item}->{arg}, $args{db}); - push @result, "# EXPLAIN sparkline: $sparkline\n"; - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d("Failed to get EXPLAIN sparkline:", $EVAL_ERROR); - } - } - - if ( my $attrib = $o->get('report-histogram') ) { - my $sparkline = $self->distro_sparkline( - %args, - attrib => $attrib, - item => $item, - ); - if ( $sparkline ) { - # I find the | | bookends help make the sparkchart graph more clear. - # Else with just .^- it's difficult to tell where the chart beings - # or ends. - push @result, "# $attrib sparkline: |$sparkline|"; - } - } - - # Last line before column headers: time range - if ( my $ts = $store->{ts} ) { - my $time_range = $self->format_time_range($ts) || "unknown"; - push @result, "# Time range: $time_range"; - } - - # Column header line - push @result, $self->make_event_header(); - - # Count line - push @result, - sprintf $self->{num_format}, 'Count', - percentage_of($class_cnt, $global_cnt), $class_cnt, map { '' } (1..8); - # Sort the attributes, removing any hidden attributes, if they're not # already given to us. In mk-query-digest, this sub is called from # query_report(), but in testing it's called directly. query_report() @@ -607,11 +592,10 @@ sub event_report { ); } + $vals{attributes} = { map { $_ => [] } qw(num innodb bool string) }; + foreach my $type ( qw(num innodb) ) { # Add "InnoDB:" sub-header before grouped InnoDB_* attributes. 
- if ( $type eq 'innodb' && @{$attribs->{$type}} ) { - push @result, "# InnoDB:"; - }; NUM_ATTRIB: foreach my $attrib ( @{$attribs->{$type}} ) { @@ -631,15 +615,12 @@ sub event_report { $pct = percentage_of( $vals->{sum}, $results->{globals}->{$attrib}->{sum}); - push @result, - sprintf $self->{num_format}, - $self->make_label($attrib), $pct, @values; + push @{$vals{attributes}{$type}}, + [ $attrib, $pct, @values ]; } } if ( @{$attribs->{bool}} ) { - push @result, "# Boolean:"; - my $printed_bools = 0; BOOL_ATTRIB: foreach my $attrib ( @{$attribs->{bool}} ) { next BOOL_ATTRIB unless exists $store->{$attrib}; @@ -647,33 +628,125 @@ sub event_report { next unless scalar %$vals; if ( $vals->{sum} > 0 ) { - push @result, - sprintf $self->{bool_format}, - $self->make_label($attrib), $self->bool_percents($vals); - $printed_bools = 1; + push @{$vals{attributes}{bool}}, + [ $attrib, $self->bool_percents($vals) ]; } } - pop @result unless $printed_bools; } if ( @{$attribs->{string}} ) { - push @result, "# String:"; - my $printed_strings = 0; STRING_ATTRIB: foreach my $attrib ( @{$attribs->{string}} ) { next STRING_ATTRIB unless exists $store->{$attrib}; my $vals = $store->{$attrib}; next unless scalar %$vals; + push @{$vals{attributes}{string}}, + [ $attrib, $vals ]; + } + } + + + return \%vals; +} + +# TODO I maybe've broken the groupby report + +sub event_report { + my ( $self, %args ) = @_; + foreach my $arg ( qw(ea item orderby) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + my $item = $args{item}; + my $val = $self->event_report_values(%args); + my @result; + + return "# No such event $item\n" unless $val; + + # First line like: + # Query 1: 9 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 5 ________ + my $line = sprintf( + '# %s %d: %s QPS, %sx concurrency, ID 0x%s at byte %.f ', + ($val->{groupby} eq 'fingerprint' ? 'Query' : 'Item'), + $args{rank} || 0, + shorten($val->{qps}, d=>1_000), + shorten($val->{concurrency}, d=>1_000), + $val->{checksum}, + $val->{pos_in_log}, + ); + $line .= ('_' x (LINE_LENGTH - length($line) + $self->label_width() - 12)); + push @result, $line; + + # Second line: reason why this class is being reported. + if ( $val->{reason} ) { + push @result, + "# This item is included in the report because it matches " + . ($val->{reason} eq 'top' ? '--limit.' : '--outliers.'); + } + + # Third line: Variance-to-mean (V/M) ratio, like: + # Scores: V/M = 1.5 + push @result, + sprintf("# Scores: V/M = %.2f", $val->{variance_to_mean} ); + + # Last line before column headers: time range + if ( $val->{time_range} ) { + push @result, "# Time range: $val->{time_range}"; + } + + # Column header line + push @result, $self->make_event_header(); + + # Count line + push @result, + sprintf $self->{num_format}, 'Count', + percentage_of($val->{counts}{class_cnt}, $val->{counts}{global_cnt}), + $val->{counts}{class_cnt}, + map { '' } (1..8); + + + my $attribs = $val->{attributes}; + + foreach my $type ( qw(num innodb) ) { + # Add "InnoDB:" sub-header before grouped InnoDB_* attributes. 
+ if ( $type eq 'innodb' && @{$attribs->{$type}} ) { + push @result, "# InnoDB:"; + }; + + NUM_ATTRIB: + foreach my $attrib ( @{$attribs->{$type}} ) { + my ($attrib_name, @vals) = @$attrib; + push @result, + sprintf $self->{num_format}, + $self->make_label($attrib_name), @vals; + } + } + + if ( @{$attribs->{bool}} ) { + push @result, "# Boolean:"; + BOOL_ATTRIB: + foreach my $attrib ( @{$attribs->{bool}} ) { + my ($attrib_name, @vals) = @$attrib; + push @result, + sprintf $self->{bool_format}, + $self->make_label($attrib_name), @vals; + } + } + + if ( @{$attribs->{string}} ) { + push @result, "# String:"; + STRING_ATTRIB: + foreach my $attrib ( @{$attribs->{string}} ) { + my ($attrib_name, $vals) = @$attrib; push @result, sprintf $self->{string_format}, - $self->make_label($attrib), - $self->format_string_list($attrib, $vals, $class_cnt); - $printed_strings = 1; + $self->make_label($attrib_name), + $self->format_string_list($attrib_name, $vals, $val->{counts}{class_cnt}); } - pop @result unless $printed_strings; } + return join("\n", map { s/\s+$//; $_ } @result) . "\n"; } @@ -739,98 +812,6 @@ sub chart_distro { return join("\n", @results) . "\n"; } - -# Sub: distro_sparkline -# Make a sparkline of the graph. The following -# character codes are used: _.-^ If a bucket doesn't have a value, a -# space is used. So _ buckets are the lowest lines on the full graph -# (), and ^ are the peaks on the full graph. See -# QueryReportFormatter.t for several examples. -# -# This sub isn't the most optimized. The first half is the same code -# as . Then the latter code, unique to this sub, -# essentially compresses the full chart further into 8 characters using -# the 4 char codes above. -# -# Parameters: -# %args - Arguments -# -# Required Arguments: -# ea - object -# item - Item in results to chart -# attrib - Attribute of item to chart -# -# Returns: -# Sparkchart string -sub distro_sparkline { - my ( $self, %args ) = @_; - foreach my $arg ( qw(ea item attrib) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $ea = $args{ea}; - my $item = $args{item}; - my $attrib = $args{attrib}; - - my $results = $ea->results(); - my $store = $results->{classes}->{$item}->{$attrib}; - my $vals = $store->{all}; - - my $all_zeros_sparkline = " " x 8; - - return $all_zeros_sparkline unless defined $vals && scalar %$vals; - - my @buck_tens = $ea->buckets_of(10); - my @distro = map { 0 } (0 .. 7); - my @buckets = map { 0 } (0..999); - map { $buckets[$_] = $vals->{$_} } keys %$vals; - $vals = \@buckets; - map { $distro[$buck_tens[$_]] += $vals->[$_] } (1 .. @$vals - 1); - - my $vals_per_mark; - my $max_val = 0; - my $max_disp_width = 64; - foreach my $n_vals ( @distro ) { - $max_val = $n_vals if $n_vals > $max_val; - } - $vals_per_mark = $max_val / $max_disp_width; - - my ($min, $max); - foreach my $i ( 0 .. $#distro ) { - my $n_vals = $distro[$i]; - my $n_marks = $n_vals / ($vals_per_mark || 1); - $n_marks = 1 if $n_marks < 1 && $n_vals > 0; - - $min = $n_marks if $n_marks && (!$min || $n_marks < $min); - $max = $n_marks if !$max || $n_marks > $max; - } - return $all_zeros_sparkline unless $min && $max; - - # That ^ code is mostly the same as chart_distro(). Now here's - # our own unique code. - - # Divide the range by 4 because there are 4 char codes: _.-^ - $min = 0 if $min == $max; - my @range_min; - my $d = floor((($max+0.00001)-$min) / 4); - for my $x ( 1..4 ) { - push @range_min, $min + ($d * $x); - } - - my $sparkline = ""; - foreach my $i ( 0 .. 
$#distro ) { - my $n_vals = $distro[$i]; - my $n_marks = $n_vals / ($vals_per_mark || 1); - $n_marks = 1 if $n_marks < 1 && $n_vals > 0; - $sparkline .= $n_marks <= 0 ? ' ' - : $n_marks <= $range_min[0] ? '_' - : $n_marks <= $range_min[1] ? '.' - : $n_marks <= $range_min[2] ? '-' - : '^'; - } - - return $sparkline; -} - # Profile subreport (issue 381). # Arguments: # * ea obj: EventAggregator @@ -839,7 +820,6 @@ sub distro_sparkline { # Optional arguments: # * other arrayref: other items (that didn't make it into top worst) # * distill_args hashref: extra args for distill() -# * ReportFormatter obj: passed-in ReportFormatter for testing sub profile { my ( $self, %args ) = @_; foreach my $arg ( qw(ea worst groupby) ) { @@ -851,7 +831,6 @@ sub profile { my $groupby = $args{groupby}; my $qr = $self->{QueryRewriter}; - my $o = $self->{OptionParser}; # Total response time of all events. my $results = $ea->results(); @@ -874,41 +853,20 @@ sub profile { $qr->distill($samp_query, %{$args{distill_args}}) : $item, id => $groupby eq 'fingerprint' ? make_checksum($item) : '', vmr => ($query_time->{stddev}**2) / ($query_time->{avg} || 1), - apdex => defined $query_time->{apdex} ? $query_time->{apdex} : "NS", ); - # Get EXPLAIN sparkline if --explain. - if ( $o->get('explain') && $samp_query ) { - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} - : undef; - eval { - $profile{explain_sparkline} = $self->explain_sparkline( - $samp_query, $default_db); - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d("Failed to get EXPLAIN sparkline:", $EVAL_ERROR); - } - } - push @profiles, \%profile; } - my $report = $self->{formatter_for}->{profile} || new ReportFormatter( - line_width => LINE_LENGTH, - long_last_column => 1, - extend_right => 1, - ); - $report->set_title('Profile'); + my $report = $self->ReportFormatter(); + $report->title('Profile'); my @cols = ( { name => 'Rank', right_justify => 1, }, { name => 'Query ID', }, { name => 'Response time', right_justify => 1, }, { name => 'Calls', right_justify => 1, }, { name => 'R/Call', right_justify => 1, }, - { name => 'Apdx', right_justify => 1, width => 4, }, { name => 'V/M', right_justify => 1, width => 5, }, - ( $o->get('explain') ? { name => 'EXPLAIN' } : () ), { name => 'Item', }, ); $report->set_columns(@cols); @@ -924,9 +882,7 @@ sub profile { "$rt $rtp", $item->{cnt}, $rc, - $item->{apdex}, $vmr, - ( $o->get('explain') ? $item->{explain_sparkline} || "" : () ), $item->{sample}, ); $report->add_line(@vals); @@ -954,9 +910,7 @@ sub profile { "$rt $rtp", $misc->{cnt}, $rc, - 'NS', # Apdex is not meaningful here '0.0', # variance-to-mean ratio is not meaningful here - ( $o->get('explain') ? "MISC" : () ), "<".scalar @$other." ITEMS>", ); } @@ -971,7 +925,6 @@ sub profile { # * groupby scalar: attrib worst items grouped by # Optional arguments: # * distill_args hashref: extra args for distill() -# * ReportFormatter obj: passed-in ReportFormatter for testing sub prepared { my ( $self, %args ) = @_; foreach my $arg ( qw(ea worst groupby) ) { @@ -1056,12 +1009,8 @@ sub prepared { # Return unless there are prepared statements to report. 
return unless scalar @prepared; - my $report = $self->{formatter_for}->{prepared} || new ReportFormatter( - line_width => LINE_LENGTH, - long_last_column => 1, - extend_right => 1, - ); - $report->set_title('Prepared statements'); + my $report = $self->ReportFormatter(); + $report->title('Prepared statements'); $report->set_columns( { name => 'Rank', right_justify => 1, }, { name => 'Query ID', }, @@ -1097,7 +1046,7 @@ sub make_global_header { # First line: # Attribute total min max avg 95% stddev median push @lines, - sprintf $self->{num_format}, "Attribute", '', @{$self->{global_headers}}; + sprintf $self->{num_format}, "Attribute", '', @{$self->global_headers()}; # Underline first line: # ========= ======= ======= ======= ======= ======= ======= ======= @@ -1105,7 +1054,7 @@ sub make_global_header { # Hard-coded values aren't ideal but this code rarely changes. push @lines, sprintf $self->{num_format}, - (map { "=" x $_ } $self->{label_width}), + (map { "=" x $_ } $self->label_width()), (map { " " x $_ } qw(3)), # no pct column in global header (map { "=" x $_ } qw(7 7 7 7 7 7 7)); @@ -1123,13 +1072,13 @@ sub make_event_header { my @lines; push @lines, - sprintf $self->{num_format}, "Attribute", @{$self->{event_headers}}; + sprintf $self->{num_format}, "Attribute", @{$self->event_headers()}; # The numbers 6, 7, 7, etc. are the field widths from make_header(). # Hard-coded values aren't ideal but this code rarely changes. push @lines, sprintf $self->{num_format}, - map { "=" x $_ } ($self->{label_width}, qw(3 7 7 7 7 7 7 7)); + map { "=" x $_ } ($self->label_width(), qw(3 7 7 7 7 7 7 7)); # End result should be like: # Attribute pct total min max avg 95% stddev median @@ -1148,7 +1097,7 @@ sub make_label { if ( $val =~ m/^InnoDB/ ) { $val =~ s/^InnoDB //; $val = $val eq 'trx id' ? "InnoDB trxID" - : substr($val, 0, $self->{label_width}); + : substr($val, 0, $self->label_width()); } $val = $val eq 'user' ? 'Users' @@ -1159,7 +1108,7 @@ sub make_label { : $val eq 'bytes' ? 'Query size' : $val eq 'Tmp disk tables' ? 'Tmp disk tbl' : $val eq 'Tmp table sizes' ? 'Tmp tbl size' - : substr($val, 0, $self->{label_width}); + : substr($val, 0, $self->label_width); return $val; } @@ -1177,8 +1126,7 @@ sub bool_percents { # Does pretty-printing for lists of strings like users, hosts, db. sub format_string_list { my ( $self, $attrib, $vals, $class_cnt ) = @_; - my $o = $self->{OptionParser}; - my $show_all = $o->get('show-all'); + my $show_all = $self->{options}->{show_all}; # Only class result values have unq. So if unq doesn't exist, # then we've been given global values. @@ -1318,7 +1266,7 @@ sub pref_sort { sub tables_report { my ( $self, @tables ) = @_; return '' unless @tables; - my $q = $self->{Quoter}; + my $q = $self->Quoter(); my $tables = ""; foreach my $db_tbl ( @tables ) { my ( $db, $tbl ) = @$db_tbl; @@ -1337,7 +1285,7 @@ sub explain_report { return '' unless $query; my $dbh = $self->{dbh}; - my $q = $self->{Quoter}; + my $q = $self->Quoter(); my $qp = $self->{QueryParser}; return '' unless $dbh && $q && $qp; @@ -1387,34 +1335,6 @@ sub format_time_range { return $min && $max ? "$min to $max" : ''; } -sub explain_sparkline { - my ( $self, $query, $db ) = @_; - return unless $query; - - my $q = $self->{Quoter}; - my $dbh = $self->{dbh}; - my $ex = $self->{ExplainAnalyzer}; - return unless $dbh && $ex; - - if ( $db ) { - PTDEBUG && _d($dbh, "USE", $db); - $dbh->do("USE " . 
$q->quote($db)); - } - my $res = $ex->normalize( - $ex->explain_query( - dbh => $dbh, - query => $query, - ) - ); - - my $sparkline; - if ( $res ) { - $sparkline = $ex->sparkline(explain => $res); - } - - return $sparkline; -} - sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } diff --git a/lib/QueryReview.pm b/lib/QueryReview.pm index 2274189e..f2ecc062 100644 --- a/lib/QueryReview.pm +++ b/lib/QueryReview.pm @@ -111,7 +111,7 @@ sub new { # table. sub set_history_options { my ( $self, %args ) = @_; - foreach my $arg ( qw(table dbh tbl_struct col_pat) ) { + foreach my $arg ( qw(table tbl_struct col_pat) ) { die "I need a $arg argument" unless $args{$arg}; } @@ -157,7 +157,7 @@ sub set_history_options { } @cols) . ')'; PTDEBUG && _d($sql); - $self->{history_sth} = $args{dbh}->prepare($sql); + $self->{history_sth} = $self->{dbh}->prepare($sql); $self->{history_metrics} = \@metrics; return; diff --git a/lib/ReportFormatter.pm b/lib/ReportFormatter.pm index 1be57ec6..936f2919 100644 --- a/lib/ReportFormatter.pm +++ b/lib/ReportFormatter.pm @@ -56,8 +56,7 @@ # calculated widths. package ReportFormatter; -use strict; -use warnings FATAL => 'all'; +use Mo; use English qw(-no_match_vars); use constant PTDEBUG => $ENV{PTDEBUG} || 0; @@ -67,7 +66,6 @@ use POSIX qw(ceil); eval { require Term::ReadKey }; my $have_term = $EVAL_ERROR ? 0 : 1; -# Arguments: # * underline_header bool: underline headers with = # * line_prefix scalar: prefix every line with this string # * line_width scalar: line width in characters or 'auto' @@ -77,42 +75,106 @@ my $have_term = $EVAL_ERROR ? 0 : 1; # * column_errors scalar: die or warn on column errors (default warn) # * truncate_header_side scalar: left or right (default left) # * strip_whitespace bool: strip leading and trailing whitespace -sub new { - my ( $class, %args ) = @_; - my @required_args = qw(); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my $self = { - underline_header => 1, - line_prefix => '# ', - line_width => 78, - column_spacing => ' ', - extend_right => 0, - truncate_line_mark => '...', - column_errors => 'warn', - truncate_header_side => 'left', - strip_whitespace => 1, - %args, # args above can be overriden, args below cannot - n_cols => 0, - }; +# * title scalar: title for the report + +has underline_header => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has line_prefix => ( + is => 'ro', + isa => 'Str', + default => sub { '# ' }, +); +has line_width => ( + is => 'ro', + isa => 'Int', + default => sub { 78 }, +); +has column_spacing => ( + is => 'ro', + isa => 'Str', + default => sub { ' ' }, +); +has extend_right => ( + is => 'ro', + isa => 'Bool', + default => sub { '' }, +); +has truncate_line_mark => ( + is => 'ro', + isa => 'Str', + default => sub { '...' 
}, +); +has column_errors => ( + is => 'ro', + isa => 'Str', + default => sub { 'warn' }, +); +has truncate_header_side => ( + is => 'ro', + isa => 'Str', + default => sub { 'left' }, +); +has strip_whitespace => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has title => ( + is => 'rw', + isa => 'Str', + predicate => 'has_title', +); + +# Internal + +has n_cols => ( + is => 'rw', + isa => 'Int', + default => sub { 0 }, + init_arg => undef, +); + +has cols => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_cols', +); + +has lines => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_lines', +); + +has truncate_headers => ( + is => 'rw', + isa => 'Bool', + default => sub { undef }, + init_arg => undef, + clearer => 'clear_truncate_headers', +); + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); # This is not tested or currently used, but I like the idea and - # think one day it will be very handy in mk-config-diff. - if ( ($self->{line_width} || '') eq 'auto' ) { + # think one day it will be very handy in pt-config-diff. + if ( ($args->{line_width} || '') eq 'auto' ) { die "Cannot auto-detect line width because the Term::ReadKey module " . "is not installed" unless $have_term; - ($self->{line_width}) = GetTerminalSize(); + ($args->{line_width}) = GetTerminalSize(); + PTDEBUG && _d('Line width:', $args->{line_width}); } - PTDEBUG && _d('Line width:', $self->{line_width}); - return bless $self, $class; -} - -sub set_title { - my ( $self, $title ) = @_; - $self->{title} = $title; - return; + return $args; } # @cols is an array of hashrefs. Each hashref describes a column and can @@ -139,7 +201,7 @@ sub set_columns { die "Column does not have a name" unless defined $col_name; if ( $col->{width} ) { - $col->{width_pct} = ceil(($col->{width} * 100) / $self->{line_width}); + $col->{width_pct} = ceil(($col->{width} * 100) / $self->line_width()); PTDEBUG && _d('col:', $col_name, 'width:', $col->{width}, 'chars =', $col->{width_pct}, '%'); } @@ -172,10 +234,10 @@ sub set_columns { # Used with extend_right. $col->{right_most} = 1 if $i == $#cols; - push @{$self->{cols}}, $col; + push @{$self->cols}, $col; } - $self->{n_cols} = scalar @cols; + $self->n_cols( scalar @cols ); if ( ($used_width || 0) > 100 ) { die "Total width_pct for all columns is >100%"; @@ -186,16 +248,16 @@ sub set_columns { my $wid_per_col = int((100 - $used_width) / scalar @auto_width_cols); PTDEBUG && _d('Line width left:', (100-$used_width), '%;', 'each auto width col:', $wid_per_col, '%'); - map { $self->{cols}->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; + map { $self->cols->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; } # Add to the minimum possible header width the spacing between columns. 
- $min_hdr_wid += ($self->{n_cols} - 1) * length $self->{column_spacing}; + $min_hdr_wid += ($self->n_cols() - 1) * length $self->column_spacing(); PTDEBUG && _d('min header width:', $min_hdr_wid); - if ( $min_hdr_wid > $self->{line_width} ) { + if ( $min_hdr_wid > $self->line_width() ) { PTDEBUG && _d('Will truncate headers because min header width', - $min_hdr_wid, '> line width', $self->{line_width}); - $self->{truncate_headers} = 1; + $min_hdr_wid, '> line width', $self->line_width()); + $self->truncate_headers(1); } return; @@ -207,14 +269,14 @@ sub set_columns { sub add_line { my ( $self, @vals ) = @_; my $n_vals = scalar @vals; - if ( $n_vals != $self->{n_cols} ) { + if ( $n_vals != $self->n_cols() ) { $self->_column_error("Number of values $n_vals does not match " - . "number of columns $self->{n_cols}"); + . "number of columns " . $self->n_cols()); } for my $i ( 0..($n_vals-1) ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $val = defined $vals[$i] ? $vals[$i] : $col->{undef_value}; - if ( $self->{strip_whitespace} ) { + if ( $self->strip_whitespace() ) { $val =~ s/^\s+//g; $val =~ s/\s+$//; $vals[$i] = $val; @@ -223,7 +285,7 @@ sub add_line { $col->{min_val} = min($width, ($col->{min_val} || $width)); $col->{max_val} = max($width, ($col->{max_val} || $width)); } - push @{$self->{lines}}, \@vals; + push @{$self->lines}, \@vals; return; } @@ -232,12 +294,14 @@ sub get_report { my ( $self, %args ) = @_; $self->_calculate_column_widths(); - $self->_truncate_headers() if $self->{truncate_headers}; + if ( $self->truncate_headers() ) { + $self->_truncate_headers(); + } $self->_truncate_line_values(%args); my @col_fmts = $self->_make_column_formats(); - my $fmt = ($self->{line_prefix} || '') - . join($self->{column_spacing}, @col_fmts); + my $fmt = $self->line_prefix() + . join($self->column_spacing(), @col_fmts); PTDEBUG && _d('Format:', $fmt); # Make the printf line format for the header and ensure that its labels @@ -246,15 +310,15 @@ sub get_report { # Build the report line by line, starting with the title and header lines. my @lines; - push @lines, sprintf "$self->{line_prefix}$self->{title}" if $self->{title}; + push @lines, $self->line_prefix() . $self->title() if $self->has_title(); push @lines, $self->_truncate_line( - sprintf($hdr_fmt, map { $_->{name} } @{$self->{cols}}), + sprintf($hdr_fmt, map { $_->{name} } @{$self->cols}), strip => 1, mark => '', ); - if ( $self->{underline_header} ) { - my @underlines = map { '=' x $_->{print_width} } @{$self->{cols}}; + if ( $self->underline_header() ) { + my @underlines = map { '=' x $_->{print_width} } @{$self->cols}; push @lines, $self->_truncate_line( sprintf($fmt, map { $_ || '' } @underlines), mark => '', @@ -265,19 +329,24 @@ sub get_report { my $vals = $_; my $i = 0; my @vals = map { - my $val = defined $_ ? $_ : $self->{cols}->[$i++]->{undef_value}; + my $val = defined $_ ? $_ : $self->cols->[$i++]->{undef_value}; $val = '' if !defined $val; $val =~ s/\n/ /g; $val; } @$vals; my $line = sprintf($fmt, @vals); - if ( $self->{extend_right} ) { + if ( $self->extend_right() ) { $line; } else { $self->_truncate_line($line); } - } @{$self->{lines}}; + } @{$self->lines}; + + # Clean up any leftover state + $self->clear_cols(); + $self->clear_lines(); + $self->clear_truncate_headers(); return join("\n", @lines) . 
"\n"; } @@ -285,7 +354,7 @@ sub get_report { sub truncate_value { my ( $self, $col, $val, $width, $side ) = @_; return $val if length $val <= $width; - return $val if $col->{right_most} && $self->{extend_right}; + return $val if $col->{right_most} && $self->extend_right(); $side ||= $col->{truncate_side}; my $mark = $col->{truncate_mark}; if ( $side eq 'right' ) { @@ -305,8 +374,8 @@ sub _calculate_column_widths { my ( $self ) = @_; my $extra_space = 0; - foreach my $col ( @{$self->{cols}} ) { - my $print_width = int($self->{line_width} * ($col->{width_pct} / 100)); + foreach my $col ( @{$self->cols} ) { + my $print_width = int($self->line_width() * ($col->{width_pct} / 100)); PTDEBUG && _d('col:', $col->{name}, 'width pct:', $col->{width_pct}, 'char width:', $print_width, @@ -330,7 +399,7 @@ sub _calculate_column_widths { PTDEBUG && _d('Extra space:', $extra_space); while ( $extra_space-- ) { - foreach my $col ( @{$self->{cols}} ) { + foreach my $col ( @{$self->cols} ) { if ( $col->{auto_width} && ( $col->{print_width} < $col->{max_val} || $col->{print_width} < $col->{header_width}) @@ -346,8 +415,8 @@ sub _calculate_column_widths { sub _truncate_headers { my ( $self, $col ) = @_; - my $side = $self->{truncate_header_side}; - foreach my $col ( @{$self->{cols}} ) { + my $side = $self->truncate_header_side(); + foreach my $col ( @{$self->cols} ) { my $col_name = $col->{name}; my $print_width = $col->{print_width}; next if length $col_name <= $print_width; @@ -360,10 +429,10 @@ sub _truncate_headers { sub _truncate_line_values { my ( $self, %args ) = @_; - my $n_vals = $self->{n_cols} - 1; - foreach my $vals ( @{$self->{lines}} ) { + my $n_vals = $self->n_cols() - 1; + foreach my $vals ( @{$self->lines} ) { for my $i ( 0..$n_vals ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $val = defined $vals->[$i] ? $vals->[$i] : $col->{undef_value}; my $width = length $val; @@ -393,9 +462,9 @@ sub _truncate_line_values { sub _make_column_formats { my ( $self ) = @_; my @col_fmts; - my $n_cols = $self->{n_cols} - 1; + my $n_cols = $self->n_cols() - 1; for my $i ( 0..$n_cols ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; # Normally right-most col has no width so it can potentially # extend_right. But if it's right-justified, it requires a width. @@ -410,12 +479,12 @@ sub _make_column_formats { sub _truncate_line { my ( $self, $line, %args ) = @_; - my $mark = defined $args{mark} ? $args{mark} : $self->{truncate_line_mark}; + my $mark = defined $args{mark} ? $args{mark} : $self->truncate_line_mark(); if ( $line ) { $line =~ s/\s+$// if $args{strip}; my $len = length($line); - if ( $len > $self->{line_width} ) { - $line = substr($line, 0, $self->{line_width} - length $mark); + if ( $len > $self->line_width() ) { + $line = substr($line, 0, $self->line_width() - length $mark); $line .= $mark if $mark; } } @@ -425,7 +494,7 @@ sub _truncate_line { sub _column_error { my ( $self, $err ) = @_; my $msg = "Column error: $err"; - $self->{column_errors} eq 'die' ? die $msg : warn $msg; + $self->column_errors() eq 'die' ? 
die $msg : warn $msg; return; } diff --git a/lib/Transformers.pm b/lib/Transformers.pm index fe2b7f88..f378c474 100644 --- a/lib/Transformers.pm +++ b/lib/Transformers.pm @@ -31,24 +31,26 @@ use Time::Local qw(timegm timelocal); use Digest::MD5 qw(md5_hex); use B qw(); -require Exporter; -our @ISA = qw(Exporter); -our %EXPORT_TAGS = (); -our @EXPORT = (); -our @EXPORT_OK = qw( - micro_t - percentage_of - secs_to_time - time_to_secs - shorten - ts - parse_timestamp - unix_timestamp - any_unix_timestamp - make_checksum - crc32 - encode_json -); +BEGIN { + require Exporter; + our @ISA = qw(Exporter); + our %EXPORT_TAGS = (); + our @EXPORT = (); + our @EXPORT_OK = qw( + micro_t + percentage_of + secs_to_time + time_to_secs + shorten + ts + parse_timestamp + unix_timestamp + any_unix_timestamp + make_checksum + crc32 + encode_json + ); +} our $mysql_ts = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/; our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/; diff --git a/t/lib/EventAggregator.t b/t/lib/EventAggregator.t index 38a16ca2..7ee7e30a 100644 --- a/t/lib/EventAggregator.t +++ b/t/lib/EventAggregator.t @@ -9,7 +9,7 @@ BEGIN { use strict; use warnings FATAL => 'all'; use English qw(-no_match_vars); -use Test::More tests => 82; +use Test::More; use QueryRewriter; use EventAggregator; @@ -431,7 +431,7 @@ foreach my $event (@$events) { is_deeply( $ea->results, $result, 'user aggregation' ); is($ea->type_for('Query_time'), 'num', 'Query_time is numeric'); -$ea->calculate_statistical_metrics(apdex_t => 1); +$ea->calculate_statistical_metrics(); is_deeply( $ea->metrics( where => 'bob', @@ -446,8 +446,6 @@ is_deeply( median => '0.000682', stddev => 0, pct_95 => '0.000682', - apdex_t => 1, - apdex => '1.00', }, 'Got simple hash of metrics from metrics()', ); @@ -466,8 +464,6 @@ is_deeply( median => 0, stddev => 0, pct_95 => 0, - apdex_t => undef, - apdex => undef, }, 'It does not crash on metrics()', ); @@ -1816,59 +1812,6 @@ is_deeply( "Merge results" ); -# ############################################################################# -# Apdex -# ############################################################################# - -my $samples = { - 280 => 10, # 0.81623354758492 satisfy - 281 => 10, # 0.85704522496417 satisfy - 282 => 10, # 0.89989748621238 satisfy - 283 => 50, # 0.94489236052300 satisfy - 284 => 50, # 0.99213697854915 satisfy - 285 => 10, # 1.04174382747661 tolerate - 290 => 10, # 1.32955843985657 tolerate - 313 => 1, # 4.08377033290049 frustrated -}; -my $apdex = $ea->calculate_apdex( - t => 1, - samples => $samples, -); - -is( - $apdex, - '0.93', - "Apdex score" -); - -$samples = { - 0 => 150, -}; -$apdex = $ea->calculate_apdex( - t => 1, - samples => $samples, -); - -is( - $apdex, - '1.00', - "Apdex score 1.00" -); - -$samples = { - 400 => 150, -}; -$apdex = $ea->calculate_apdex( - t => 1, - samples => $samples, -); - -is( - $apdex, - '0.00', - "Apdex score 0.00" -); - # ############################################################################# # Special-case attribs called *_crc for mqd --variations. 
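# A note on the Transformers change above: the Exporter set-up moves into a
# BEGIN block presumably because the module is also inlined into the flattened
# bin/pt-query-digest script, where a later embedded package's
# "use Transformers qw(...)" runs import() at compile time, before ordinary
# run-time assignments to @ISA and @EXPORT_OK would have executed. A minimal,
# self-contained sketch of the situation, with hypothetical package names:

use strict;
use warnings;

BEGIN { $INC{'My/Util.pm'} = __FILE__ }   # pretend My/Util.pm is already loaded

{
    package My::Util;
    BEGIN {                      # compile time, so later "use" lines can see it
        require Exporter;
        our @ISA       = qw(Exporter);
        our @EXPORT_OK = qw(hello);
    }
    sub hello { return "hello\n" }
}

{
    package My::App;
    # Without the BEGIN above, this use line would die with
    # '"hello" is not exported by the My::Util module'.
    use My::Util qw(hello);
    print hello();               # prints "hello"
}
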
# ############################################################################# @@ -1953,4 +1896,5 @@ like( qr/Complete test coverage/, '_d() works' ); +done_testing; exit; diff --git a/t/lib/ExplainAnalyzer.t b/t/lib/ExplainAnalyzer.t index 32ccc67f..161362cf 100644 --- a/t/lib/ExplainAnalyzer.t +++ b/t/lib/ExplainAnalyzer.t @@ -425,149 +425,6 @@ is_deeply( ], 'Got saved usage for 0xdeadbeef'); -# ############################################################################# -# Issue 1141: Add "spark charts" to mk-query-digest profile -# ############################################################################# -is( - $exa->sparkline(explain => - [ - { id => 1, - select_type => 'PRIMARY', - table => 'foo', - type => 'eq_ref', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => [], - rows => 100, - Extra => { - 'Using index' => 1, - 'Using where' => 1, - }, - }, - ], - ), - "E", - "sparkline: basic 1 table eq_ref" -); - -is( - $exa->sparkline(explain => - [ - { id => 1, - select_type => 'PRIMARY', - table => 'foo', - type => 'eq_ref', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => [], - rows => 100, - Extra => { - 'Using index' => 1, - 'Using where' => 1, - 'Using filesort' => 1, - }, - }, - { id => 2, - select_type => 'PRIMARY', - table => 'bar', - type => 'ref', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => ['foo.col'], - rows => 100, - Extra => { - }, - }, - ], - ), - "F>Er", - "sparkline: 2 table with filesort at start" -); - -is( - $exa->sparkline(explain => - [ - { id => 1, - select_type => 'PRIMARY', - table => 'foo', - type => 'range', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => [], - rows => 100, - Extra => { - }, - }, - { id => 2, - select_type => 'PRIMARY', - table => 'bar', - type => 'ref', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => ['foo.col'], - rows => 100, - Extra => { - 'Using temporary' => 1, - 'Using filesort' => 1, - }, - }, - ], - ), - "nr>TF", - "sparkline: 2 table with temp and filesort at end" -); - -is( - $exa->sparkline(explain => - [ - { id => 1, - select_type => 'PRIMARY', - table => undef, - type => undef, - possible_keys => [], - key => [], - key_len => [], - ref => [], - rows => undef, - Extra => { - 'No tables used' => 1, - }, - }, - { id => 1, - select_type => 'UNION', - table => 'a', - type => 'index', - possible_keys => [], - key => ['PRIMARY'], - key_len => [2], - ref => [], - rows => 200, - Extra => { - 'Using index' => 1, - }, - }, - { id => undef, - select_type => 'UNION RESULT', - table => '', - type => 'ALL', - possible_keys => [], - key => [], - key_len => [], - ref => [], - rows => undef, - Extra => {}, - }, - ], - ), - "-Ia", - "sparkline: 3 tables, using index" -); - # ############################################################################# # Done. 
# ############################################################################# diff --git a/t/lib/Pipeline.t b/t/lib/Pipeline.t index 6cb5f0c3..dc21af96 100644 --- a/t/lib/Pipeline.t +++ b/t/lib/Pipeline.t @@ -261,7 +261,7 @@ $pipeline->add( ); $output = output( - sub { $pipeline->execute(%args) }, + sub {$pipeline->execute(%args); }, stderr => 1, ); diff --git a/t/lib/QueryReportFormatter.t b/t/lib/QueryReportFormatter.t index 7677df36..66442c63 100644 --- a/t/lib/QueryReportFormatter.t +++ b/t/lib/QueryReportFormatter.t @@ -43,7 +43,6 @@ my $o = new OptionParser(description=>'qrf'); my $ex = new ExplainAnalyzer(QueryRewriter => $qr, QueryParser => $qp); $o->get_specs("$trunk/bin/pt-query-digest"); - my $qrf = new QueryReportFormatter( OptionParser => $o, QueryRewriter => $qr, @@ -885,6 +884,13 @@ ok( # Test show_all. @ARGV = qw(--show-all host); $o->get_opts(); +$qrf = new QueryReportFormatter( + OptionParser => $o, + QueryRewriter => $qr, + QueryParser => $qp, + Quoter => $q, + ExplainAnalyzer => $ex, +); $result = $qrf->event_report( ea => $ea, select => [ qw(Query_time host) ], @@ -971,7 +977,13 @@ $ea->calculate_statistical_metrics(apdex_t=>1); # Reset opts in case anything above left something set. @ARGV = qw(); $o->get_opts(); - +$qrf = new QueryReportFormatter( + OptionParser => $o, + QueryRewriter => $qr, + QueryParser => $qp, + Quoter => $q, + ExplainAnalyzer => $ex, +); # Normally, the report subs will make their own ReportFormatter but # that package isn't visible to QueryReportFormatter right now so we # make ReportFormatters and pass them in. Since ReporFormatters can't @@ -980,7 +992,7 @@ $o->get_opts(); # profile subreport. And the line width is 82 because that's the new # default to accommodate the EXPLAIN sparkline (issue 1141). 
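# The $qrf re-instantiations added throughout this test file are needed because
# QueryReportFormatter now snapshots option values at construction time:
# BUILDARGS (in the lib/QueryReportFormatter.pm hunk above) copies show-all,
# shorten, report-all and report-histogram out of the OptionParser into
# $self->{options}. A formatter built before the relevant get_opts() call would
# keep stale values, so the pattern becomes "parse first, construct second"
# (option values below are illustrative):

@ARGV = qw(--show-all host);
$o->get_opts();                      # parse the new options first ...
$qrf = new QueryReportFormatter(     # ... then build the formatter that copies them
    OptionParser  => $o,
    QueryRewriter => $qr,
    QueryParser   => $qp,
    Quoter        => $q,
);
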
my $report = new ReportFormatter(line_width=>82); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); +$qrf->{formatter} = $report; ok( no_diff( sub { $qrf->print_reports( @@ -997,8 +1009,6 @@ ok( "print_reports(header, query_report, profile)" ); -$report = new ReportFormatter(line_width=>82); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); ok( no_diff( sub { $qrf->print_reports( @@ -1051,11 +1061,6 @@ foreach my $event ( @$events ) { $ea->aggregate($event); } $ea->calculate_statistical_metrics(); -$report = new ReportFormatter( - line_width => 82, - extend_right => 1, -); -$qrf->set_report_formatter(report=>'prepared', formatter=>$report); ok( no_diff( sub { @@ -1094,11 +1099,6 @@ foreach my $event ( @$events ) { $ea->aggregate($event); } $ea->calculate_statistical_metrics(); -$report = new ReportFormatter( - line_width => 82, - extend_right => 1, -); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); ok( no_diff( sub { @@ -1130,7 +1130,13 @@ SKIP: { @ARGV = qw(--explain F=/tmp/12345/my.sandbox.cnf); $o->get_opts(); - + $qrf = new QueryReportFormatter( + OptionParser => $o, + QueryRewriter => $qr, + QueryParser => $qp, + Quoter => $q, + ExplainAnalyzer => $ex, + ); my $qrf = new QueryReportFormatter( OptionParser => $o, QueryRewriter => $qr, @@ -1151,70 +1157,6 @@ SKIP: { "explain_report()" ); - my $arg = "select t1.i from t as t1 join t as t2 where t1.i < t2.i and t1.v is not null order by t1.i"; - my $fingerprint = $qr->fingerprint($arg); - - $events = [ - { - Query_time => '0.000286', - arg => $arg, - fingerprint => $fingerprint, - bytes => length $arg, - cmd => 'Query', - db => 'qrf', - pos_in_log => 0, - ts => '091208 09:23:49.637394', - }, - ]; - $ea = new EventAggregator( - groupby => 'fingerprint', - worst => 'Query_time', - ); - foreach my $event ( @$events ) { - $ea->aggregate($event); - } - $ea->calculate_statistical_metrics(); - - # Make sure that explain_sparkline() does USE db like explain_report() - # does because by mqd defaults expalin_sparline() is called by profile() - # so if it doesn't USE db then the EXPLAIN will fail. Here we reset - # the db to something else because we already called explain_report() - # above which did USE qrf. - # - # 5.6 really is that different: ia vs. TF>aI. It's smarter. - $dbh->do("USE mysql"); - my $explain_sparkline = $qrf->explain_sparkline($arg, 'qrf'); - is( - $explain_sparkline, - $sandbox_version eq '5.6' ? "ia" : "TF>aI", - "explain_sparkling() uses db" - ); - - $report = new ReportFormatter( - line_width => 82, - extend_right => 1, - ); - $qrf->set_report_formatter(report=>'profile', formatter=>$report); - $dbh->do("USE mysql"); # same reason as above ^; force use db from event - ok( - no_diff( - sub { - $qrf->print_reports( - reports => ['profile', 'query_report'], - ea => $ea, - worst => [ [$fingerprint, 'top', 1], ], - other => [ [$fingerprint, 'misc', 2], ], - orderby => 'Query_time', - groupby => 'fingerprint', - ); - }, - ( $sandbox_version eq '5.6' ? "t/lib/samples/QueryReportFormatter/report032.txt" - : $sandbox_version ge '5.1' ? 
"t/lib/samples/QueryReportFormatter/report027.txt" - : "t/lib/samples/QueryReportFormatter/report029.txt"), - ), - "EXPLAIN sparkline (issue 1141)" - ); - $sb->wipe_clean($dbh); $dbh->disconnect(); } @@ -1265,7 +1207,6 @@ foreach my $event ( @$events ) { $ea->calculate_statistical_metrics(); @ARGV = qw(); $o->get_opts(); -$report = new ReportFormatter(line_width=>82); $qrf = new QueryReportFormatter( OptionParser => $o, QueryRewriter => $qr, @@ -1273,7 +1214,6 @@ $qrf = new QueryReportFormatter( Quoter => $q, ExplainAnalyzer => $ex, ); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); my $output = output( sub { $qrf->print_reports( reports => [qw(rusage date files header query_report profile)], @@ -1337,11 +1277,6 @@ foreach my $event ( @$events ) { $ea->aggregate($event); } $ea->calculate_statistical_metrics(); -$report = new ReportFormatter( - line_width => 82, - extend_right => 1, -); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); ok( no_diff( sub { @@ -1360,181 +1295,6 @@ ok( "Variance-to-mean ration (issue 1124)" ); -# ############################################################################# -# Issue 1141: Add "spark charts" to mk-query-digest profile -# ############################################################################# -sub proc_events { - my ( %args ) = @_; - my ($arg, $attrib, $vals) = @args{qw(arg attrib vals)}; - - my $bytes = length $arg; - my $fingerprint = $qr->fingerprint($arg); - - $events = []; - foreach my $val ( @$vals ) { - push @$events, { - bytes => $bytes, - arg => $arg, - fingerprint => $fingerprint, - $attrib => $val, - } - } - - $ea = new EventAggregator( - groupby => 'fingerprint', - worst => 'Query_time', - ); - foreach my $event (@$events) { - $ea->aggregate($event); - } - $ea->calculate_statistical_metrics(apdex_t=>1); - - # Seeing the full chart helps determine what the - # sparkline should look like. - if ( $args{chart} ) { - $result = $qrf->chart_distro( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', - ); - print $result; - } - - return; -}; - -# Test sparklines in isolation. 
-proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0 0 0)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - " ", - "Sparkchart line - all zeros" -); - -# 1us -# 10us -# 100us ################################################ -# 1ms ################################ -# 10ms ################################ -# 100ms ################################################################ -# 1s ################ -# 10s+ -proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0.100000 0.500000 0.000600 0.008000 0.990000 1.000000 0.400000 0.003000 0.000200 0.000100 0.010000 0.020000)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - " -..^_ ", - "Sparkchart line 1" -); - -# 1us -# 10us -# 100us -# 1ms -# 10ms ################################ -# 100ms ################################################################ -# 1s ######## -# 10s+ -proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0.01 0.03 0.08 0.09 0.3 0.5 0.5 0.6 0.7 0.5 0.5 0.9 1.0)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - " .^_ ", - "Sparkchart line 2" -); - -# 1us ################################################################ -# 10us ################################################################ -# 100us ################################################################ -# 1ms ################################################################ -# 10ms ################################################################ -# 100ms ################################################################ -# 1s ################################################################ -# 10s+ -proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0.000003 0.000030 0.000300 0.003000 0.030000 0.300000 3)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - "^^^^^^^ ", - "Sparkchart line - vals in all ranges except 10s+" -); - - -# 1us ################################################################ -# 10us ################################################################ -# 100us -# 1ms -# 10ms -# 100ms -# 1s ################################################################ -# 10s+ ################################################################ -proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0.000003 0.000030 0.000003 0.000030 3 3 30 30)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - "^^ ^^", - "Sparkchart line - twin peaks" -); - -# Test that that ^ sparkchart appears in the event header properly. 
-$result = $qrf->event_report( - ea => $ea, - select => [ qw(Query_time) ], - item => 'select c from t', - rank => 1, - orderby => 'Query_time', - reason => 'top', -); -ok( - no_diff( - $result, - "t/lib/samples/QueryReportFormatter/report028.txt", - cmd_output => 1, - ), - 'Sparkchart in event header' -); - # ############################################################################ # Bug 887688: Prepared statements crash pt-query-digest # ############################################################################ @@ -1565,11 +1325,6 @@ foreach my $event ( @$events ) { $ea->aggregate($event); } $ea->calculate_statistical_metrics(); -$report = new ReportFormatter( - line_width => 82, - extend_right => 1, -); -$qrf->set_report_formatter(report=>'prepared', formatter=>$report); ok( no_diff( sub { diff --git a/t/lib/QueryReview.t b/t/lib/QueryReview.t index c5bba0c1..950953d4 100644 --- a/t/lib/QueryReview.t +++ b/t/lib/QueryReview.t @@ -161,7 +161,6 @@ my $hist_struct = $tp->parse( $qv->set_history_options( table => 'test.query_review_history', - dbh => $dbh, quoter => $q, tbl_struct => $hist_struct, col_pat => qr/^(.*?)_($pat)$/, @@ -257,7 +256,6 @@ $hist_struct = $tp->parse( $tp->get_create_table($dbh, 'test', 'query_review_history')); $qv->set_history_options( table => 'test.query_review_history', - dbh => $dbh, quoter => $q, tbl_struct => $hist_struct, col_pat => qr/^(.*?)_($pat)$/, diff --git a/t/lib/ReportFormatter.t b/t/lib/ReportFormatter.t index 744e85ad..7d69444a 100644 --- a/t/lib/ReportFormatter.t +++ b/t/lib/ReportFormatter.t @@ -88,7 +88,7 @@ is( # Basic report. # ############################################################################# $rf = new ReportFormatter(); -$rf->set_title('Checksum differences'); +$rf->title('Checksum differences'); $rf->set_columns( { name => 'Query ID', @@ -216,7 +216,7 @@ is( # Respect line width. # ############################################################################# $rf = new ReportFormatter(); -$rf->set_title('Respect line width'); +$rf->title('Respect line width'); $rf->set_columns( { name => 'col1' }, { name => 'col2' }, @@ -248,7 +248,7 @@ is( # extend_right # ############################################################################# $rf = new ReportFormatter(extend_right=>1); -$rf->set_title('extend_right'); +$rf->title('extend_right'); $rf->set_columns( { name => 'col1' }, { name => 'col2' }, @@ -280,7 +280,7 @@ is( # Relvative column widths. 
# ############################################################################# $rf = new ReportFormatter(); -$rf->set_title('Relative col widths'); +$rf->title('Relative col widths'); $rf->set_columns( { name => 'col1', width_pct=>'20', }, { name => 'col2', width_pct=>'40', }, @@ -309,7 +309,7 @@ is( ); $rf = new ReportFormatter(); -$rf->set_title('Relative col widths'); +$rf->title('Relative col widths'); $rf->set_columns( { name => 'col1', width_pct=>'20', }, { name => 'col2', width_pct=>'40', }, @@ -344,7 +344,7 @@ is( ); $rf = new ReportFormatter(); -$rf->set_title('Relative col widths'); +$rf->title('Relative col widths'); $rf->set_columns( { name => 'col1', width =>'25', }, { name => 'col2', width_pct=>'33', }, @@ -380,7 +380,7 @@ is( $rf = new ReportFormatter(); -$rf->set_title('Short cols'); +$rf->title('Short cols'); $rf->set_columns( { name => 'I am column1', }, { name => 'I am column2', }, @@ -403,7 +403,7 @@ is( ); $rf = new ReportFormatter(); -$rf->set_title('Short cols'); +$rf->title('Short cols'); $rf->set_columns( { name => 'I am column1', }, { name => 'I am column2', }, @@ -422,7 +422,7 @@ is( ); $rf = new ReportFormatter(); -$rf->set_title('Short cols'); +$rf->title('Short cols'); $rf->set_columns( { name => 'I am column1', }, { name => 'I am column2', }, diff --git a/t/lib/samples/QueryReportFormatter/report001.txt b/t/lib/samples/QueryReportFormatter/report001.txt index 304abd51..ceb1cfc0 100644 --- a/t/lib/samples/QueryReportFormatter/report001.txt +++ b/t/lib/samples/QueryReportFormatter/report001.txt @@ -7,8 +7,7 @@ # Lock time 1ms 1ms 1ms 1ms 1ms 0 1ms # Query 1: 0 QPS, 0x concurrency, ID 0x5796997451B1FA1D at byte 123 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,6 +34,6 @@ select col from tbl where id=42\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5796997451B1FA1D 1.0007 100.0% 1 1.0007 1.00 0.00 SELECT tbl +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x5796997451B1FA1D 1.0007 100.0% 1 1.0007 0.00 SELECT tbl diff --git a/t/lib/samples/QueryReportFormatter/report002.txt b/t/lib/samples/QueryReportFormatter/report002.txt index fbaedf8b..193d5acd 100644 --- a/t/lib/samples/QueryReportFormatter/report002.txt +++ b/t/lib/samples/QueryReportFormatter/report002.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x3F79759E7FA2F117 at byte 1106 _____ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 09:23:49.637892 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -31,8 +30,7 @@ EXECUTE SELECT i FROM d.t WHERE i="3"\G SELECT i FROM d.t WHERE i="3"\G # Query 2: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report003.txt 
b/t/lib/samples/QueryReportFormatter/report003.txt index 9e92376b..c87a0881 100644 --- a/t/lib/samples/QueryReportFormatter/report003.txt +++ b/t/lib/samples/QueryReportFormatter/report003.txt @@ -1,12 +1,11 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5796997451B1FA1D 1.0007 100.0% 1 1.0007 1.00 0.00 SELECT tbl +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x5796997451B1FA1D 1.0007 100.0% 1 1.0007 0.00 SELECT tbl # Query 1: 0 QPS, 0x concurrency, ID 0x5796997451B1FA1D at byte 123 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report004.txt b/t/lib/samples/QueryReportFormatter/report004.txt index 39e50b90..fadc6838 100644 --- a/t/lib/samples/QueryReportFormatter/report004.txt +++ b/t/lib/samples/QueryReportFormatter/report004.txt @@ -1,6 +1,6 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAECF4CA2310AC9E2 1.0303 97.1% 1 1.0303 NS 0.00 UPDATE foo -# MISC 0xMISC 0.0306 2.9% 2 0.0153 NS 0.0 <2 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAECF4CA2310AC9E2 1.0303 97.1% 1 1.0303 0.00 UPDATE foo +# MISC 0xMISC 0.0306 2.9% 2 0.0153 0.0 <2 ITEMS> diff --git a/t/lib/samples/QueryReportFormatter/report005.txt b/t/lib/samples/QueryReportFormatter/report005.txt index aea3787c..502c32b9 100644 --- a/t/lib/samples/QueryReportFormatter/report005.txt +++ b/t/lib/samples/QueryReportFormatter/report005.txt @@ -1,5 +1,5 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============== ===== ====== ==== ===== ======== -# 1 0xCB5621E548E5497F 17.5000 100.0% 4 4.3750 NS 2.23 SELECT t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============== ===== ====== ===== ======== +# 1 0xCB5621E548E5497F 17.5000 100.0% 4 4.3750 2.23 SELECT t diff --git a/t/lib/samples/QueryReportFormatter/report007.txt b/t/lib/samples/QueryReportFormatter/report007.txt index 3ce434fa..932ec60c 100644 --- a/t/lib/samples/QueryReportFormatter/report007.txt +++ b/t/lib/samples/QueryReportFormatter/report007.txt @@ -1,6 +1,6 @@ # Query 1: 2 QPS, 9.00x concurrency, ID 0x82860EDA9A88FCC5 at byte 1 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 5.44 +# Scores: V/M = 5.44 # Time range: 2007-10-15 21:43:52 to 21:43:53 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report009.txt b/t/lib/samples/QueryReportFormatter/report009.txt index dcd22b37..e208c406 100644 --- a/t/lib/samples/QueryReportFormatter/report009.txt +++ b/t/lib/samples/QueryReportFormatter/report009.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 11:00:13.118191 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report010.txt b/t/lib/samples/QueryReportFormatter/report010.txt index 6dcbd45c..319343f5 100644 --- a/t/lib/samples/QueryReportFormatter/report010.txt +++ b/t/lib/samples/QueryReportFormatter/report010.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x82860EDA9A88FCC5 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report011.txt b/t/lib/samples/QueryReportFormatter/report011.txt index 42508fc8..02d54b0c 100644 --- a/t/lib/samples/QueryReportFormatter/report011.txt +++ b/t/lib/samples/QueryReportFormatter/report011.txt @@ -1,6 +1,6 @@ # Query 1: 0.67 QPS, 1x concurrency, ID 0x82860EDA9A88FCC5 at byte 0 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = NS [0.0]*, V/M = 0.33 +# Scores: V/M = 0.33 # Time range: 2007-10-15 21:43:52 to 21:43:55 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report012.txt b/t/lib/samples/QueryReportFormatter/report012.txt index 31943a57..330f8d68 100644 --- a/t/lib/samples/QueryReportFormatter/report012.txt +++ b/t/lib/samples/QueryReportFormatter/report012.txt @@ -1,6 +1,6 @@ # Query 1: 1 QPS, 2x concurrency, ID 0x82860EDA9A88FCC5 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = NS [0.0]*, V/M = 0.30 +# Scores: V/M = 0.30 # Time range: 2007-10-15 21:43:52 to 21:43:55 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report013.txt b/t/lib/samples/QueryReportFormatter/report013.txt index 1ef0919d..3ff99d58 100644 --- a/t/lib/samples/QueryReportFormatter/report013.txt +++ b/t/lib/samples/QueryReportFormatter/report013.txt @@ -1,5 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 2 diff --git a/t/lib/samples/QueryReportFormatter/report014.txt b/t/lib/samples/QueryReportFormatter/report014.txt index b0fc35ed..782b0010 100644 --- a/t/lib/samples/QueryReportFormatter/report014.txt +++ b/t/lib/samples/QueryReportFormatter/report014.txt @@ -1,5 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 3 diff --git a/t/lib/samples/QueryReportFormatter/report015.txt b/t/lib/samples/QueryReportFormatter/report015.txt index 04949587..382a228c 100644 --- a/t/lib/samples/QueryReportFormatter/report015.txt +++ b/t/lib/samples/QueryReportFormatter/report015.txt @@ -1,6 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 3 diff --git a/t/lib/samples/QueryReportFormatter/report016.txt b/t/lib/samples/QueryReportFormatter/report016.txt index b443426a..64434e38 100644 --- a/t/lib/samples/QueryReportFormatter/report016.txt +++ b/t/lib/samples/QueryReportFormatter/report016.txt @@ -1,6 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 1 diff --git a/t/lib/samples/QueryReportFormatter/report024.txt b/t/lib/samples/QueryReportFormatter/report024.txt index da2a0d3d..2b1b7d80 100644 --- a/t/lib/samples/QueryReportFormatter/report024.txt +++ b/t/lib/samples/QueryReportFormatter/report024.txt @@ -1,5 +1,5 @@ # Query 0: 0 QPS, 0x concurrency, ID 0x82860EDA9A88FCC5 at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report027.txt b/t/lib/samples/QueryReportFormatter/report027.txt deleted file mode 100644 index fb3bc4ba..00000000 --- a/t/lib/samples/QueryReportFormatter/report027.txt +++ /dev/null @@ -1,58 +0,0 @@ - -# Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ========= -# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 NS 
0.00 TF>aI SELECT t -# MISC 0xMISC 0.0003 100.0% 1 0.0003 NS 0.0 MISC <1 ITEMS> - -# Query 1: 0 QPS, 0x concurrency, ID 0x46F81B022F1AD76B at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aI -# Query_time sparkline: | ^ | -# Time range: all events occurred at 2009-12-08 09:23:49.637394 -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 100 1 -# Exec time 100 286us 286us 286us 286us 286us 0 286us -# Query size 100 90 90 90 90 90 0 90 -# String: -# cmd Query -# Databases qrf -# Query_time distribution -# 1us -# 10us -# 100us ################################################################ -# 1ms -# 10ms -# 100ms -# 1s -# 10s+ -# Tables -# SHOW TABLE STATUS FROM `qrf` LIKE 't'\G -# SHOW CREATE TABLE `qrf`.`t`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select t1.i from t as t1 join t as t2 where t1.i < t2.i and t1.v is not null order by t1.i\G -# *************************** 1. row *************************** -# id: 1 -# select_type: SIMPLE -# table: t1 -# partitions: NULL -# type: ALL -# possible_keys: PRIMARY -# key: NULL -# key_len: NULL -# ref: NULL -# rows: 4 -# Extra: Using where; Using temporary; Using filesort -# *************************** 2. row *************************** -# id: 1 -# select_type: SIMPLE -# table: t2 -# partitions: NULL -# type: index -# possible_keys: PRIMARY -# key: PRIMARY -# key_len: 4 -# ref: NULL -# rows: 4 -# Extra: Using where; Using index; Using join buffer diff --git a/t/lib/samples/QueryReportFormatter/report028.txt b/t/lib/samples/QueryReportFormatter/report028.txt index 3640f6c1..eeba3c53 100644 --- a/t/lib/samples/QueryReportFormatter/report028.txt +++ b/t/lib/samples/QueryReportFormatter/report028.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xFDE00DF974C61E9F at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.62 [1.0]*, V/M = 17.71 +# Scores: V/M = 17.71 # Query_time sparkline: |^^ ^^| # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report029.txt b/t/lib/samples/QueryReportFormatter/report029.txt deleted file mode 100644 index 08605a95..00000000 --- a/t/lib/samples/QueryReportFormatter/report029.txt +++ /dev/null @@ -1,56 +0,0 @@ - -# Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ========= -# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 NS 0.00 TF>aI SELECT t -# MISC 0xMISC 0.0003 100.0% 1 0.0003 NS 0.0 MISC <1 ITEMS> - -# Query 1: 0 QPS, 0x concurrency, ID 0x46F81B022F1AD76B at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aI -# Query_time sparkline: | ^ | -# Time range: all events occurred at 2009-12-08 09:23:49.637394 -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 100 1 -# Exec time 100 286us 286us 286us 286us 286us 0 286us -# Query size 100 90 90 90 90 90 0 90 -# String: -# cmd Query -# Databases qrf -# Query_time distribution -# 1us -# 10us -# 100us ################################################################ -# 1ms -# 10ms -# 100ms -# 1s -# 10s+ -# Tables -# SHOW TABLE STATUS FROM `qrf` LIKE 't'\G -# SHOW CREATE TABLE `qrf`.`t`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select t1.i from t as t1 join t as t2 where t1.i < t2.i and t1.v is not null order by t1.i\G -# *************************** 1. row *************************** -# id: 1 -# select_type: SIMPLE -# table: t1 -# type: ALL -# possible_keys: PRIMARY -# key: NULL -# key_len: NULL -# ref: NULL -# rows: 4 -# Extra: Using where; Using temporary; Using filesort -# *************************** 2. 
row *************************** -# id: 1 -# select_type: SIMPLE -# table: t2 -# type: index -# possible_keys: PRIMARY -# key: PRIMARY -# key_len: 4 -# ref: NULL -# rows: 4 -# Extra: Using where; Using index diff --git a/t/lib/samples/QueryReportFormatter/report032.txt b/t/lib/samples/QueryReportFormatter/report032.txt deleted file mode 100644 index d792a3b2..00000000 --- a/t/lib/samples/QueryReportFormatter/report032.txt +++ /dev/null @@ -1,58 +0,0 @@ - -# Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ========= -# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 NS 0.00 ia SELECT t -# MISC 0xMISC 0.0003 100.0% 1 0.0003 NS 0.0 MISC <1 ITEMS> - -# Query 1: 0 QPS, 0x concurrency, ID 0x46F81B022F1AD76B at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# EXPLAIN sparkline: ia -# Query_time sparkline: | ^ | -# Time range: all events occurred at 2009-12-08 09:23:49.637394 -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 100 1 -# Exec time 100 286us 286us 286us 286us 286us 0 286us -# Query size 100 90 90 90 90 90 0 90 -# String: -# cmd Query -# Databases qrf -# Query_time distribution -# 1us -# 10us -# 100us ################################################################ -# 1ms -# 10ms -# 100ms -# 1s -# 10s+ -# Tables -# SHOW TABLE STATUS FROM `qrf` LIKE 't'\G -# SHOW CREATE TABLE `qrf`.`t`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select t1.i from t as t1 join t as t2 where t1.i < t2.i and t1.v is not null order by t1.i\G -# *************************** 1. row *************************** -# id: 1 -# select_type: SIMPLE -# table: t1 -# partitions: NULL -# type: index -# possible_keys: PRIMARY -# key: PRIMARY -# key_len: 4 -# ref: NULL -# rows: 4 -# Extra: Using where -# *************************** 2. 
row *************************** -# id: 1 -# select_type: SIMPLE -# table: t2 -# partitions: NULL -# type: ALL -# possible_keys: PRIMARY -# key: NULL -# key_len: NULL -# ref: NULL -# rows: 4 -# Extra: Range checked for each record (index map: 0x1) diff --git a/t/pt-query-digest/execute.t b/t/pt-query-digest/execute.t deleted file mode 100644 index 75b51203..00000000 --- a/t/pt-query-digest/execute.t +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env perl - -BEGIN { - die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" - unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; - unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; -}; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use Test::More; - -use Sandbox; -use PerconaTest; - -require "$trunk/bin/pt-query-digest"; - -my $dp = new DSNParser(opts=>$dsn_opts); -my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); -my $dbh = $sb->get_dbh_for('master'); - -if ( !$dbh ) { - plan skip_all => 'Cannot connect to sandbox master'; -} -else { - plan tests => 6; -} - -my $output = ''; -my $cnf = 'h=127.1,P=12345,u=msandbox,p=msandbox'; -my @args = qw(--report-format=query_report --limit 10 --stat); - -$sb->create_dbs($dbh, [qw(test)]); -$dbh->do('use test'); -$dbh->do('create table foo (a int, b int, c int)'); - -is_deeply( - $dbh->selectall_arrayref('select * from test.foo'), - [], - 'No rows in table yet' -); - -ok( - no_diff( - sub { pt_query_digest::main(@args, '--execute', $cnf, - "$trunk/t/lib/samples/slowlogs/slow018.txt") }, - 't/pt-query-digest/samples/slow018_execute_report_1.txt', - ), - '--execute without database' -); - -is_deeply( - $dbh->selectall_arrayref('select * from test.foo'), - [], - 'Still no rows in table' -); - -# Provide a default db to make --execute work. -$cnf .= ',D=test'; - -# TODO: This test is a PITA because every time the mqd output -# changes the -n of tail has to be adjusted. - -# - -# We tail to get everything from "Exec orig" onward. The lines -# above have the real execution time will will vary. The last 18 lines -# are sufficient to see that it actually executed without errors. -ok( - no_diff( - sub { pt_query_digest::main(@args, '--execute', $cnf, - "$trunk/t/lib/samples/slowlogs/slow018.txt") }, - 't/pt-query-digest/samples/slow018_execute_report_2.txt', - trf => 'tail -n 30', - sed => ["-e 's/s ##*/s/g'"], - ), - '--execute with default database' -); - -is_deeply( - $dbh->selectall_arrayref('select * from test.foo'), - [[qw(1 2 3)],[qw(4 5 6)]], - 'Rows in table' -); - -# ############################################################################# -# Done. -# ############################################################################# -$sb->wipe_clean($dbh); -ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); -exit; diff --git a/t/pt-query-digest/issue_360.t b/t/pt-query-digest/issue_360.t index 65069d3b..b3a1effc 100644 --- a/t/pt-query-digest/issue_360.t +++ b/t/pt-query-digest/issue_360.t @@ -34,11 +34,11 @@ else { my $pid_file = "/tmp/pt-query-digest-test-issue_360.t.$PID"; # Need a clean query review table. -$sb->create_dbs($dbh, [qw(test)]); +$sb->create_dbs($dbh, [qw(test percona_schema)]); # Run pt-query-digest in the background for 2s, # saving queries to test.query_review. 
-diag(`$trunk/bin/pt-query-digest --processlist h=127.1,P=12345,u=msandbox,p=msandbox --interval 0.01 --create-review-table --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review --daemonize --pid $pid_file --log /dev/null --run-time 2`); +diag(`$trunk/bin/pt-query-digest --processlist h=127.1,P=12345,u=msandbox,p=msandbox --interval 0.01 --create-review-table --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review --daemonize --pid $pid_file --log /dev/null --run-time 2`); # Wait until its running. PerconaTest::wait_for_files($pid_file); diff --git a/t/pt-query-digest/mirror.t b/t/pt-query-digest/mirror.t deleted file mode 100644 index 27e77aae..00000000 --- a/t/pt-query-digest/mirror.t +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env perl - -BEGIN { - die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" - unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; - unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; -}; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use Test::More; -use Time::HiRes qw(sleep); - -use PerconaTest; -use DSNParser; -use Sandbox; - -my $dp = new DSNParser(opts=>$dsn_opts); -my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); -my $dbh1 = $sb->get_dbh_for('master'); -my $dbh2 = $sb->get_dbh_for('slave1'); - -if ( !$dbh1 ) { - plan skip_all => 'Cannot connect to sandbox master'; -} -elsif ( !$dbh2 ) { - plan skip_all => 'Cannot connect to sandbox slave'; -} -else { - plan tests => 5; -} - -my $output; -my $cmd; -my $pid_file = "/tmp/pt-query-digest-mirror-test.pid"; -diag(`rm $pid_file 2>/dev/null`); - -# ########################################################################## -# Tests for swapping --processlist and --execute -# ########################################################################## -$dbh1->do('set global read_only=0'); -$dbh2->do('set global read_only=1'); -$cmd = "$trunk/bin/pt-query-digest " - . "--processlist h=127.1,P=12345,u=msandbox,p=msandbox " - . "--execute h=127.1,P=12346,u=msandbox,p=msandbox --mirror 1 " - . "--pid $pid_file"; - -{ - local $ENV{PTDEBUG}=1; - `$cmd > /tmp/read_only.txt 2>&1 &`; -} - -$dbh1->do('select sleep(1)'); -$dbh1->do('set global read_only=1'); -$dbh2->do('set global read_only=0'); -$dbh1->do('select sleep(1)'); - -PerconaTest::wait_for_files($pid_file); -chomp(my $pid = `cat $pid_file`); -kill 15, $pid; -sleep 0.25; - -# Verify that it's dead... 
-$output = `ps x | grep '^[ ]*$pid'`; -is( - $output, - '', - 'It is stopped now' -); - -$output = `ps -p $pid`; -unlike($output, qr/pt-query-digest/, 'It is stopped now'); - -$output = `grep read_only /tmp/read_only.txt`; -# Sample output: -# # main:3619 6897 read_only on execute for --execute: 1 (want 1) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on execute for --execute: 0 (want 1) -# # main:3622 6897 read_only wrong for --execute getting a dbh from processlist -# # main:3619 6897 read_only on processlist for --processlist: 1 (want 0) -# # main:3622 6897 read_only wrong for --processlist getting a dbh from execute -# # main:3619 6897 read_only on processlist for --execute: 1 (want 1) -# # main:3619 6897 read_only on execute for --processlist: 0 (want 0) -like($output, qr/wrong for --execute getting a dbh from processlist/, - 'switching --processlist works'); -like($output, qr/wrong for --processlist getting a dbh from execute/, - 'switching --execute works'); - -diag(`rm -rf /tmp/read_only.txt`); - -# ############################################################################# -# Done. -# ############################################################################# -diag(`rm $pid_file 2>/dev/null`); -$dbh1->do('set global read_only=0'); -$dbh2->do('set global read_only=1'); -$sb->wipe_clean($dbh1); -ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); -exit; diff --git a/t/pt-query-digest/option_sanity.t b/t/pt-query-digest/option_sanity.t index 5e4cf18a..1e177a62 100644 --- a/t/pt-query-digest/option_sanity.t +++ b/t/pt-query-digest/option_sanity.t @@ -13,14 +13,33 @@ use Test::More; use PerconaTest; +my $cmd = "$trunk/bin/pt-query-digest"; +my $help = qx{$cmd --help}; + +my $output; + # ############################################################################# # Test cmd line op sanity. 
# ############################################################################# -my $output = `$trunk/bin/pt-query-digest --review h=127.1,P=12345,u=msandbox,p=msandbox`; -like($output, qr/--review DSN requires a D/, 'Dies if no D part in --review DSN'); +for my $opt (qw(review-table history-table)) { + $output = `$cmd --review h=127.1,P=12345,u=msandbox,p=msandbox --$opt test`; + like($output, qr/--$opt should be passed a/, "Dies if no database part in --$opt"); +} -$output = `$trunk/bin/pt-query-digest --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test`; -like($output, qr/--review DSN requires a D/, 'Dies if no t part in --review DSN'); +$output = `$cmd --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=test`; +like($output, qr/--review does not accept a t option/, 'Dies if t part in --review DSN'); + +like( + $help, + qr/review-table\s+\Qpercona_schema.query_review\E/, + "--review-table has a sane default" +); + +like( + $help, + qr/history-table\s+\Qpercona_schema.query_history\E/, + "--history-table has a sane default" +); # ############################################################################# # https://bugs.launchpad.net/percona-toolkit/+bug/885382 @@ -34,43 +53,59 @@ my @options = qw( --group-by file ); -$output = `$trunk/bin/pt-query-digest @options --embedded-attributes '-- .*' $sample.slow010.txt`; +$output = `$cmd @options --embedded-attributes '-- .*' $sample.slow010.txt`; like $output, qr/\Q--embedded-attributes should be passed two comma-separated patterns, got 1/, 'Bug 885382: --embedded-attributes cardinality'; -$output = `$trunk/bin/pt-query-digest @options --embedded-attributes '-- .*,(?{1234})' $sample.slow010.txt`; +$output = `$cmd @options --embedded-attributes '-- .*,(?{1234})' $sample.slow010.txt`; like $output, qr/\Q--embedded-attributes Eval-group /, "Bug 885382: --embedded-attributes rejects invalid patterns early"; -$output = `$trunk/bin/pt-query-digest @options --embedded-attributes '-- .*,(?*asdasd' $sample.slow010.txt`; +$output = `$cmd @options --embedded-attributes '-- .*,(?*asdasd' $sample.slow010.txt`; like $output, qr/\Q--embedded-attributes Sequence (?*...) not recognized/, "Bug 885382: --embedded-attributes rejects invalid patterns early"; -$output = `$trunk/bin/pt-query-digest @options --embedded-attributes '-- .*,[:alpha:]' $sample.slow010.txt`; +$output = `$cmd @options --embedded-attributes '-- .*,[:alpha:]' $sample.slow010.txt`; like $output, qr/\Q--embedded-attributes POSIX syntax [: :] belongs inside character/, "Bug 885382: --embedded-attributes rejects warning patterns early";; + +# We removed --statistics, but they should still print out if we use PTDEBUG. 
+ +$output = qx{PTDEBUG=1 $cmd --no-report ${sample}slow002.txt 2>&1}; +my $stats = slurp_file("t/pt-query-digest/samples/stats-slow002.txt"); + +like( + $output, + qr/\Q$stats\E/m, + 'PTDEBUG shows --statistics for slow002.txt', +); + +like( + $output, + qr/Pipeline profile/m, + 'PTDEBUG shows --pipeline-profile' +); + # ############################################################################# # pt-query-digest help output mangled # https://bugs.launchpad.net/percona-toolkit/+bug/831525 # ############################################################################# -$output = `$trunk/bin/pt-query-digest --help`; - like( - $output, + $help, qr/\Q--report-format=A\E\s* \QPrint these sections of the query analysis\E\s* - \Qreport (default rusage,date,hostname,files,\E\s* - \Qheader,profile,query_report,prepared)\E/x, + \Qreport (default rusage\E,\s*date,\s*hostname,\s*files,\s* header,\s*profile,\s*query_report,\s*prepared\)/x, "Bug 831525: pt-query-digest help output mangled" ); diff --git a/t/pt-query-digest/output.t b/t/pt-query-digest/output.t new file mode 100644 index 00000000..f61bb572 --- /dev/null +++ b/t/pt-query-digest/output.t @@ -0,0 +1,55 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; + +use PerconaTest; +require "$trunk/bin/pt-query-digest"; + +no warnings 'once'; +local $JSONReportFormatter::sorted_json = 1; +local $JSONReportFormatter::pretty_json = 1; + +my @args = qw(--output json); +my $sample = "$trunk/t/lib/samples"; +my $results = "t/pt-query-digest/samples"; + +ok( + no_diff( + sub { pt_query_digest::main(@args, "$sample/slowlogs/empty") }, + "$results/empty_report.txt", + ), + 'json output for empty log' +); + +ok( + no_diff( + sub { pt_query_digest::main(@args, "$sample/slowlogs/slow002.txt") }, + "$results/output_json_slow002.txt" + ), + 'json output for slow002' +); + +# --type tcpdump + +ok( + no_diff( + sub { pt_query_digest::main(qw(--type tcpdump --limit 10 --watch-server 127.0.0.1:12345), + @args, "$sample/tcpdump/tcpdump021.txt") }, + "$results/output_json_tcpdump021.txt", + ), + 'json output for tcpdump021', +); + +# ############################################################################# +# Done. 
+# ############################################################################# +done_testing; diff --git a/t/pt-query-digest/resume.t b/t/pt-query-digest/resume.t new file mode 100644 index 00000000..ccc2ca65 --- /dev/null +++ b/t/pt-query-digest/resume.t @@ -0,0 +1,163 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; + +use IO::File; +use Fcntl qw(:seek); +use File::Temp qw(tempfile); + +use PerconaTest; +use Sandbox; +require "$trunk/bin/pt-query-digest"; + +my $dp = new DSNParser(opts=>$dsn_opts); +my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); +my $dbh = $sb->get_dbh_for('master'); + +if ( !$dbh ) { + plan skip_all => 'Cannot connect to sandbox master'; +} + +my $samples = "$trunk/t/lib/samples/slowlogs"; +my $output; + +$sb->create_dbs($dbh, ['test']); + +my $resume_file = (tempfile())[1]; + +my ($fh, $filename) = tempfile(UNLINK => 1); +$fh->autoflush(1); + +sub resume_offset_ok { + my ($resume_file, $file, $msg) = @_; + chomp(my $offset = slurp_file($resume_file)); + open my $tmp_fh, q{<}, $file or die $OS_ERROR; + seek $tmp_fh, 0, SEEK_END; + is(tell($tmp_fh), $offset, $msg); +} + +sub run_pqd { + my @extra_args = @_; + my $run = output(sub { pt_query_digest::main(qw(--limit 10), @extra_args, $filename) }, stderr => 1); + $run =~ s/\d+ms user time.+//; + $run =~ s/Current date: .+//; + return $run; +} + +print { $fh } slurp_file("$samples/slow006.txt"); + +my @runs; +push @runs, run_pqd() for 1, 2; + +is($runs[0], $runs[1], "Sanity check: Behaves the same between runs without --resume"); + +my @resume_runs; +push @resume_runs, run_pqd('--resume', $resume_file) for 1, 2; + +(my $without_resume_line = $resume_runs[0]) =~ s/\n\n. 
Saved resume file offset.+//; +is( + $runs[0], + $without_resume_line, + "First time with --resume just like the first time without" +); + +like( + $resume_runs[0], + qr/\QSaved resume file offset\E/, + "Saves offset with --resume" +); + +like( + $resume_runs[1], + qr/\QNo events processed.\E/, + "...and there are no events on the second run" +); + +resume_offset_ok($resume_file, $filename, "The resume file has the correct offset"); + +print { $fh } slurp_file("$samples/slow002.txt"); + +push @resume_runs, run_pqd('--resume', $resume_file) for 1, 2; + +unlike( + $resume_runs[2], + qr/\QNo events processed.\E/, + "New run detects new events" +); + +like( + $resume_runs[3], + qr/\QNo events processed.\E/, + "And running again after that finds nothing new" +); + +resume_offset_ok($resume_file, $filename, "The resume file has the updated offset"); + +unlink($resume_file); + +close $fh; + +# ############################################################################# +# Now test the interaction with --run-time-mode interval +# ############################################################################# + +($fh, $filename) = tempfile(UNLINK => 1); +$fh->autoflush(1); + +print { $fh } slurp_file("$trunk/t/lib/samples/slowlogs/slow033.txt"); + +my @run_args = (qw(--run-time-mode interval --run-time 1d --iterations 0), + qw(--report-format query_report)); +my @resume_args = (@run_args, '--resume', $resume_file); + +my @run_time; +push @run_time, run_pqd(@resume_args) for 1,2; + +resume_offset_ok($resume_file, $filename, "The resume file has the correct offset when using --run-time-mode interval"); + +print { $fh } slurp_file("$samples/slow002.txt"); + +push @run_time, run_pqd(@resume_args) for 1,2; + +resume_offset_ok($resume_file, $filename, "...and it updates correctly"); + +like( + $_, + qr/\QNo events processed.\E/, + "Runs 2 & 4 find no new data" +) for @run_time[1, 3]; + +# This shows up in the first report, but shouldn't show up in the +# third run, after we add new events to the file. +my $re = qr/\QSELECT * FROM foo\E/; + +unlike( + $run_time[2], + $re, + "Events from the first run are correctly ignored" +); + +my $no_resume = run_pqd(@run_args); + +like( + $no_resume, + $re, + "...but do show up if run without resume" +); + +# ############################################################################# +# Done. +# ############################################################################# +$sb->wipe_clean($dbh); +ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); +done_testing; +exit; diff --git a/t/pt-query-digest/review.t b/t/pt-query-digest/review.t index bf980c3d..8ad93f76 100644 --- a/t/pt-query-digest/review.t +++ b/t/pt-query-digest/review.t @@ -22,9 +22,6 @@ my $dbh = $sb->get_dbh_for('master'); if ( !$dbh ) { plan skip_all => 'Cannot connect to sandbox master'; } -else { - plan tests => 18; -} sub normalize_numbers { use Scalar::Util qw(looks_like_number); @@ -43,21 +40,21 @@ $sb->load_file('master', 't/pt-query-digest/samples/query_review.sql'); # Test --create-review and --create-review-history-table $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow006.txt --create-review-table --review " - . "h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review --create-review-history-table " - . "--review-history t=query_review_history"; +$cmd = "${run_with}slow006.txt --create-review-tables --review " + . "h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review " + . 
"--history-table test.query_review_history"; $output = `$cmd >/dev/null 2>&1`; my ($table) = $dbh->selectrow_array( "show tables from test like 'query_review'"); -is($table, 'query_review', '--create-review'); +is($table, 'query_review', '--create-review-tables'); ($table) = $dbh->selectrow_array( "show tables from test like 'query_review_history'"); -is($table, 'query_review_history', '--create-review-history-table'); +is($table, 'query_review_history', '--create-review-tables'); $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow006.txt --review h=127.1,u=msandbox,p=msandbox,P=12345,D=test,t=query_review " - . "--review-history t=query_review_history"; +$cmd = "${run_with}slow006.txt --review h=127.1,u=msandbox,p=msandbox,P=12345 --review-table test.query_review " + . "--history-table test.query_review_history"; $output = `$cmd`; my $res = $dbh->selectall_arrayref( 'SELECT * FROM test.query_review', { Slice => {} } ); @@ -181,17 +178,21 @@ is_deeply( # have been reviewed, the report should include both of them with # their respective query review info added to the report. ok( - no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review', "t/pt-query-digest/samples/slow006_AR_1.txt"), + no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review --create-review-tables', "t/pt-query-digest/samples/slow006_AR_1.txt"), 'Analyze-review pass 1 reports not-reviewed queries' ); +($table) = $dbh->selectrow_array( + "show tables from percona_schema like 'query_history'"); +is($table, 'query_history', '--create-review-tables creates both percona_schema and query_review_history'); + # Mark a query as reviewed and run --report again and that query should # not be reported. $dbh->do('UPDATE test.query_review SET reviewed_by="daniel", reviewed_on="2008-12-24 12:00:00", comments="foo_tbl is ok, so are cranberries" WHERE checksum=11676753765851784517'); ok( - no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review', "t/pt-query-digest/samples/slow006_AR_2.txt"), + no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review', "t/pt-query-digest/samples/slow006_AR_2.txt"), 'Analyze-review pass 2 does not report the reviewed query' ); @@ -199,7 +200,7 @@ ok( # to re-appear in the report with the reviewed_by, reviewed_on and comments # info included. ok( - no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review --report-all', "t/pt-query-digest/samples/slow006_AR_4.txt"), + no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review --report-all', "t/pt-query-digest/samples/slow006_AR_4.txt"), 'Analyze-review pass 4 with --report-all reports reviewed query' ); @@ -208,7 +209,7 @@ $dbh->do('ALTER TABLE test.query_review ADD COLUMN foo INT'); $dbh->do('UPDATE test.query_review SET foo=42 WHERE checksum=15334040482108055940'); ok( - no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review', "t/pt-query-digest/samples/slow006_AR_5.txt"), + no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review', "t/pt-query-digest/samples/slow006_AR_5.txt"), 'Analyze-review pass 5 reports new review info column' ); @@ -217,7 +218,7 @@ ok( $dbh->do("update test.query_review set first_seen='0000-00-00 00:00:00', " . 
" last_seen='0000-00-00 00:00:00'"); $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow022.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review"; +$cmd = "${run_with}slow022.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review"; $output = `$cmd`; unlike($output, qr/last_seen/, 'no last_seen when 0000 timestamp'); unlike($output, qr/first_seen/, 'no first_seen when 0000 timestamp'); @@ -231,7 +232,7 @@ unlike($output, qr/0000-00-00 00:00:00/, 'no 0000-00-00 00:00:00 timestamp'); # Make sure a missing Time property does not cause a crash. Don't test data # in table, because it varies based on when you run the test. $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow021.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review"; +$cmd = "${run_with}slow021.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review"; $output = `$cmd`; unlike($output, qr/Use of uninitialized value/, 'didnt crash due to undef ts'); @@ -239,7 +240,7 @@ unlike($output, qr/Use of uninitialized value/, 'didnt crash due to undef ts'); # crash. Don't test data in table, because it varies based on when you run # the test. $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow022.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review"; +$cmd = "${run_with}slow022.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review"; $output = `$cmd`; # Don't test data in table, because it varies based on when you run the test. unlike($output, qr/Use of uninitialized value/, 'no crash due to totally missing ts'); @@ -248,7 +249,7 @@ unlike($output, qr/Use of uninitialized value/, 'no crash due to totally missing # --review --no-report # ############################################################################# $sb->load_file('master', 't/pt-query-digest/samples/query_review.sql'); -$output = `${run_with}slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review --no-report --create-review-table`; +$output = `${run_with}slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review --no-report --create-review-table`; $res = $dbh->selectall_arrayref('SELECT * FROM test.query_review'); is( $res->[0]->[1], @@ -268,7 +269,7 @@ is( $dbh->do('truncate table test.query_review'); $dbh->do('truncate table test.query_review_history'); -`${run_with}slow002.txt --review h=127.1,u=msandbox,p=msandbox,P=12345,D=test,t=query_review --review-history t=query_review_history --no-report --filter '\$event->{arg} =~ m/foo\.bar/' > /dev/null`; +`${run_with}slow002.txt --review h=127.1,u=msandbox,p=msandbox,P=12345 --review-table test.query_review --history-table test.query_review_history --no-report --filter '\$event->{arg} =~ m/foo\.bar/' > /dev/null`; $res = $dbh->selectall_arrayref( 'SELECT * FROM test.query_review_history', { Slice => {} } ); @@ -396,8 +397,9 @@ $dbh->do($min_tbl); $output = output( sub { pt_query_digest::main( - '--review', 'h=127.1,u=msandbox,p=msandbox,P=12345,D=test,t=query_review', - '--review-history', 't=query_review_history', + '--review', 'h=127.1,u=msandbox,p=msandbox,P=12345', + '--review-table', 'test.query_review', + '--history-table', 'test.query_review_history', qw(--no-report --no-continue-on-error), "$trunk/t/lib/samples/slow002.txt") }, @@ -415,4 +417,5 @@ unlike( # ############################################################################# $sb->wipe_clean($dbh); 
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); +done_testing; exit; diff --git a/t/pt-query-digest/samples/binlog001.txt b/t/pt-query-digest/samples/binlog001.txt index b6acca7b..d4ac8514 100644 --- a/t/pt-query-digest/samples/binlog001.txt +++ b/t/pt-query-digest/samples/binlog001.txt @@ -15,8 +15,7 @@ # error code 0 0 0 0 0 0 0 # Query 1: 0 QPS, 0x concurrency, ID 0xCD948EAF18BC614E at byte 953 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-07 12:02:08 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -52,8 +51,7 @@ replace into test4.tbl9(tbl5, day, todo, comment) and o.col3 >= date_sub(current_date, interval 30 day)\G # Query 2: 0 QPS, 0x concurrency, ID 0xC356FD9EFD7D799E at byte 605 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-07 12:02:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -89,8 +87,7 @@ select e.tblo = o.tblo, inner join test3.tbl2 as e on o.animal = e.animal and o.oid = e.oid where e.tblo is null\G # Query 3: 0 QPS, 0x concurrency, ID 0xB5E55291C7DE1096 at byte 1469 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-07 12:02:50 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -126,8 +123,7 @@ select o.tbl2 = e.tbl2, on o.animal = e.animal and o.oid = e.oid where o.tbl2 is null\G # Query 4: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 146 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-07 12:02:50 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -156,8 +152,7 @@ select o.tbl2 = e.tbl2, BEGIN\G # Query 5: 0 QPS, 0x concurrency, ID 0xED69B13F3D0161D0 at byte 2479 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-07 12:02:53 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -191,8 +186,7 @@ select last2metric1 = last1metric1, last2time = last1time, last0metric1 = ondeckmetric1, last0time = now() from test2.tbl8 where tbl8 in (10800712)\G # Query 6: 0 QPS, 0x concurrency, ID 0x79BFEA84D0CED05F at byte 1889 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-07 12:02:53 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -222,11 +216,11 @@ insert into test1.tbl6 metric12 = metric12 + values(metric12), secs = secs + values(secs)\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Ite -# ==== ================== ================ ===== ========== ==== ===== === -# 1 0xCD948EAF18BC614E 20704.0000 16.7% 1 20704.0000 0.00 0.00 REPLACE SELECT test?.tbl? test?.tblo test?.tbl? -# 2 0xC356FD9EFD7D799E 20675.0000 16.7% 1 20675.0000 0.00 0.00 UPDATE test?.tblo test?.tbl? 
-# 3 0xB5E55291C7DE1096 20664.0000 16.7% 1 20664.0000 0.00 0.00 UPDATE test?.tblo test?.tbl? -# 4 0x85FFF5AA78E5FF6A 20664.0000 16.7% 1 20664.0000 0.00 0.00 BEGIN -# 5 0xED69B13F3D0161D0 20661.0000 16.7% 1 20661.0000 0.00 0.00 UPDATE test?.tbl? -# 6 0x79BFEA84D0CED05F 20661.0000 16.7% 1 20661.0000 0.00 0.00 INSERT UPDATE test?.tbl? +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ================ ===== ========== ===== ======== +# 1 0xCD948EAF18BC614E 20704.0000 16.7% 1 20704.0000 0.00 REPLACE SELECT test?.tbl? test?.tblo test?.tbl? +# 2 0xC356FD9EFD7D799E 20675.0000 16.7% 1 20675.0000 0.00 UPDATE test?.tblo test?.tbl? +# 3 0xB5E55291C7DE1096 20664.0000 16.7% 1 20664.0000 0.00 UPDATE test?.tblo test?.tbl? +# 4 0x85FFF5AA78E5FF6A 20664.0000 16.7% 1 20664.0000 0.00 BEGIN +# 5 0xED69B13F3D0161D0 20661.0000 16.7% 1 20661.0000 0.00 UPDATE test?.tbl? +# 6 0x79BFEA84D0CED05F 20661.0000 16.7% 1 20661.0000 0.00 INSERT UPDATE test?.tbl? diff --git a/t/pt-query-digest/samples/binlog002.txt b/t/pt-query-digest/samples/binlog002.txt index 076171f9..3e1426a2 100644 --- a/t/pt-query-digest/samples/binlog002.txt +++ b/t/pt-query-digest/samples/binlog002.txt @@ -16,8 +16,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xF25D6D5AC7C18FF3 at byte 381 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-22 07:21:59 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -45,8 +44,7 @@ create database d\G # Query 2: 0 QPS, 0x concurrency, ID 0x03409022EB8A4AE7 at byte 795 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-22 07:22:16 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -72,8 +70,7 @@ create table foo (i int)\G # Query 3: 0 QPS, 0x concurrency, ID 0xF579EC4A9633EEA0 at byte 973 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-22 07:22:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -95,8 +92,8 @@ create table foo (i int)\G insert foo values (1) /*... 
omitted ...*/\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xF25D6D5AC7C18FF3 0.0000 0.0% 1 0.0000 1.00 0.00 CREATE DATABASE d -# 2 0x03409022EB8A4AE7 0.0000 0.0% 1 0.0000 1.00 0.00 CREATE TABLE foo -# 3 0xF579EC4A9633EEA0 0.0000 0.0% 1 0.0000 1.00 0.00 INSERT +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0xF25D6D5AC7C18FF3 0.0000 0.0% 1 0.0000 0.00 CREATE DATABASE d +# 2 0x03409022EB8A4AE7 0.0000 0.0% 1 0.0000 0.00 CREATE TABLE foo +# 3 0xF579EC4A9633EEA0 0.0000 0.0% 1 0.0000 0.00 INSERT diff --git a/t/pt-query-digest/samples/cannot-distill-profile.txt b/t/pt-query-digest/samples/cannot-distill-profile.txt index de462a62..60a42713 100644 --- a/t/pt-query-digest/samples/cannot-distill-profile.txt +++ b/t/pt-query-digest/samples/cannot-distill-profile.txt @@ -1,6 +1,6 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5B721CAE3EDDB56B 0.0900 69.1% 1 0.0900 1.00 0.00 -# 2 0xBE90A42C0FB7E89E 0.0403 30.9% 1 0.0403 1.00 0.00 +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =========== +# 1 0x5B721CAE3EDDB56B 0.0900 69.1% 1 0.0900 0.00 +# 2 0xBE90A42C0FB7E89E 0.0403 30.9% 1 0.0403 0.00 diff --git a/t/pt-query-digest/samples/genlog001.txt b/t/pt-query-digest/samples/genlog001.txt index a08e68e9..63b9ce91 100644 --- a/t/pt-query-digest/samples/genlog001.txt +++ b/t/pt-query-digest/samples/genlog001.txt @@ -7,8 +7,7 @@ # Query size 315 27 124 45 118.34 31.33 28.75 # Query 1: 0.00 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 244 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: 2005-10-07 21:55:24 to 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -31,8 +30,7 @@ administrator command: Connect\G # Query 2: 0.00 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 464 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: 2005-10-07 21:55:24 to 2006-12-26 16:44:48 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -53,8 +51,7 @@ administrator command: Connect\G administrator command: Quit\G # Query 3: 0 QPS, 0x concurrency, ID 0x4D096479916B0F45 at byte 346 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -79,8 +76,7 @@ administrator command: Quit\G SELECT DISTINCT col FROM tbl WHERE foo=20061219\G # Query 4: 0 QPS, 0x concurrency, ID 0x44AAC79F41BCF692 at byte 58 _______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -108,8 +104,7 @@ SELECT foo ORDER BY col\G # Query 5: 0 QPS, 0x concurrency, ID 0x44AE35A182869033 at byte 300 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# 
Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -130,10 +125,10 @@ SELECT foo administrator command: Init DB\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5D51E5F01B88B79E 0.0000 0.0% 2 0.0000 1.00 0.00 ADMIN CONNECT -# 2 0xAA353644DE4C4CB4 0.0000 0.0% 2 0.0000 1.00 0.00 ADMIN QUIT -# 3 0x4D096479916B0F45 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT tbl -# 4 0x44AAC79F41BCF692 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT tbl -# 5 0x44AE35A182869033 0.0000 0.0% 1 0.0000 1.00 0.00 ADMIN INIT DB +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ============= +# 1 0x5D51E5F01B88B79E 0.0000 0.0% 2 0.0000 0.00 ADMIN CONNECT +# 2 0xAA353644DE4C4CB4 0.0000 0.0% 2 0.0000 0.00 ADMIN QUIT +# 3 0x4D096479916B0F45 0.0000 0.0% 1 0.0000 0.00 SELECT tbl +# 4 0x44AAC79F41BCF692 0.0000 0.0% 1 0.0000 0.00 SELECT tbl +# 5 0x44AE35A182869033 0.0000 0.0% 1 0.0000 0.00 ADMIN INIT DB diff --git a/t/pt-query-digest/samples/genlog002.txt b/t/pt-query-digest/samples/genlog002.txt index 5afe0258..fb0937bc 100644 --- a/t/pt-query-digest/samples/genlog002.txt +++ b/t/pt-query-digest/samples/genlog002.txt @@ -8,8 +8,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x2361B36A4AEB397B at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-02-11 00:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,8 +34,7 @@ SELECT category_id # Query 2: 0 QPS, 0x concurrency, ID 0x0A3E6DCD23F3445A at byte 237 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-02-11 00:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -71,7 +69,7 @@ SELECT auction_id, auction_title_en AS title, close_time, LIMIT 500\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x2361B36A4AEB397B 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT auction_category_map -# 2 0x0A3E6DCD23F3445A 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT auction_search +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x2361B36A4AEB397B 0.0000 0.0% 1 0.0000 0.00 SELECT auction_category_map +# 2 0x0A3E6DCD23F3445A 0.0000 0.0% 1 0.0000 0.00 SELECT auction_search diff --git a/t/pt-query-digest/samples/genlog003.txt b/t/pt-query-digest/samples/genlog003.txt index 5330867f..98de2ae3 100644 --- a/t/pt-query-digest/samples/genlog003.txt +++ b/t/pt-query-digest/samples/genlog003.txt @@ -7,8 +7,7 @@ # Query size 315 27 124 45 118.34 31.33 28.75 # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 246 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -31,8 +30,7 @@ administrator command: Connect\G # Query 2: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 466 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -53,8 +51,7 @@ administrator command: Connect\G administrator command: Quit\G # Query 3: 0 QPS, 0x concurrency, ID 0x4D096479916B0F45 at byte 348 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -79,8 +76,7 @@ administrator command: Quit\G SELECT DISTINCT col FROM tbl WHERE foo=20061219\G # Query 4: 0 QPS, 0x concurrency, ID 0x44AAC79F41BCF692 at byte 60 _______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -108,8 +104,7 @@ SELECT foo ORDER BY col\G # Query 5: 0 QPS, 0x concurrency, ID 0x44AE35A182869033 at byte 302 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -130,10 +125,10 @@ SELECT foo administrator command: Init DB\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5D51E5F01B88B79E 0.0000 0.0% 2 0.0000 1.00 0.00 ADMIN CONNECT -# 2 
0xAA353644DE4C4CB4 0.0000 0.0% 2 0.0000 1.00 0.00 ADMIN QUIT -# 3 0x4D096479916B0F45 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT tbl -# 4 0x44AAC79F41BCF692 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT tbl -# 5 0x44AE35A182869033 0.0000 0.0% 1 0.0000 1.00 0.00 ADMIN INIT DB +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ============= +# 1 0x5D51E5F01B88B79E 0.0000 0.0% 2 0.0000 0.00 ADMIN CONNECT +# 2 0xAA353644DE4C4CB4 0.0000 0.0% 2 0.0000 0.00 ADMIN QUIT +# 3 0x4D096479916B0F45 0.0000 0.0% 1 0.0000 0.00 SELECT tbl +# 4 0x44AAC79F41BCF692 0.0000 0.0% 1 0.0000 0.00 SELECT tbl +# 5 0x44AE35A182869033 0.0000 0.0% 1 0.0000 0.00 ADMIN INIT DB diff --git a/t/pt-query-digest/samples/http_tcpdump002.txt b/t/pt-query-digest/samples/http_tcpdump002.txt index 4d48f8a7..8fdd14fc 100644 --- a/t/pt-query-digest/samples/http_tcpdump002.txt +++ b/t/pt-query-digest/samples/http_tcpdump002.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xFB0C089DD4451762 at byte 59213 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:09.411349 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -25,8 +24,7 @@ get www.percona.com/images/menu_our-vision.gif # Query 2: 0 QPS, 0x concurrency, ID 0x7C3AA9143C98C14A at byte 206 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:09.074855 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -50,8 +48,7 @@ get www.percona.com/images/menu_our-vision.gif get www.percona.com/about-us.html # Query 3: 0 QPS, 0x concurrency, ID 0x7CC09CE55CB7750C at byte 16362 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:09.157215 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -75,8 +72,7 @@ get www.percona.com/about-us.html get www.percona.com/js/jquery.js # Query 4: 0 QPS, 0x concurrency, ID 0x44C0C94594575296 at byte 65644 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:09.420851 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -100,8 +96,7 @@ get www.percona.com/js/jquery.js get www.percona.com/images/bg-gray-corner-top.gif # Query 5: 0 QPS, 0x concurrency, ID 0x08207FBDE8A42C36 at byte 67956 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:09.420996 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -125,8 +120,7 @@ get www.percona.com/images/bg-gray-corner-top.gif get www.percona.com/images/handshake.jpg # Query 6: 0 QPS, 0x concurrency, ID 0x4F1E2B5E822F55B8 at byte 53100 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:09.346763 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= 
======= ======= ======= ======= ======= @@ -150,8 +144,7 @@ get www.percona.com/images/handshake.jpg get www.percona.com/images/menu_team.gif # Query 7: 0 QPS, 0x concurrency, ID 0x7FB624EE10D71E1F at byte 170117 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:14.737890 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -175,8 +168,7 @@ get www.percona.com/images/menu_team.gif get hit.clickaider.com/s/forms.js # Query 8: 0 QPS, 0x concurrency, ID 0x1279DE4968C95A8D at byte 147447 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:14.536149 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -200,8 +192,7 @@ get hit.clickaider.com/s/forms.js get hit.clickaider.com/clickaider.js # Query 9: 0 QPS, 0x concurrency, ID 0x590BE2A84B8F0D5B at byte 167245 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:14.678713 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -225,8 +216,7 @@ get hit.clickaider.com/clickaider.js get hit.clickaider.com/pv?lng=140&&lnks=&t=About%20Percona&c=73a41b95-2926&r=http%3A%2F%2Fwww.percona.com%2F&tz=-420&loc=http%3A%2F%2Fwww.percona.com%2Fabout-us.html&rnd=3688 # Query 10: 0 QPS, 0x concurrency, ID 0xFC5C4A690D695F35 at byte 55942 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-11-09 15:31:09.373800 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/issue_1196-output-5.0.txt b/t/pt-query-digest/samples/issue_1196-output-5.0.txt index 17eaefa0..8d0edc99 100644 --- a/t/pt-query-digest/samples/issue_1196-output-5.0.txt +++ b/t/pt-query-digest/samples/issue_1196-output-5.0.txt @@ -1,14 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ======== -# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 1.00 0.00 TF>aa SELECT t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ======== +# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 0.00 SELECT t # Query 1: 0 QPS, 0x concurrency, ID 0xD4B6A5CD2F2F485C at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aa -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-12-14 16:12:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/issue_1196-output-5.6.txt b/t/pt-query-digest/samples/issue_1196-output-5.6.txt index bd69285f..860ec6b5 100644 --- a/t/pt-query-digest/samples/issue_1196-output-5.6.txt +++ b/t/pt-query-digest/samples/issue_1196-output-5.6.txt @@ -1,14 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ======== -# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 1.00 0.00 TF>aa SELECT t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ======== +# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 0.00 SELECT t # Query 1: 0 QPS, 0x concurrency, ID 0xD4B6A5CD2F2F485C at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aa -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-12-14 16:12:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/issue_1196-output.txt b/t/pt-query-digest/samples/issue_1196-output.txt index d7b2e2ef..71d380f8 100644 --- a/t/pt-query-digest/samples/issue_1196-output.txt +++ b/t/pt-query-digest/samples/issue_1196-output.txt @@ -1,14 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ======== -# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 1.00 0.00 TF>aa SELECT t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ======== +# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 0.00 SELECT t # Query 1: 0 QPS, 0x concurrency, ID 0xD4B6A5CD2F2F485C at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aa -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-12-14 16:12:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump001.txt b/t/pt-query-digest/samples/memc_tcpdump001.txt index d8966641..e300ad76 100644 --- a/t/pt-query-digest/samples/memc_tcpdump001.txt +++ b/t/pt-query-digest/samples/memc_tcpdump001.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x26193ADA9E14A97E at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-04 21:33:39.229179 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump002.txt b/t/pt-query-digest/samples/memc_tcpdump002.txt index c94b74b9..3b0720c4 100644 --- a/t/pt-query-digest/samples/memc_tcpdump002.txt +++ b/t/pt-query-digest/samples/memc_tcpdump002.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x456F2F160AF2DC0F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-04 22:12:06.174390 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump003.txt b/t/pt-query-digest/samples/memc_tcpdump003.txt index 5b950344..b0bb5ff4 100644 --- a/t/pt-query-digest/samples/memc_tcpdump003.txt +++ b/t/pt-query-digest/samples/memc_tcpdump003.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAEBF67014CC9A7C0 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-04 22:12:06.175734 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -27,8 +26,7 @@ incr key # Query 2: 0 QPS, 0x concurrency, ID 0xC03129972E1D6A1F at byte 522 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-04 22:12:06.176181 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt b/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt index a04d0331..df88fe79 100644 --- a/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt +++ b/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt @@ -5,8 +5,7 @@ # Item 1: 4.47k QPS, 0.32x concurrency, ID 0x8228B9A98CA1531D at byte 0 __ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-04 22:12:06.175734 to 22:12:06.176181 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump004.txt b/t/pt-query-digest/samples/memc_tcpdump004.txt index 27acc028..b7d8f2aa 100644 --- a/t/pt-query-digest/samples/memc_tcpdump004.txt +++ b/t/pt-query-digest/samples/memc_tcpdump004.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAEBF67014CC9A7C0 at byte 764 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-06 10:37:21.668469 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -28,8 +27,7 @@ incr key # Query 2: 0 QPS, 0x concurrency, ID 0xC03129972E1D6A1F at byte 1788 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-06 10:37:21.668851 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump005.txt b/t/pt-query-digest/samples/memc_tcpdump005.txt index da7a20c0..838774b6 100644 --- a/t/pt-query-digest/samples/memc_tcpdump005.txt +++ b/t/pt-query-digest/samples/memc_tcpdump005.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x26193ADA9E14A97E at byte 764 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-06 22:07:14.406827 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump006.txt b/t/pt-query-digest/samples/memc_tcpdump006.txt index 988b8688..04149aa8 100644 --- a/t/pt-query-digest/samples/memc_tcpdump006.txt +++ b/t/pt-query-digest/samples/memc_tcpdump006.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x456F2F160AF2DC0F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-06 22:07:14.411331 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump007.txt b/t/pt-query-digest/samples/memc_tcpdump007.txt index 93ecfa72..62e20baf 100644 --- a/t/pt-query-digest/samples/memc_tcpdump007.txt +++ b/t/pt-query-digest/samples/memc_tcpdump007.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x28C64E8A71EEAEAF at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-06-11 21:54:49.059144 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump008.txt b/t/pt-query-digest/samples/memc_tcpdump008.txt index 805de0b6..931f13dd 100644 --- a/t/pt-query-digest/samples/memc_tcpdump008.txt +++ b/t/pt-query-digest/samples/memc_tcpdump008.txt @@ -1,8 +1,7 @@ # Query 1: 645.28k QPS, 1.29x concurrency, ID 0x456F2F160AF2DC0F at byte 0 # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: |^ | +# Scores: V/M = 0.00 # Time range: 2009-07-06 22:07:14.411331 to 22:07:14.411334 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump009.txt b/t/pt-query-digest/samples/memc_tcpdump009.txt index 44a7a7ae..47340315 100644 --- a/t/pt-query-digest/samples/memc_tcpdump009.txt +++ b/t/pt-query-digest/samples/memc_tcpdump009.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6A3331FD94A66F54 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-06-11 21:54:52.244534 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump010.txt b/t/pt-query-digest/samples/memc_tcpdump010.txt index 32f7de80..ae021d35 100644 --- a/t/pt-query-digest/samples/memc_tcpdump010.txt +++ b/t/pt-query-digest/samples/memc_tcpdump010.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x3D1AED9A2A3A73C8 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-09 22:00:29.066476 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/output_json_slow002.txt b/t/pt-query-digest/samples/output_json_slow002.txt new file mode 100644 index 00000000..f8604c65 --- /dev/null +++ b/t/pt-query-digest/samples/output_json_slow002.txt @@ -0,0 +1,203 @@ + +[ + { + "attributes" : { + "Filesort" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Filesort_on_disk" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Full_join" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Full_scan" : { + "avg" : 1, + "cnt" : 1, + "max" : 1, + "median" : 0, + "min" : 1, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 1 + }, + "Lock_time" : { + "avg" : "0.000091", + "cnt" : "1.000000", + "max" : "0.000091", + "median" : "0.000091", + "min" : "0.000091", + "pct" : "0.12", + "pct_95" : "0.000091", + "stddev" : 0, + "sum" : "0.000091" + }, + "Merge_passes" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : "0", + "median" : 0, + "min" : "0", + "pct" : "0.12", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "QC_Hit" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Query_time" : { + "avg" : "0.726052", + "cnt" : "1.000000", + "max" : "0.726052", + "median" : "0.726052", + "min" : "0.726052", + "pct" : "0.12", + "pct_95" : "0.726052", + "stddev" : 0, + "sum" : "0.726052" + }, + "Rows_examined" : { + "avg" : "62951.000000", + "cnt" : "1.000000", + "max" : "62951.000000", + "median" : "62951.000000", + "min" : "62951.000000", + "pct" : "0.12", + "pct_95" : "62951.000000", + "stddev" : 0, + "sum" : "62951.000000" + }, + 
"Rows_sent" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : "0", + "median" : 0, + "min" : "0", + "pct" : "0.12", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Tmp_table" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Tmp_table_on_disk" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "bytes" : { + "avg" : "129.000000", + "cnt" : "1.000000", + "max" : "129.000000", + "median" : "129.000000", + "min" : "129.000000", + "pct" : "0.12", + "pct_95" : "129.000000", + "stddev" : 0, + "sum" : "129.000000" + }, + "db" : { + "avg" : 0, + "cnt" : 1, + "max" : "db1", + "median" : 0, + "min" : "db1", + "pct" : 0.142857142857143, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "host" : { + "avg" : 0, + "cnt" : 1, + "max" : "", + "median" : 0, + "min" : "", + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "pos_in_log" : { + "avg" : "338.000000", + "cnt" : "1.000000", + "max" : "338.000000", + "median" : "338.000000", + "min" : "338.000000", + "pct" : "0.12", + "pct_95" : "338.000000", + "stddev" : 0, + "sum" : "338.000000" + }, + "user" : { + "avg" : 0, + "cnt" : 1, + "max" : "[SQL_SLAVE]", + "median" : 0, + "min" : "[SQL_SLAVE]", + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + } + }, + "class" : { + "checksum" : "66825DDC008FFA89", + "cnt" : 1, + "fingerprint" : "update d?tuningdetail_?_? n inner join d?gonzo a using(gonzo) set n.column? = a.column?, n.word? = a.word?", + "sample" : "update db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \n set n.column1 = a.column1, n.word3 = a.word3", + "ts_max" : "2007-12-18 11:48:27", + "ts_min" : "2007-12-18 11:48:27" + } + } +] + diff --git a/t/pt-query-digest/samples/output_json_tcpdump021.txt b/t/pt-query-digest/samples/output_json_tcpdump021.txt new file mode 100644 index 00000000..1f5b8826 --- /dev/null +++ b/t/pt-query-digest/samples/output_json_tcpdump021.txt @@ -0,0 +1,359 @@ + +[ + { + "attributes" : { + "Error_no" : { + "avg" : 0, + "cnt" : 1, + "max" : "none", + "median" : 0, + "min" : "none", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "No_good_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "No_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Query_time" : { + "avg" : "0.000286", + "cnt" : "1.000000", + "max" : "0.000286", + "median" : "0.000286", + "min" : "0.000286", + "pct" : "0.33", + "pct_95" : "0.000286", + "stddev" : 0, + "sum" : "0.000286" + }, + "Rows_affected" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Statement_id" : { + "avg" : 0, + "cnt" : 1, + "max" : 2, + "median" : 0, + "min" : 2, + "pct" : 0.5, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "Warning_count" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "bytes" : { + "avg" : "35.000000", + "cnt" : "1.000000", + "max" : "35.000000", + "median" : "35.000000", + "min" : "35.000000", + "pct" : "0.33", + "pct_95" : "35.000000", + "stddev" : 0, + "sum" : 
"35.000000" + }, + "host" : { + "avg" : 0, + "cnt" : 1, + "max" : "127.0.0.1", + "median" : 0, + "min" : "127.0.0.1", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "pos_in_log" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + } + }, + "class" : { + "checksum" : "AA8E9FA785927259", + "cnt" : 1, + "fingerprint" : "prepare select i from d.t where i=?", + "sample" : "PREPARE SELECT i FROM d.t WHERE i=?", + "ts_max" : "2009-12-08 09:23:49.637394", + "ts_min" : "2009-12-08 09:23:49.637394" + } + }, + { + "attributes" : { + "Error_no" : { + "avg" : 0, + "cnt" : 1, + "max" : "none", + "median" : 0, + "min" : "none", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "No_good_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "No_index_used" : { + "avg" : 1, + "cnt" : 1, + "max" : 1, + "median" : 0, + "min" : 1, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 1 + }, + "Query_time" : { + "avg" : "0.000281", + "cnt" : "1.000000", + "max" : "0.000281", + "median" : "0.000281", + "min" : "0.000281", + "pct" : "0.33", + "pct_95" : "0.000281", + "stddev" : 0, + "sum" : "0.000281" + }, + "Rows_affected" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Statement_id" : { + "avg" : 0, + "cnt" : 1, + "max" : "2", + "median" : 0, + "min" : "2", + "pct" : 0.5, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "Warning_count" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "bytes" : { + "avg" : "37.000000", + "cnt" : "1.000000", + "max" : "37.000000", + "median" : "37.000000", + "min" : "37.000000", + "pct" : "0.33", + "pct_95" : "37.000000", + "stddev" : 0, + "sum" : "37.000000" + }, + "host" : { + "avg" : 0, + "cnt" : 1, + "max" : "127.0.0.1", + "median" : 0, + "min" : "127.0.0.1", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "pos_in_log" : { + "avg" : "1106.000000", + "cnt" : "1.000000", + "max" : "1106.000000", + "median" : "1106.000000", + "min" : "1106.000000", + "pct" : "0.33", + "pct_95" : "1106.000000", + "stddev" : 0, + "sum" : "1106.000000" + } + }, + "class" : { + "checksum" : "3F79759E7FA2F117", + "cnt" : 1, + "fingerprint" : "execute select i from d.t where i=?", + "sample" : "EXECUTE SELECT i FROM d.t WHERE i=\"3\"", + "ts_max" : "2009-12-08 09:23:49.637892", + "ts_min" : "2009-12-08 09:23:49.637892" + } + }, + { + "attributes" : { + "Error_no" : { + "avg" : 0, + "cnt" : 1, + "max" : "none", + "median" : 0, + "min" : "none", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "No_good_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "No_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Query_time" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : "0.000000", + "median" : "0.000000", + "min" : "0.000000", + "pct" : "0.33", + "pct_95" : "0.000000", + "stddev" : 0, + "sum" : 0 + }, + "Rows_affected" : { + 
"avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Warning_count" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "bytes" : { + "avg" : "27.000000", + "cnt" : "1.000000", + "max" : "27.000000", + "median" : "27.000000", + "min" : "27.000000", + "pct" : "0.33", + "pct_95" : "27.000000", + "stddev" : 0, + "sum" : "27.000000" + }, + "host" : { + "avg" : 0, + "cnt" : 1, + "max" : "127.0.0.1", + "median" : 0, + "min" : "127.0.0.1", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "pos_in_log" : { + "avg" : "1850.000000", + "cnt" : "1.000000", + "max" : "1850.000000", + "median" : "1850.000000", + "min" : "1850.000000", + "pct" : "0.33", + "pct_95" : "1850.000000", + "stddev" : 0, + "sum" : "1850.000000" + } + }, + "class" : { + "checksum" : "AA353644DE4C4CB4", + "cnt" : 1, + "fingerprint" : "administrator command: Quit", + "sample" : "administrator command: Quit", + "ts_max" : "2009-12-08 09:23:49.638381", + "ts_min" : "2009-12-08 09:23:49.638381" + } + } +] + diff --git a/t/pt-query-digest/samples/pg-sample1 b/t/pt-query-digest/samples/pg-sample1 index 7339202e..d8030e05 100644 --- a/t/pt-query-digest/samples/pg-sample1 +++ b/t/pt-query-digest/samples/pg-sample1 @@ -1,25 +1,25 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x949BAEB72FDE23A2 1.4501 33.3% 16 0.0906 1.00 0.00 SELECT stats_cvs_group -# 2 0x8FFEBD609B778EB2 0.5053 11.6% 45 0.0112 1.00 0.10 INSERT activity_log -# 3 0x64F8E6F000640AF8 0.3750 8.6% 5 0.0750 1.00 0.00 SELECT users -# 4 0x22375E33FDA4E899 0.3705 8.5% 1 0.3705 1.00 0.00 SELECT ONLY OF -# 5 0x60D6962E42C08882 0.1020 2.3% 46 0.0022 1.00 0.00 SELECT plugins -# 6 0x32AF9886FDBBAE30 0.0981 2.3% 38 0.0026 1.00 0.00 SELECT frs_filetype frs_processor frs_file frs_dlstats_filetotal_agg -# 7 0x5E64B4F52EC23D71 0.0936 2.1% 17 0.0055 1.00 0.01 SELECT trove_cat trove_group_link -# 8 0x1929E67B76DC55E7 0.0877 2.0% 5 0.0175 1.00 0.00 SELECT frs_dlstats_grouptotal_vw groups -# 9 0x1451AE69DBB6E0F2 0.0780 1.8% 1 0.0780 1.00 0.00 SELECT users -# 10 0xD7884E7E471BB089 0.0722 1.7% 61 0.0012 1.00 0.00 SELECT forum_group_list_vw -# 11 0x9DBDF5FB59454957 0.0612 1.4% 5 0.0122 1.00 0.00 SELECT users news_bytes groups -# 12 0x834CC93BAA549DD4 0.0609 1.4% 17 0.0036 1.00 0.00 SELECT users user_group -# 13 0xEF691689ACF9DC59 0.0595 1.4% 10 0.0059 1.00 0.00 SELECT frs_package frs_release frs_file groups -# 14 0x10D09F1381004A22 0.0582 1.3% 17 0.0034 1.00 0.00 SELECT groups -# 15 0xCF439D1EC0933550 0.0579 1.3% 2 0.0290 1.00 0.04 SELECT pg_catalog.pg_class pg_catalog.pg_namespace -# 16 0x7D752C8A15925978 0.0544 1.2% 60 0.0009 1.00 0.00 BEGIN SELECT -# 17 0x82AEF03891943FB3 0.0514 1.2% 2 0.0257 1.00 0.03 SELECT forum_group_list_vw -# 18 0x9AA827C1DF73EE43 0.0496 1.1% 17 0.0029 1.00 0.00 SELECT users news_bytes groups -# 19 0x4636BFC0875521C9 0.0447 1.0% 40 0.0011 1.00 0.00 SELECT supported_languages -# 20 0xB1C777CE6EBFE87E 0.0434 1.0% 17 0.0026 1.00 0.00 SELECT frs_package frs_release -# MISC 0xMISC 0.5823 13.4% 334 0.0017 NS 0.0 <47 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x949BAEB72FDE23A2 1.4501 33.3% 16 0.0906 0.00 SELECT stats_cvs_group +# 2 0x8FFEBD609B778EB2 0.5053 
11.6% 45 0.0112 0.10 INSERT activity_log +# 3 0x64F8E6F000640AF8 0.3750 8.6% 5 0.0750 0.00 SELECT users +# 4 0x22375E33FDA4E899 0.3705 8.5% 1 0.3705 0.00 SELECT ONLY OF +# 5 0x60D6962E42C08882 0.1020 2.3% 46 0.0022 0.00 SELECT plugins +# 6 0x32AF9886FDBBAE30 0.0981 2.3% 38 0.0026 0.00 SELECT frs_filetype frs_processor frs_file frs_dlstats_filetotal_agg +# 7 0x5E64B4F52EC23D71 0.0936 2.1% 17 0.0055 0.01 SELECT trove_cat trove_group_link +# 8 0x1929E67B76DC55E7 0.0877 2.0% 5 0.0175 0.00 SELECT frs_dlstats_grouptotal_vw groups +# 9 0x1451AE69DBB6E0F2 0.0780 1.8% 1 0.0780 0.00 SELECT users +# 10 0xD7884E7E471BB089 0.0722 1.7% 61 0.0012 0.00 SELECT forum_group_list_vw +# 11 0x9DBDF5FB59454957 0.0612 1.4% 5 0.0122 0.00 SELECT users news_bytes groups +# 12 0x834CC93BAA549DD4 0.0609 1.4% 17 0.0036 0.00 SELECT users user_group +# 13 0xEF691689ACF9DC59 0.0595 1.4% 10 0.0059 0.00 SELECT frs_package frs_release frs_file groups +# 14 0x10D09F1381004A22 0.0582 1.3% 17 0.0034 0.00 SELECT groups +# 15 0xCF439D1EC0933550 0.0579 1.3% 2 0.0290 0.04 SELECT pg_catalog.pg_class pg_catalog.pg_namespace +# 16 0x7D752C8A15925978 0.0544 1.2% 60 0.0009 0.00 BEGIN SELECT +# 17 0x82AEF03891943FB3 0.0514 1.2% 2 0.0257 0.03 SELECT forum_group_list_vw +# 18 0x9AA827C1DF73EE43 0.0496 1.1% 17 0.0029 0.00 SELECT users news_bytes groups +# 19 0x4636BFC0875521C9 0.0447 1.0% 40 0.0011 0.00 SELECT supported_languages +# 20 0xB1C777CE6EBFE87E 0.0434 1.0% 17 0.0026 0.00 SELECT frs_package frs_release +# MISC 0xMISC 0.5823 13.4% 334 0.0017 0.0 <47 ITEMS> diff --git a/t/pt-query-digest/samples/pg-syslog-sample1 b/t/pt-query-digest/samples/pg-syslog-sample1 index 8c7f9a5c..ea03c8f7 100644 --- a/t/pt-query-digest/samples/pg-syslog-sample1 +++ b/t/pt-query-digest/samples/pg-syslog-sample1 @@ -1,12 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x43088A2EF12EB3EE 0.1661 38.2% 2 0.0830 1.00 0.11 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace -# 2 0xD6F2B77706BEEB5F 0.0710 16.3% 1 0.0710 1.00 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace -# 3 0x9213FC20E3993331 0.0464 10.7% 1 0.0464 1.00 0.00 SELECT foo -# 4 0x458CB071ADE822AC 0.0446 10.3% 6 0.0074 1.00 0.00 SELECT -# 5 0x60960AADCFD005F3 0.0426 9.8% 1 0.0426 1.00 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace -# 6 0xA99588746B4C6438 0.0283 6.5% 1 0.0283 1.00 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace -# 7 0x32A1860329937485 0.0278 6.4% 1 0.0278 1.00 0.00 SELECT pg_catalog.pg_database pg_catalog.pg_roles -# MISC 0xMISC 0.0083 1.9% 1 0.0083 NS 0.0 <1 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x43088A2EF12EB3EE 0.1661 38.2% 2 0.0830 0.11 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace +# 2 0xD6F2B77706BEEB5F 0.0710 16.3% 1 0.0710 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace +# 3 0x9213FC20E3993331 0.0464 10.7% 1 0.0464 0.00 SELECT foo +# 4 0x458CB071ADE822AC 0.0446 10.3% 6 0.0074 0.00 SELECT +# 5 0x60960AADCFD005F3 0.0426 9.8% 1 0.0426 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace +# 6 0xA99588746B4C6438 0.0283 6.5% 1 0.0283 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace +# 7 0x32A1860329937485 0.0278 6.4% 1 0.0278 0.00 SELECT pg_catalog.pg_database 
pg_catalog.pg_roles +# MISC 0xMISC 0.0083 1.9% 1 0.0083 0.0 <1 ITEMS> diff --git a/t/pt-query-digest/samples/rawlog001.txt b/t/pt-query-digest/samples/rawlog001.txt index 3f298ac0..8227959f 100644 --- a/t/pt-query-digest/samples/rawlog001.txt +++ b/t/pt-query-digest/samples/rawlog001.txt @@ -7,8 +7,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xCB5621E548E5497F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 1 @@ -31,8 +30,7 @@ SELECT c FROM t WHERE id=1\G # Query 2: 0 QPS, 0x concurrency, ID 0x774B2B0B59EBAC2C at byte 27 _______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 1 @@ -54,7 +52,7 @@ SELECT c FROM t WHERE id=1\G /* Hello, world! */ SELECT * FROM t2 LIMIT 1\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========= -# 1 0xCB5621E548E5497F 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT t -# 2 0x774B2B0B59EBAC2C 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT t? +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========= +# 1 0xCB5621E548E5497F 0.0000 0.0% 1 0.0000 0.00 SELECT t +# 2 0x774B2B0B59EBAC2C 0.0000 0.0% 1 0.0000 0.00 SELECT t? diff --git a/t/pt-query-digest/samples/slow001_distillreport.txt b/t/pt-query-digest/samples/slow001_distillreport.txt index 02f2165e..757c6152 100644 --- a/t/pt-query-digest/samples/slow001_distillreport.txt +++ b/t/pt-query-digest/samples/slow001_distillreport.txt @@ -5,8 +5,7 @@ # Item 1: 0 QPS, 0x concurrency, ID 0x82E67ABEEDCA3249 at byte 0 _________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -33,8 +32,7 @@ SELECT n # Item 2: 0 QPS, 0x concurrency, ID 0x7AD070CD3F4121D5 at byte 359 _______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:45:10 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow001_report.txt b/t/pt-query-digest/samples/slow001_report.txt index 32440146..d85429cb 100644 --- a/t/pt-query-digest/samples/slow001_report.txt +++ b/t/pt-query-digest/samples/slow001_report.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 0 ________ -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -31,8 +30,7 @@ select sleep(2) from n\G # Query 2: 0 QPS, 0x concurrency, ID 0x3A99CC42AEDCCFCD at byte 359 ______ -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:45:10 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow001_select_report.txt b/t/pt-query-digest/samples/slow001_select_report.txt index ae5b49c1..27d29433 100644 --- a/t/pt-query-digest/samples/slow001_select_report.txt +++ b/t/pt-query-digest/samples/slow001_select_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 1 @@ -24,8 +23,7 @@ select sleep(2) from n\G # Query 2: 0 QPS, 0x concurrency, ID 0x3A99CC42AEDCCFCD at byte 359 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 1 diff --git a/t/pt-query-digest/samples/slow001_tablesreport.txt b/t/pt-query-digest/samples/slow001_tablesreport.txt index 7e92c56b..5dc70bf3 100644 --- a/t/pt-query-digest/samples/slow001_tablesreport.txt +++ b/t/pt-query-digest/samples/slow001_tablesreport.txt @@ -5,8 +5,7 @@ # Item 1: 0.03 QPS, 0.05x concurrency, ID 0x1161D7068EB79526 at byte 0 ___ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-10-15 21:43:52 to 21:45:10 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow002-orderbynonexistent.txt b/t/pt-query-digest/samples/slow002-orderbynonexistent.txt index 96083e5f..b6cc3a83 100644 --- a/t/pt-query-digest/samples/slow002-orderbynonexistent.txt +++ b/t/pt-query-digest/samples/slow002-orderbynonexistent.txt @@ -1,8 +1,7 @@ --order-by attribute Rows_read doesn't exist, using Query_time:sum # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -13,7 +12,6 @@ # Rows examine 100 61.48k 61.48k 61.48k 61.48k 61.48k 0 61.48k # Merge passes 0 0 0 0 0 0 0 0 # Query size 25 129 129 129 129 129 0 129 -# InnoDB: # Boolean: # Full scan 100% yes, 0% no # String: @@ -43,8 +41,7 @@ select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 inner join db1.gonzo a using(gonzo) \G # Query 2: 0 QPS, 0x concurrency, ID 0x0FFE94ABA6A2A9E8 at byte 1334 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -86,8 +83,7 @@ WHERE vab3concept1upload='6994465'\G select vab3concept1id = '91848182522' from db4.vab3concept1upload where vab3concept1upload='6994465'\G # Query 3: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 2393 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -128,8 +124,7 @@ SET biz = '91848182522'\G select biz = '91848182522' from foo.bar \G # Query 4: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -171,8 +166,7 @@ WHERE fillze='899'\G select boop='bop: 899' from bizzle.bat where fillze='899'\G # Query 5: 0 QPS, 0x concurrency, ID 0xC22D235B07D1D774 at byte 1864 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -210,8 +204,7 @@ INSERT INTO db1.conch (word3, vid83) VALUES ('211', '18')\G # Query 6: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 815 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ 
-249,8 +242,7 @@ INSERT INTO db3.vendor11gonzo (makef, bizzle) VALUES ('', 'Exact')\G # Query 7: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -261,7 +253,6 @@ VALUES ('', 'Exact')\G # Rows examine 0 0 0 0 0 0 0 0 # Merge passes 0 0 0 0 0 0 0 0 # Query size 0 5 5 5 5 5 0 5 -# InnoDB: # String: # Hosts # Users [SQL_SLAVE] diff --git a/t/pt-query-digest/samples/slow002_iters_2.txt b/t/pt-query-digest/samples/slow002_iters_2.txt index 978f6d2d..6bd7c087 100644 --- a/t/pt-query-digest/samples/slow002_iters_2.txt +++ b/t/pt-query-digest/samples/slow002_iters_2.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -13,7 +12,6 @@ # Rows examine 100 61.48k 61.48k 61.48k 61.48k 61.48k 0 61.48k # Merge passes 0 0 0 0 0 0 0 0 # Query size 25 129 129 129 129 129 0 129 -# InnoDB: # Boolean: # Full scan 100% yes, 0% no # String: @@ -43,7 +41,7 @@ select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 inner join db1.gonzo a using(gonzo) \G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x66825DDC008FFA89 0.7261 95.3% 1 0.7261 1.00 0.00 UPDATE db?.tuningdetail_?_? db?.gonzo -# MISC 0xMISC 0.0360 4.7% 7 0.0051 NS 0.0 <6 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x66825DDC008FFA89 0.7261 95.3% 1 0.7261 0.00 UPDATE db?.tuningdetail_?_? db?.gonzo +# MISC 0xMISC 0.0360 4.7% 7 0.0051 0.0 <6 ITEMS> diff --git a/t/pt-query-digest/samples/slow002_orderbyreport.txt b/t/pt-query-digest/samples/slow002_orderbyreport.txt index 1c2c67fe..23cd33f2 100644 --- a/t/pt-query-digest/samples/slow002_orderbyreport.txt +++ b/t/pt-query-digest/samples/slow002_orderbyreport.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 3374 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -44,8 +43,7 @@ select biz = '91848182522' from foo.bar \G # Query 2: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -56,7 +54,6 @@ select biz = '91848182522' from foo.bar \G # Rows examine 100 61.48k 61.48k 61.48k 61.48k 61.48k 0 61.48k # Merge passes 0 0 0 0 0 0 0 0 # Query size 25 129 129 129 129 129 0 129 -# InnoDB: # Boolean: # Full scan 100% yes, 0% no # String: diff --git a/t/pt-query-digest/samples/slow002_report.txt b/t/pt-query-digest/samples/slow002_report.txt index 5482aa14..0426d96a 100644 --- a/t/pt-query-digest/samples/slow002_report.txt +++ b/t/pt-query-digest/samples/slow002_report.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -12,7 +11,6 @@ # Rows examine 100 61.48k 61.48k 61.48k 61.48k 61.48k 0 61.48k # Merge passes 0 0 0 0 0 0 0 0 # Query size 25 129 129 129 129 129 0 129 -# InnoDB: # Boolean: # Full scan 100% yes, 0% no # String: @@ -42,8 +40,7 @@ select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 inner join db1.gonzo a using(gonzo) \G # Query 2: 0 QPS, 0x concurrency, ID 0x0FFE94ABA6A2A9E8 at byte 1334 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -85,8 +82,7 @@ WHERE vab3concept1upload='6994465'\G select vab3concept1id = '91848182522' from db4.vab3concept1upload where vab3concept1upload='6994465'\G # Query 3: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 3374 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -127,8 +123,7 @@ SET biz = '91848182522'\G select biz = '91848182522' from foo.bar \G # Query 4: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -170,8 +165,7 @@ WHERE fillze='899'\G select boop='bop: 899' from bizzle.bat where fillze='899'\G # Query 5: 0 QPS, 0x concurrency, ID 0xC22D235B07D1D774 at byte 1864 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -209,8 +203,7 @@ INSERT INTO db1.conch (word3, vid83) VALUES ('211', '18')\G # Query 6: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 815 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 
2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -248,8 +241,7 @@ INSERT INTO db3.vendor11gonzo (makef, bizzle) VALUES ('', 'Exact')\G # Query 7: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -260,7 +252,6 @@ VALUES ('', 'Exact')\G # Rows examine 0 0 0 0 0 0 0 0 # Merge passes 0 0 0 0 0 0 0 0 # Query size 0 5 5 5 5 5 0 5 -# InnoDB: # String: # Hosts # Users [SQL_SLAVE] diff --git a/t/pt-query-digest/samples/slow002_report_filtered.txt b/t/pt-query-digest/samples/slow002_report_filtered.txt index e9d7411c..294fdd7f 100644 --- a/t/pt-query-digest/samples/slow002_report_filtered.txt +++ b/t/pt-query-digest/samples/slow002_report_filtered.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow003_report.txt b/t/pt-query-digest/samples/slow003_report.txt index df294d96..01bb734f 100644 --- a/t/pt-query-digest/samples/slow003_report.txt +++ b/t/pt-query-digest/samples/slow003_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow004_report.txt b/t/pt-query-digest/samples/slow004_report.txt index c9af29c6..1d44affc 100644 --- a/t/pt-query-digest/samples/slow004_report.txt +++ b/t/pt-query-digest/samples/slow004_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xB16C9E5B3D9C484F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006-order-by-re.txt b/t/pt-query-digest/samples/slow006-order-by-re.txt index 7df9b322..80e01a69 100644 --- a/t/pt-query-digest/samples/slow006-order-by-re.txt +++ b/t/pt-query-digest/samples/slow006-order-by-re.txt @@ -1,8 +1,7 @@ # Query 1: 0.05 QPS, 0x concurrency, ID 0xA20C29AF174CE545 at byte 1833 __ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,8 +33,7 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0x concurrency, ID 0xD4CD74934382A184 at byte 1469 __ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_AR_1.txt b/t/pt-query-digest/samples/slow006_AR_1.txt index dad9c923..481c2715 100644 --- a/t/pt-query-digest/samples/slow006_AR_1.txt +++ b/t/pt-query-digest/samples/slow006_AR_1.txt @@ -1,8 +1,7 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -40,8 +39,7 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_AR_2.txt b/t/pt-query-digest/samples/slow006_AR_2.txt index c4338fc7..dac2e8ba 100644 --- a/t/pt-query-digest/samples/slow006_AR_2.txt +++ b/t/pt-query-digest/samples/slow006_AR_2.txt @@ -2,8 +2,7 @@ # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_AR_4.txt b/t/pt-query-digest/samples/slow006_AR_4.txt index 22372b61..fa2361c4 100644 --- a/t/pt-query-digest/samples/slow006_AR_4.txt +++ b/t/pt-query-digest/samples/slow006_AR_4.txt @@ -1,8 +1,7 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -40,8 +39,7 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_AR_5.txt b/t/pt-query-digest/samples/slow006_AR_5.txt index 4705e25c..cef99266 100644 --- a/t/pt-query-digest/samples/slow006_AR_5.txt +++ b/t/pt-query-digest/samples/slow006_AR_5.txt @@ -2,8 +2,7 @@ # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_report.txt b/t/pt-query-digest/samples/slow006_report.txt index 76359986..4f5fa94c 100644 --- a/t/pt-query-digest/samples/slow006_report.txt +++ b/t/pt-query-digest/samples/slow006_report.txt @@ -1,8 +1,7 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,8 +33,7 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_1-51.txt b/t/pt-query-digest/samples/slow007_explain_1-51.txt index 9d37d8c1..2e9c9c61 100644 --- a/t/pt-query-digest/samples/slow007_explain_1-51.txt +++ b/t/pt-query-digest/samples/slow007_explain_1-51.txt @@ -1,9 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: s -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_1-55.txt b/t/pt-query-digest/samples/slow007_explain_1-55.txt index 267ec4c8..a74ee132 100644 --- a/t/pt-query-digest/samples/slow007_explain_1-55.txt +++ b/t/pt-query-digest/samples/slow007_explain_1-55.txt @@ -1,9 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: I -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_1.txt b/t/pt-query-digest/samples/slow007_explain_1.txt index 8bdfeceb..8c2ce5a0 100644 --- a/t/pt-query-digest/samples/slow007_explain_1.txt +++ b/t/pt-query-digest/samples/slow007_explain_1.txt @@ -1,9 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: s -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_2-51.txt b/t/pt-query-digest/samples/slow007_explain_2-51.txt index 4f89e6a1..511c5924 100644 --- a/t/pt-query-digest/samples/slow007_explain_2-51.txt +++ b/t/pt-query-digest/samples/slow007_explain_2-51.txt @@ -1,9 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: I -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_2.txt b/t/pt-query-digest/samples/slow007_explain_2.txt index 17ab592f..3cfac091 100644 --- a/t/pt-query-digest/samples/slow007_explain_2.txt +++ b/t/pt-query-digest/samples/slow007_explain_2.txt @@ -1,9 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: I -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_3.txt b/t/pt-query-digest/samples/slow007_explain_3.txt index 424b6b5f..91235c91 100644 --- a/t/pt-query-digest/samples/slow007_explain_3.txt +++ b/t/pt-query-digest/samples/slow007_explain_3.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,6 +33,6 @@ SELECT fruit FROM trees\G # EXPLAIN failed: DBD::mysql::st execute failed: Table 'food.trees' doesn't exist [for Statement "EXPLAIN /*!50100 PARTITIONS */ SELECT fruit FROM trees"] at line ?. 
# Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ========= ======== -# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT trees +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ============ +# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 0.00 SELECT trees diff --git a/t/pt-query-digest/samples/slow007_explain_4.txt b/t/pt-query-digest/samples/slow007_explain_4.txt index e4fe95e8..42013836 100644 --- a/t/pt-query-digest/samples/slow007_explain_4.txt +++ b/t/pt-query-digest/samples/slow007_explain_4.txt @@ -1,5 +1,5 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ========== -# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 1.00 0.00 I SELECT trees +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ============ +# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 0.00 SELECT trees diff --git a/t/pt-query-digest/samples/slow008_report.txt b/t/pt-query-digest/samples/slow008_report.txt index e91388f9..f4bb7458 100644 --- a/t/pt-query-digest/samples/slow008_report.txt +++ b/t/pt-query-digest/samples/slow008_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xC72BF45D68E35A6E at byte 435 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 @@ -32,8 +31,7 @@ SELECT MIN(id),MAX(id) FROM tbl\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 @@ -59,8 +57,7 @@ SET NAMES utf8\G # Query 3: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: |^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 diff --git a/t/pt-query-digest/samples/slow010_reportbyfile.txt b/t/pt-query-digest/samples/slow010_reportbyfile.txt index 2e2d4215..de0b5fa3 100644 --- a/t/pt-query-digest/samples/slow010_reportbyfile.txt +++ b/t/pt-query-digest/samples/slow010_reportbyfile.txt @@ -5,8 +5,7 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xE0976A52E15A18AC at byte 0 _________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow011_report.txt b/t/pt-query-digest/samples/slow011_report.txt index 577ee8b7..d624a280 100644 --- a/t/pt-query-digest/samples/slow011_report.txt +++ b/t/pt-query-digest/samples/slow011_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.02 -# Query_time sparkline: |^ ^ | +# Scores: V/M = 0.02 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 2 @@ -28,8 +27,7 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 663 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 2 diff --git a/t/pt-query-digest/samples/slow013_report.txt b/t/pt-query-digest/samples/slow013_report.txt index 48daf735..3e8d479b 100644 --- a/t/pt-query-digest/samples/slow013_report.txt +++ b/t/pt-query-digest/samples/slow013_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -29,8 +28,7 @@ SHOW STATUS\G # Query 2: 0 QPS, 0x concurrency, ID 0x3AEAAD0E15D725B5 at byte 600 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -57,8 +55,7 @@ SET autocommit=0\G # Query 3: 0 QPS, 0x concurrency, ID 0x813031B8BBC3B329 at byte 782 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -85,8 +82,7 @@ commit\G # Query 4: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 385 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: |^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt b/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt index 01749d56..9ba33aec 100644 --- a/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt +++ b/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt @@ -15,8 +15,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -47,8 +46,7 @@ SHOW STATUS\G # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.15 -# Query_time sparkline: |^ ^ | +# Scores: V/M = 0.15 # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report_limit.txt b/t/pt-query-digest/samples/slow013_report_limit.txt index 62f12054..d465694e 100644 --- a/t/pt-query-digest/samples/slow013_report_limit.txt +++ b/t/pt-query-digest/samples/slow013_report_limit.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report_outliers.txt b/t/pt-query-digest/samples/slow013_report_outliers.txt index 0096c271..6b60ddc0 100644 --- a/t/pt-query-digest/samples/slow013_report_outliers.txt +++ b/t/pt-query-digest/samples/slow013_report_outliers.txt @@ -5,8 +5,7 @@ # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.15 -# Query_time sparkline: |^ ^ | +# Scores: V/M = 0.15 # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,8 +31,7 @@ mytopuser # Item 2: 0 QPS, 0x concurrency, ID 0x8F4C76E92F07EABE at byte 600 _______ # This item is included in the report because it matches --outliers. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report_profile.txt b/t/pt-query-digest/samples/slow013_report_profile.txt index 7ace77bd..99245089 100644 --- a/t/pt-query-digest/samples/slow013_report_profile.txt +++ b/t/pt-query-digest/samples/slow013_report_profile.txt @@ -1,8 +1,8 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x31DA25F95494CA95 0.1494 99.9% 1 0.1494 1.00 0.00 SHOW STATUS -# 2 0x3AEAAD0E15D725B5 0.0001 0.1% 2 0.0000 1.00 0.00 SET -# 3 0x813031B8BBC3B329 0.0000 0.0% 1 0.0000 1.00 0.00 COMMIT -# MISC 0xMISC 0.0000 0.0% 1 0.0000 NS 0.0 <1 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =========== +# 1 0x31DA25F95494CA95 0.1494 99.9% 1 0.1494 0.00 SHOW STATUS +# 2 0x3AEAAD0E15D725B5 0.0001 0.1% 2 0.0000 0.00 SET +# 3 0x813031B8BBC3B329 0.0000 0.0% 1 0.0000 0.00 COMMIT +# MISC 0xMISC 0.0000 0.0% 1 0.0000 0.0 <1 ITEMS> diff --git a/t/pt-query-digest/samples/slow013_report_user.txt b/t/pt-query-digest/samples/slow013_report_user.txt index 49f20069..1428d2c5 100644 --- a/t/pt-query-digest/samples/slow013_report_user.txt +++ b/t/pt-query-digest/samples/slow013_report_user.txt @@ -5,8 +5,7 @@ # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.15 -# Query_time sparkline: |^ ^ | +# Scores: V/M = 0.15 # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,8 +31,7 @@ mytopuser # Item 2: 0 QPS, 0x concurrency, ID 0x8F4C76E92F07EABE at byte 600 _______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow014_report.txt b/t/pt-query-digest/samples/slow014_report.txt index 0931345b..dae63fbd 100644 --- a/t/pt-query-digest/samples/slow014_report.txt +++ b/t/pt-query-digest/samples/slow014_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 1313 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow018_execute_report_1.txt b/t/pt-query-digest/samples/slow018_execute_report_1.txt deleted file mode 100644 index c03e29d7..00000000 --- a/t/pt-query-digest/samples/slow018_execute_report_1.txt +++ /dev/null @@ -1,41 +0,0 @@ - -# Query 1: 0 QPS, 0x concurrency, ID 0x6083030C4A5D8996 at byte 0 ________ -# This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | -# Time range: all events occurred at 2007-10-15 21:43:52 -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 100 1 -# Exec time 100 2s 2s 2s 2s 2s 0 2s -# Exec orig ti 100 2s 2s 2s 2s 2s 0 2s -# Lock time 0 0 0 0 0 0 0 0 -# Rows sent 100 1 1 1 1 1 0 1 -# Rows examine 0 0 0 0 0 0 0 0 -# Query size 100 44 44 44 44 44 0 44 -# String: -# Hosts localhost -# Users root -# Query_time distribution -# 1us -# 10us -# 100us -# 1ms -# 10ms -# 100ms -# 1s ################################################################ -# 10s+ -# Tables -# SHOW TABLE STATUS LIKE 'foo'\G -# SHOW CREATE TABLE `foo`\G -INSERT INTO `foo` VALUES (1, 2, 3) /*... omitted ...*/\G - -# Statistic Count %/Events -# ====================================== ===== ======== -# events_read 1 100.00 -# events_parsed 1 100.00 -# events_aggregated 1 100.00 -# execute_error 1 100.00 -# execute_executed 1 100.00 -# execute_no_database 1 100.00 -# pipeline_restarted_after_SlowLogParser 1 100.00 diff --git a/t/pt-query-digest/samples/slow018_execute_report_2.txt b/t/pt-query-digest/samples/slow018_execute_report_2.txt deleted file mode 100644 index dc7626a3..00000000 --- a/t/pt-query-digest/samples/slow018_execute_report_2.txt +++ /dev/null @@ -1,30 +0,0 @@ -# Exec orig ti 100 2s 2s 2s 2s 2s 0 2s -# Lock time 0 0 0 0 0 0 0 0 -# Rows sent 100 1 1 1 1 1 0 1 -# Rows examine 0 0 0 0 0 0 0 0 -# Query size 100 44 44 44 44 44 0 44 -# Exec diff ti 100 0 0 0 0 0 0 0 -# String: -# Hosts localhost -# Users root -# Query_time distribution -# 1us -# 10us -# 100us -# 1ms -# 10ms -# 100ms -# 1s -# 10s+ -# Tables -# SHOW TABLE STATUS LIKE 'foo'\G -# SHOW CREATE TABLE `foo`\G -INSERT INTO `foo` VALUES (1, 2, 3) /*... omitted ...*/\G - -# Statistic Count %/Events -# ====================================== ===== ======== -# events_read 1 100.00 -# events_parsed 1 100.00 -# events_aggregated 1 100.00 -# execute_executed 1 100.00 -# pipeline_restarted_after_SlowLogParser 1 100.00 diff --git a/t/pt-query-digest/samples/slow018_report.txt b/t/pt-query-digest/samples/slow018_report.txt index 61fc6497..a24e59af 100644 --- a/t/pt-query-digest/samples/slow018_report.txt +++ b/t/pt-query-digest/samples/slow018_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6083030C4A5D8996 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow019_report.txt b/t/pt-query-digest/samples/slow019_report.txt index ba1fbb6c..2ae61093 100644 --- a/t/pt-query-digest/samples/slow019_report.txt +++ b/t/pt-query-digest/samples/slow019_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.02 -# Query_time sparkline: |^ ^ | +# Scores: V/M = 0.02 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 66 2 @@ -28,8 +27,7 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 diff --git a/t/pt-query-digest/samples/slow019_report_noza.txt b/t/pt-query-digest/samples/slow019_report_noza.txt index 634bc105..fb1683d8 100644 --- a/t/pt-query-digest/samples/slow019_report_noza.txt +++ b/t/pt-query-digest/samples/slow019_report_noza.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.02 -# Query_time sparkline: |^ ^ | +# Scores: V/M = 0.02 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 66 2 @@ -28,8 +27,7 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 diff --git a/t/pt-query-digest/samples/slow020_table_access.txt b/t/pt-query-digest/samples/slow020_table_access.txt deleted file mode 100644 index 4b11cac7..00000000 --- a/t/pt-query-digest/samples/slow020_table_access.txt +++ /dev/null @@ -1,3 +0,0 @@ -read `db2`.`foo` -write `db`.`tbl` -read `db1`.`foo` diff --git a/t/pt-query-digest/samples/slow023.txt b/t/pt-query-digest/samples/slow023.txt index 34b1e13a..ae3db272 100644 --- a/t/pt-query-digest/samples/slow023.txt +++ b/t/pt-query-digest/samples/slow023.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E38374648788E52 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow024.txt b/t/pt-query-digest/samples/slow024.txt index 1f97fb73..4bc6fe2c 100644 --- a/t/pt-query-digest/samples/slow024.txt +++ b/t/pt-query-digest/samples/slow024.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x93E5C17055D970BE at byte 514419 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -31,8 +30,7 @@ INSERT INTO `film_actor` VALUES (1,1,'2006-02-15 10:05:03') /*... 
omitted ...*/O # Query 2: 0 QPS, 0x concurrency, ID 0xA1C3EE4F5996E672 at byte 342942 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -61,8 +59,7 @@ INSERT IGNORE INTO `film_actor` VALUES (1,1,'2006-02-15 10:05:03') /*... omitted # Query 3: 0 QPS, 0x concurrency, ID 0xA2C576176F348267 at byte 171471 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow028.txt b/t/pt-query-digest/samples/slow028.txt index bc8735e9..d0f27025 100644 --- a/t/pt-query-digest/samples/slow028.txt +++ b/t/pt-query-digest/samples/slow028.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x182FF6A853858893 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow030_table_access.txt b/t/pt-query-digest/samples/slow030_table_access.txt deleted file mode 100644 index 3e8364d9..00000000 --- a/t/pt-query-digest/samples/slow030_table_access.txt +++ /dev/null @@ -1,2 +0,0 @@ -read `foo` -read `bar` diff --git a/t/pt-query-digest/samples/slow032.txt b/t/pt-query-digest/samples/slow032.txt index e9eb9152..18c3f306 100644 --- a/t/pt-query-digest/samples/slow032.txt +++ b/t/pt-query-digest/samples/slow032.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 1 diff --git a/t/pt-query-digest/samples/slow033-precise-since-until.txt b/t/pt-query-digest/samples/slow033-precise-since-until.txt index 379a99d1..f036f4cf 100644 --- a/t/pt-query-digest/samples/slow033-precise-since-until.txt +++ b/t/pt-query-digest/samples/slow033-precise-since-until.txt @@ -1,8 +1,7 @@ # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -33,8 +32,7 @@ SELECT * FROM bar\G # Query 2: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-rtm-event-1h.txt b/t/pt-query-digest/samples/slow033-rtm-event-1h.txt index 6fd36a99..473568ef 100644 --- a/t/pt-query-digest/samples/slow033-rtm-event-1h.txt +++ b/t/pt-query-digest/samples/slow033-rtm-event-1h.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,6 +31,6 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-rtm-event-25h.txt b/t/pt-query-digest/samples/slow033-rtm-event-25h.txt index 5b74d609..7b70d373 100644 --- a/t/pt-query-digest/samples/slow033-rtm-event-25h.txt +++ b/t/pt-query-digest/samples/slow033-rtm-event-25h.txt @@ -1,8 +1,7 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-25 11:19:27 to 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,6 +31,6 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 2 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 2 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt b/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt index 5f652dba..076011a9 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,14 +31,13 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -69,14 +67,13 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -107,8 +104,7 @@ SELECT * FROM bar\G # Query 2: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -138,15 +134,14 @@ SELECT * FROM bar\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 66.7% 2 0.0000 1.00 0.00 SELECT bar -# 2 0xAC1BF726F2AB10C5 0.0000 33.3% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 66.7% 2 0.0000 0.00 SELECT bar +# 2 0xAC1BF726F2AB10C5 0.0000 33.3% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -176,6 +171,6 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt index 3243cf0e..5bab12fe 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,14 +31,13 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -69,14 +67,13 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -106,14 +103,13 @@ SELECT * FROM foo\G SELECT * FROM bar\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 2 0.0000 1.00 0.00 SELECT bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 2 0.0000 0.00 SELECT bar # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -143,14 +139,13 @@ SELECT * FROM bar\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -180,6 +175,6 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt index 7d406587..c4d782bb 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,14 +31,13 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -69,14 +67,13 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 344 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-27 11:19:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -106,6 +103,6 @@ SELECT * FROM foo\G SELECT * FROM bar\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 0.00 SELECT bar diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt index 9a101610..24839a2d 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,14 +31,13 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -69,14 +67,13 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 344 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-27 11:19:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -106,14 +103,13 @@ SELECT * FROM foo\G SELECT * FROM bar\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 0.00 SELECT bar # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-27 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -143,14 +139,13 @@ SELECT * FROM bar\G SELECT * FROM bar\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 0.00 SELECT bar # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -180,14 +175,13 @@ SELECT * FROM bar\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -217,6 +211,6 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-since-Nd.txt b/t/pt-query-digest/samples/slow033-since-Nd.txt index d196607b..024db278 100644 --- a/t/pt-query-digest/samples/slow033-since-Nd.txt +++ b/t/pt-query-digest/samples/slow033-since-Nd.txt @@ -1,8 +1,7 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-25 11:19:27 to 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -33,8 +32,7 @@ SELECT * FROM foo\G # Query 2: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-since-yymmdd.txt b/t/pt-query-digest/samples/slow033-since-yymmdd.txt index bb2c52e7..21f1cc67 100644 --- a/t/pt-query-digest/samples/slow033-since-yymmdd.txt +++ b/t/pt-query-digest/samples/slow033-since-yymmdd.txt @@ -1,8 +1,7 @@ # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -33,8 +32,7 @@ SELECT * FROM bar\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-27 11:30:00 to 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt b/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt index 169a3421..4e0d2d2a 100644 --- a/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt +++ b/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-until-date.txt b/t/pt-query-digest/samples/slow033-until-date.txt index cd331351..26373f79 100644 --- a/t/pt-query-digest/samples/slow033-until-date.txt +++ b/t/pt-query-digest/samples/slow033-until-date.txt @@ -1,8 +1,7 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-07-25 11:19:27 to 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt index f108bdec..d861d638 100644 --- a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt +++ b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xABE9508269335CD1 at byte 1866 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Lock_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-08-05 13:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -38,8 +37,7 @@ DELETE FROM forest WHERE animal = 'dead'\G select * from forest WHERE animal = 'dead'\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 934 -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.03 -# Lock_time sparkline: | _^ | +# Scores: V/M = 0.03 # Time range: 2009-08-05 11:00:27 to 13:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -71,8 +69,7 @@ select * from forest WHERE animal = 'dead'\G SELECT * FROM foo\G # Query 3: 0 QPS, 0x concurrency, ID 0xB79802214165F670 at byte 1267 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.73 -# Lock_time sparkline: | ^^ | +# Scores: V/M = 0.73 # Time range: all events occurred at 2009-08-05 12:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -102,8 +99,7 @@ SELECT * FROM foo\G INSERT INTO tbl VALUES ('a', 'b')\G # Query 4: 0 QPS, 0x concurrency, ID 0x1F9B2F47A843D460 at byte 333 ______ -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Lock_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -132,8 +128,7 @@ INSERT INTO tbl VALUES ('a', 'b')\G SELECT id FROM tbl WHERE id = 1\G # Query 5: 0 QPS, 0x concurrency, ID 0x3F1024B96D9D469E at byte 625 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Lock_time sparkline: |^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -166,10 +161,10 @@ SELECT id FROM tbl WHERE id = 1\G SELECT COUNT(*) FROM blah WHERE col > 2\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== =============== ===== ========= ==== ===== ===== -# 1 0xABE9508269335CD1 1349.0001 98.9% 1 1349.0001 0.00 0.00 DELETE forest -# 2 0xAC1BF726F2AB10C5 2.9042 0.2% 4 0.7261 1.00 0.03 SELECT foo -# 3 0xB79802214165F670 0.7261 0.1% 2 0.3631 1.00 0.73 INSERT tbl -# 4 0x1F9B2F47A843D460 1.7261 0.1% 1 1.7261 0.50 0.00 SELECT tbl -# 5 0x3F1024B96D9D469E 9.0001 0.7% 1 9.0001 0.00 0.00 SELECT blah +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== =============== ===== ========= ===== ========== +# 1 0xABE9508269335CD1 1349.0001 98.9% 1 1349.0001 
0.00 DELETE forest +# 2 0xAC1BF726F2AB10C5 2.9042 0.2% 4 0.7261 0.03 SELECT foo +# 3 0xB79802214165F670 0.7261 0.1% 2 0.3631 0.73 INSERT tbl +# 4 0x1F9B2F47A843D460 1.7261 0.1% 1 1.7261 0.00 SELECT tbl +# 5 0x3F1024B96D9D469E 9.0001 0.7% 1 9.0001 0.00 SELECT blah diff --git a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt index 39c0d365..56feb2ef 100644 --- a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt +++ b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xABE9508269335CD1 at byte 1866 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-08-05 13:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -38,8 +37,7 @@ DELETE FROM forest WHERE animal = 'dead'\G select * from forest WHERE animal = 'dead'\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 934 -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.03 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.03 # Time range: 2009-08-05 11:00:27 to 13:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -71,8 +69,7 @@ select * from forest WHERE animal = 'dead'\G SELECT * FROM foo\G # Query 3: 0 QPS, 0x concurrency, ID 0xB79802214165F670 at byte 1267 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.73 -# Query_time sparkline: | ^ ^ | +# Scores: V/M = 0.73 # Time range: all events occurred at 2009-08-05 12:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -102,8 +99,7 @@ SELECT * FROM foo\G INSERT INTO tbl VALUES ('a', 'b')\G # Query 4: 0 QPS, 0x concurrency, ID 0x1F9B2F47A843D460 at byte 333 ______ -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -132,8 +128,7 @@ INSERT INTO tbl VALUES ('a', 'b')\G SELECT id FROM tbl WHERE id = 1\G # Query 5: 0 QPS, 0x concurrency, ID 0x3F1024B96D9D469E at byte 625 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -166,10 +161,10 @@ SELECT id FROM tbl WHERE id = 1\G SELECT COUNT(*) FROM blah WHERE col > 2\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== =============== ===== ========= ==== ===== ===== -# 1 0xABE9508269335CD1 1349.0001 98.9% 1 1349.0001 0.00 0.00 DELETE forest -# 2 0xAC1BF726F2AB10C5 2.9042 0.2% 4 0.7261 1.00 0.03 SELECT foo -# 3 0xB79802214165F670 0.7261 0.1% 2 0.3631 1.00 0.73 INSERT tbl -# 4 0x1F9B2F47A843D460 1.7261 0.1% 1 1.7261 0.50 0.00 SELECT tbl -# 5 0x3F1024B96D9D469E 9.0001 0.7% 1 9.0001 0.00 0.00 SELECT blah +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== =============== ===== ========= ===== ========== +# 1 0xABE9508269335CD1 1349.0001 98.9% 1 1349.0001 0.00 DELETE forest +# 2 0xAC1BF726F2AB10C5 2.9042 0.2% 4 0.7261 0.03 SELECT foo +# 3 
0xB79802214165F670 0.7261 0.1% 2 0.3631 0.73 INSERT tbl +# 4 0x1F9B2F47A843D460 1.7261 0.1% 1 1.7261 0.00 SELECT tbl +# 5 0x3F1024B96D9D469E 9.0001 0.7% 1 9.0001 0.00 SELECT blah diff --git a/t/pt-query-digest/samples/slow035.txt b/t/pt-query-digest/samples/slow035.txt index e1262170..c337879d 100644 --- a/t/pt-query-digest/samples/slow035.txt +++ b/t/pt-query-digest/samples/slow035.txt @@ -19,8 +19,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x727841EC88423713 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -57,8 +56,7 @@ INSERT INTO db.v (m, b) VALUES ('', 'Exact')\G # Query 2: 0 QPS, 0x concurrency, ID 0x9E892D4B16D7BFC2 at byte 525 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -68,7 +66,6 @@ INSERT INTO db.v (m, b) VALUES ('', 'Exact')\G # Rows sent 0 0 0 0 0 0 0 0 # Rows examine 0 0 0 0 0 0 0 0 # Query size 52 48 48 48 48 48 0 48 -# InnoDB: # String: # Hosts # Users [SQL_SLAVE] @@ -88,7 +85,7 @@ INSERT INTO db.v (m, b) VALUES ('', 'Exact')\G SELECT * FROM blah WHERE something = 'important'\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x727841EC88423713 0.0000 0.0% 1 0.0000 1.00 0.00 INSERT db.v -# 2 0x9E892D4B16D7BFC2 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT blah +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =========== +# 1 0x727841EC88423713 0.0000 0.0% 1 0.0000 0.00 INSERT db.v +# 2 0x9E892D4B16D7BFC2 0.0000 0.0% 1 0.0000 0.00 SELECT blah diff --git a/t/pt-query-digest/samples/slow037_report.txt b/t/pt-query-digest/samples/slow037_report.txt index ef75bee9..ee1fe919 100644 --- a/t/pt-query-digest/samples/slow037_report.txt +++ b/t/pt-query-digest/samples/slow037_report.txt @@ -5,8 +5,7 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xABCC9DEC8C43EEDC at byte 0 _________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -28,6 +27,6 @@ LOCK foo bar # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ======== ============= ===== ====== ==== ===== ============ -# 1 0x 0.0010 100.0% 1 0.0010 1.00 0.00 LOCK foo bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ======== ============= ===== ====== ===== ============ +# 1 0x 0.0010 100.0% 1 0.0010 0.00 LOCK foo bar diff --git a/t/pt-query-digest/samples/slow042-show-all-host.txt b/t/pt-query-digest/samples/slow042-show-all-host.txt index 3fcf79c3..fe9f56dc 100644 --- a/t/pt-query-digest/samples/slow042-show-all-host.txt +++ b/t/pt-query-digest/samples/slow042-show-all-host.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7CE9953EA3A36141 at byte 417 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-05 19:55:11 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow042.txt b/t/pt-query-digest/samples/slow042.txt index 89616f51..bee41e47 100644 --- a/t/pt-query-digest/samples/slow042.txt +++ b/t/pt-query-digest/samples/slow042.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7CE9953EA3A36141 at byte 417 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-05 19:55:11 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow048.txt b/t/pt-query-digest/samples/slow048.txt index 3213fa47..efa2cae1 100644 --- a/t/pt-query-digest/samples/slow048.txt +++ b/t/pt-query-digest/samples/slow048.txt @@ -1,8 +1,7 @@ # Query 1: 1.33 QPS, 0.00x concurrency, ID 0x208AC308FD716D83 at byte 454 # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2010-06-24 11:48:27 to 11:48:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -28,6 +27,6 @@ SELECT * FROM `products` ORDER BY name, shape asc\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x208AC308FD716D83 0.0001 100.0% 4 0.0000 1.00 0.00 SELECT products +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x208AC308FD716D83 0.0001 100.0% 4 0.0000 0.00 SELECT products diff --git a/t/pt-query-digest/samples/slow049.txt b/t/pt-query-digest/samples/slow049.txt index e469e0ac..290045e5 100644 --- a/t/pt-query-digest/samples/slow049.txt +++ b/t/pt-query-digest/samples/slow049.txt @@ -10,17 +10,16 @@ # Query size 308 30 34 30.80 31.70 1.64 28.75 # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== =============== ===== ======== ==== ===== ====== -# 1 0x95AADD230F4EB56A 1000.0000 53.8% 2 500.0000 0.00 0.00 SELECT two -# 2 0x5081E1858C60FD05 500.0000 26.9% 1 500.0000 0.00 0.00 SELECT three -# 4 0x70E215C4BFED0080 50.0000 2.7% 5 10.0000 0.00 0.00 SELECT one -# MISC 0xMISC 310.0000 16.7% 2 155.0000 NS 0.0 <2 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== =============== ===== ======== ===== =========== +# 1 0x95AADD230F4EB56A 1000.0000 53.8% 2 500.0000 0.00 SELECT two +# 2 0x5081E1858C60FD05 500.0000 26.9% 1 500.0000 0.00 SELECT three +# 4 0x70E215C4BFED0080 50.0000 2.7% 5 10.0000 0.00 SELECT one +# MISC 0xMISC 310.0000 16.7% 2 155.0000 0.0 <2 ITEMS> # Query 1: 2 QPS, 1.00kx concurrency, ID 0x95AADD230F4EB56A at byte 886 __ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: 2010-06-24 11:48:34 to 11:48:35 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -47,8 +46,7 @@ SELECT two FROM two WHERE id=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x5081E1858C60FD05 at byte 1013 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-06-24 11:48:35 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -75,8 +73,7 @@ SELECT three FROM three WHERE id=?\G # Query 4: 1.25 QPS, 12.50x concurrency, ID 0x70E215C4BFED0080 at byte 633 # This item is included in the report because it matches --outliers. -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: 2010-06-24 11:48:21 to 11:48:25 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow050.txt b/t/pt-query-digest/samples/slow050.txt index 052c252b..594a7458 100644 --- a/t/pt-query-digest/samples/slow050.txt +++ b/t/pt-query-digest/samples/slow050.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x305E73C51188758F at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^| +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-06-24 11:48:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -29,6 +28,6 @@ UPDATE mybbl_MBMessage SET groupId = (select groupId from Group_ where name = 'Guest')\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============== ===== ======= ==== ===== ======== -# 1 0x305E73C51188758F 10.0000 100.0% 1 10.0000 0.00 0.00 UPDATE SELECT mybbl_MBMessage Group_ +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============== ===== ======= ===== ============= +# 1 0x305E73C51188758F 10.0000 100.0% 1 10.0000 0.00 UPDATE SELECT mybbl_MBMessage Group_ diff --git a/t/pt-query-digest/samples/slow051.txt b/t/pt-query-digest/samples/slow051.txt index be391ce9..d96d69c2 100644 --- a/t/pt-query-digest/samples/slow051.txt +++ b/t/pt-query-digest/samples/slow051.txt @@ -1,8 +1,7 @@ # Query 1: 0.20 QPS, 0.00x concurrency, ID 0xD989521B246E945B at byte 146 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2007-12-18 11:48:27 to 11:48:37 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -27,6 +26,6 @@ LOAD DATA INFILE '/tmp/bar.txt' INTO db.tbl\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ====== -# 1 0xD989521B246E945B 0.0000 100.0% 2 0.0000 1.00 0.00 db.tbl +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ====== +# 1 0xD989521B246E945B 0.0000 100.0% 2 0.0000 0.00 db.tbl diff --git a/t/pt-query-digest/samples/slow052-apdex-t-0.1.txt b/t/pt-query-digest/samples/slow052-apdex-t-0.1.txt deleted file mode 100644 index da8b2774..00000000 --- a/t/pt-query-digest/samples/slow052-apdex-t-0.1.txt +++ /dev/null @@ -1,66 +0,0 @@ - -# Query 1: 0 QPS, 0x concurrency, ID 0x32B0659E6D13E5A2 at byte 16849 ____ -# This item is included in the report because it matches --limit. -# Scores: Apdex = 0.00 [0.1], V/M = 0.48 -# Query_time sparkline: | ^ | -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 50 100 -# Exec time 74 308s 1s 5s 3s 5s 1s 3s -# Lock time 0 0 0 0 0 0 0 0 -# Rows sent 50 100 1 1 1 1 0 1 -# Rows examine 0 0 0 0 0 0 0 0 -# Query size 51 4.59k 47 47 47 47 0 47 -# String: -# Hosts localhost -# Users root -# Query_time distribution -# 1us -# 10us -# 100us -# 1ms -# 10ms -# 100ms -# 1s ################################################################ -# 10s+ -# Tables -# SHOW TABLE STATUS LIKE 'unsteady_table'\G -# SHOW CREATE TABLE `unsteady_table`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select very_variable_column from unsteady_table\G - -# Query 2: 0 QPS, 0x concurrency, ID 0x2F621C2B0611518C at byte 8582 _____ -# This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.00 [0.1], V/M = 0.00 -# Query_time sparkline: | ^ | -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 50 100 -# Exec time 25 105s 1s 1s 1s 1s 30ms 1s -# Lock time 0 0 0 0 0 0 0 0 -# Rows sent 50 100 1 1 1 1 0 1 -# Rows examine 0 0 0 0 0 0 0 0 -# Query size 48 4.39k 45 45 45 45 0 45 -# String: -# Hosts localhost -# Users root -# Query_time distribution -# 1us -# 10us -# 100us -# 1ms -# 10ms -# 100ms -# 1s ################################################################ -# 10s+ -# Tables -# SHOW TABLE STATUS LIKE 'steady_table'\G -# SHOW CREATE TABLE `steady_table`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select less_variable_column from steady_table\G - -# Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============== ===== ====== ==== ===== ========= -# 1 0x32B0659E6D13E5A2 308.4675 74.6% 100 3.0847 0.00 0.48 SELECT unsteady_table -# 2 0x2F621C2B0611518C 104.9344 25.4% 100 1.0493 0.00 0.00 SELECT steady_table diff --git a/t/pt-query-digest/samples/slow052.txt b/t/pt-query-digest/samples/slow052.txt index 5827481f..92214103 100644 --- a/t/pt-query-digest/samples/slow052.txt +++ b/t/pt-query-digest/samples/slow052.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x32B0659E6D13E5A2 at byte 16849 ____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.38 [1.0], V/M = 0.48 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.48 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 100 @@ -31,8 +30,7 @@ select very_variable_column from unsteady_table\G # Query 2: 0 QPS, 0x concurrency, ID 0x2F621C2B0611518C at byte 8582 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.72 [1.0], V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 100 @@ -60,7 +58,7 @@ select very_variable_column from unsteady_table\G select less_variable_column from steady_table\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============== ===== ====== ==== ===== ========= -# 1 0x32B0659E6D13E5A2 308.4675 74.6% 100 3.0847 0.38 0.48 SELECT unsteady_table -# 2 0x2F621C2B0611518C 104.9344 25.4% 100 1.0493 0.72 0.00 SELECT steady_table +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============== ===== ====== ===== ============== +# 1 0x32B0659E6D13E5A2 308.4675 74.6% 100 3.0847 0.48 SELECT unsteady_table +# 2 0x2F621C2B0611518C 104.9344 25.4% 100 1.0493 0.00 SELECT steady_table diff --git a/t/pt-query-digest/samples/slow053.txt b/t/pt-query-digest/samples/slow053.txt index c97655da..51fec151 100644 --- a/t/pt-query-digest/samples/slow053.txt +++ b/t/pt-query-digest/samples/slow053.txt @@ -1,8 +1,7 @@ # Query 1: 2 QPS, 1.90x concurrency, ID 0xA4EAD36B5CEB1C13 at byte 1044 __ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.01 -# Query_time sparkline: | ^^ | +# Scores: V/M = 0.01 # Time range: 2011-02-08 12:00:09 to 12:00:10 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,8 +31,7 @@ SELECT * FROM blah WHERE id IS NOT NULL\G # Query 2: 1.50 QPS, 0.03x concurrency, ID 0xAC0EC652760FEEB3 at byte 913 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.03 -# Query_time sparkline: | ^ _ | +# Scores: V/M = 0.03 # Time range: 2011-02-08 12:00:06 to 12:00:08 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -63,8 +61,7 @@ SELECT * FROM bar WHERE id=12\G # Query 3: 1.25 QPS, 0.00x concurrency, ID 0xBB11C6B7F3BAAB30 at byte 521 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2011-02-08 12:00:01 to 12:00:05 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow054.txt b/t/pt-query-digest/samples/slow054.txt index 777f8908..ed380133 100644 --- a/t/pt-query-digest/samples/slow054.txt +++ b/t/pt-query-digest/samples/slow054.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xBB11C6B7F3BAAB30 at byte 1058 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2011-02-08 12:00:01 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow055.txt b/t/pt-query-digest/samples/slow055.txt index c920a1ee..c834e436 100644 --- a/t/pt-query-digest/samples/slow055.txt +++ b/t/pt-query-digest/samples/slow055.txt @@ -5,8 +5,7 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xE9800998ECF8427E at byte 420 _______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.01 -# Query_time sparkline: |^ ^ ^ | +# Scores: V/M = 0.01 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 3 diff --git a/t/pt-query-digest/samples/slow056.txt b/t/pt-query-digest/samples/slow056.txt index 8e6571d3..a265aa1f 100644 --- a/t/pt-query-digest/samples/slow056.txt +++ b/t/pt-query-digest/samples/slow056.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x54E0BB9E70EAA792 at byte 596 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2012-11-23 19:56:06 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -38,8 +37,7 @@ select b = b + 30 from t where user_id=1\G # Query 2: 0 QPS, 0x concurrency, ID 0xE9800998ECF8427E at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2012-11-23 19:56:06 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump001.txt b/t/pt-query-digest/samples/tcpdump001.txt index cc5de25d..ba6bb94a 100644 --- a/t/pt-query-digest/samples/tcpdump001.txt +++ b/t/pt-query-digest/samples/tcpdump001.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xA3C9C49321D65C30 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 09:50:16.805123 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump002_report.txt b/t/pt-query-digest/samples/tcpdump002_report.txt index 111be585..91958ebb 100644 --- a/t/pt-query-digest/samples/tcpdump002_report.txt +++ b/t/pt-query-digest/samples/tcpdump002_report.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 1470 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 11:00:13.118191 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -29,8 +28,7 @@ administrator command: Connect\G # Query 2: 0 QPS, 0x concurrency, ID 0xE3A3649C5FAC418D at byte 2449 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 11:00:13.118643 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -58,8 +56,7 @@ select @@version_comment limit 1\G # Query 3: 0 QPS, 0x concurrency, ID 0xAE5A83B27932AB98 at byte 3298 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 11:00:13.119079 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -87,8 +84,7 @@ select "paris in the the spring" as trick\G # Query 4: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 4186 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 11:00:13.119487 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump003.txt b/t/pt-query-digest/samples/tcpdump003.txt index d404e3b6..2ae2d210 100644 --- a/t/pt-query-digest/samples/tcpdump003.txt +++ b/t/pt-query-digest/samples/tcpdump003.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 1455 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 12:41:46.357853 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump012.txt b/t/pt-query-digest/samples/tcpdump012.txt index cc5de25d..ba6bb94a 100644 --- a/t/pt-query-digest/samples/tcpdump012.txt +++ b/t/pt-query-digest/samples/tcpdump012.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xA3C9C49321D65C30 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 09:50:16.805123 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump017_report.txt b/t/pt-query-digest/samples/tcpdump017_report.txt index 904558df..b387b333 100644 --- a/t/pt-query-digest/samples/tcpdump017_report.txt +++ b/t/pt-query-digest/samples/tcpdump017_report.txt @@ -10,8 +10,7 @@ # Query 1: 2.13 QPS, 0.36x concurrency, ID 0xE3A3649C5FAC418D at byte 2548 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.19 -# Query_time sparkline: | ^ ^ | +# Scores: V/M = 0.19 # Time range: 2009-04-12 11:00:13.118643 to 11:00:14.999999 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -36,6 +35,6 @@ select @@version_comment limit 1\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ====== -# 1 0xE3A3649C5FAC418D 0.6696 100.0% 4 0.1674 1.00 0.19 SELECT +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ====== +# 1 0xE3A3649C5FAC418D 0.6696 100.0% 4 0.1674 0.19 SELECT diff --git a/t/pt-query-digest/samples/tcpdump021.txt b/t/pt-query-digest/samples/tcpdump021.txt index 7f5dbb6d..82d201e1 100644 --- a/t/pt-query-digest/samples/tcpdump021.txt +++ b/t/pt-query-digest/samples/tcpdump021.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,8 +33,7 @@ SELECT i FROM d.t WHERE i=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x3F79759E7FA2F117 at byte 1106 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 09:23:49.637892 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -69,8 +67,7 @@ SELECT i FROM d.t WHERE i="3"\G # Query 3: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 1850 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 09:23:49.638381 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump022.txt b/t/pt-query-digest/samples/tcpdump022.txt index 04fe6b9e..84857f9d 100644 --- a/t/pt-query-digest/samples/tcpdump022.txt +++ b/t/pt-query-digest/samples/tcpdump022.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xC30A1A850F4E510F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 13:41:12.811188 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,8 +33,7 @@ SELECT i,j FROM d.t2 WHERE i=? AND j=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x26EEAE2EADD904A1 at byte 1330 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 13:41:12.811591 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump023.txt b/t/pt-query-digest/samples/tcpdump023.txt index 34b31182..66410f70 100644 --- a/t/pt-query-digest/samples/tcpdump023.txt +++ b/t/pt-query-digest/samples/tcpdump023.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E77A2947B4BC375 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 14:14:55.951863 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,8 +33,7 @@ SELECT * FROM d.t3 WHERE v=? OR c=? OR f=?\G # Query 2: 0 QPS, 0x concurrency, ID 0xA0B1C345E8654C18 at byte 1540 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 14:14:55.952344 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump024.txt b/t/pt-query-digest/samples/tcpdump024.txt index 68a53f11..521289c9 100644 --- a/t/pt-query-digest/samples/tcpdump024.txt +++ b/t/pt-query-digest/samples/tcpdump024.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E77A2947B4BC375 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 14:33:13.711351 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,8 +33,7 @@ SELECT * FROM d.t3 WHERE v=? OR c=? OR f=?\G # Query 2: 0 QPS, 0x concurrency, ID 0xA0B1C345E8654C18 at byte 1540 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 14:33:13.711642 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump025.txt b/t/pt-query-digest/samples/tcpdump025.txt index 462c7908..ba38d084 100644 --- a/t/pt-query-digest/samples/tcpdump025.txt +++ b/t/pt-query-digest/samples/tcpdump025.txt @@ -1,8 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x72B6E5BC2632931C at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 14:44:52.709181 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,8 +33,7 @@ SELECT * FROM d.t WHERE 1 LIMIT 1;\G # Query 2: 0 QPS, 0x concurrency, ID 0xDDF5E71E9A66B752 at byte 1014 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-12-08 14:44:52.709597 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump033.txt b/t/pt-query-digest/samples/tcpdump033.txt index 08a06069..795dc84e 100644 --- a/t/pt-query-digest/samples/tcpdump033.txt +++ b/t/pt-query-digest/samples/tcpdump033.txt @@ -12,8 +12,7 @@ # Query 1: 2.03k QPS, 0.28x concurrency, ID 0x6EE88728F6F29C72 at byte 800 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-12-18 08:44:07.235011 to 08:44:07.238467 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -47,8 +46,7 @@ select * from d.t where name="adam"\G # Query 2: 1.17k QPS, 0.19x concurrency, ID 0xECBCD0412B5E497A at byte 9215 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: 2009-12-18 08:44:07.234727 to 08:44:07.238999 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -80,8 +78,7 @@ select * from d.t where name="daniel"\G # Query 3: 1.70k QPS, 0x concurrency, ID 0x559914DA8A7B7F28 at byte 8202 _ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00
-# Query_time sparkline: | |
+# Scores: V/M = 0.00
 # Time range: 2009-12-18 08:44:07.236509 to 08:44:07.238274
 # Attribute pct total min max avg 95% stddev median
 # ============ === ======= ======= ======= ======= ======= ======= =======
@@ -105,11 +102,11 @@ select * from d.t where name="daniel"\G
 DEALLOCATE PREPARE 4\G

 # Profile
-# Rank Query ID Response time Calls R/Call Apdx V/M Item
-# ==== ================== ============= ===== ====== ==== ===== ==========
-# 1 0x6EE88728F6F29C72 0.0010 54.4% 7 0.0001 1.00 0.00 SELECT d.t
-# 2 0xECBCD0412B5E497A 0.0008 45.6% 5 0.0002 1.00 0.00 SELECT d.t
-# 3 0x559914DA8A7B7F28 0.0000 0.0% 3 0.0000 1.00 0.00
+# Rank Query ID Response time Calls R/Call V/M Item
+# ==== ================== ============= ===== ====== ===== ==========
+# 1 0x6EE88728F6F29C72 0.0010 54.4% 7 0.0001 0.00 SELECT d.t
+# 2 0xECBCD0412B5E497A 0.0008 45.6% 5 0.0002 0.00 SELECT d.t
+# 3 0x559914DA8A7B7F28 0.0000 0.0% 3 0.0000 0.00

 # Prepared statements
 # Rank Query ID PREP PREP Response EXEC EXEC Response Item
diff --git a/t/pt-query-digest/samples/tcpdump041.txt b/t/pt-query-digest/samples/tcpdump041.txt
index 09edac73..f07654b3 100644
--- a/t/pt-query-digest/samples/tcpdump041.txt
+++ b/t/pt-query-digest/samples/tcpdump041.txt
@@ -10,8 +10,7 @@

 # Query 1: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________
 # This item is included in the report because it matches --limit.
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00
-# Query_time sparkline: | ^ |
+# Scores: V/M = 0.00
 # Time range: all events occurred at 2009-12-08 09:23:49.637394
 # Attribute pct total min max avg 95% stddev median
 # ============ === ======= ======= ======= ======= ======= ======= =======
@@ -42,9 +41,9 @@ PREPARE SELECT i FROM d.t WHERE i=?\G
 SELECT i FROM d.t WHERE i=?\G

 # Profile
-# Rank Query ID Response time Calls R/Call Apdx V/M Item
-# ==== ================== ============= ===== ====== ==== ===== ==========
-# 1 0xAA8E9FA785927259 0.0003 100.0% 1 0.0003 1.00 0.00 SELECT d.t
+# Rank Query ID Response time Calls R/Call V/M Item
+# ==== ================== ============= ===== ====== ===== ==========
+# 1 0xAA8E9FA785927259 0.0003 100.0% 1 0.0003 0.00 SELECT d.t

 # Prepared statements
 # Rank Query ID PREP PREP Response EXEC EXEC Response Item
diff --git a/t/pt-query-digest/slowlog_analyses.t b/t/pt-query-digest/slowlog_analyses.t
index 978d3994..eb4eef0f 100644
--- a/t/pt-query-digest/slowlog_analyses.t
+++ b/t/pt-query-digest/slowlog_analyses.t
@@ -276,24 +276,6 @@ ok(
 'Distill UNLOCK and LOCK TABLES'
 );

-# Test --table-access.
-ok(
- no_diff(
- sub { pt_query_digest::main(@args, $sample.'slow020.txt', qw(--no-report --table-access)) },
- "t/pt-query-digest/samples/slow020_table_access.txt",
- ),
- 'Analysis for slow020 with --table-access'
-);
-
-# This one tests that the list of tables is unique.
-ok(
- no_diff(
- sub { pt_query_digest::main(@args, $sample.'slow030.txt', qw(--no-report --table-access)) },
- "t/pt-query-digest/samples/slow030_table_access.txt"
- ),
- 'Analysis for slow030 with --table-access'
-);
-
 ok(
 no_diff(
 sub { pt_query_digest::main(@args, $sample.'slow034.txt', qw(--order-by Lock_time:sum),
@@ -391,7 +373,6 @@ ok(

 # #############################################################################
 # Issue 1124: Make mk-query-digest profile include variance-to-mean ratio
-# Issue 1054: Add Apdex scores to mk-query-digest report
 # #############################################################################
 ok(
 no_diff(
@@ -401,14 +382,6 @@ ok(
 'Analysis for slow052 (Apdex and V/M)',
 );

-ok(
- no_diff(
- sub { pt_query_digest::main(@args, qw(--apdex-t 0.1), '--report-format', 'query_report,profile', $sample.'slow052.txt') },
- "t/pt-query-digest/samples/slow052-apdex-t-0.1.txt",
- ),
- 'Analysis for slow052 (Apdex T = 0.1)',
-);
-
 # #############################################################################
 # Bug 821694: pt-query-digest doesn't recognize hex InnoDB txn IDs
 # #############################################################################
diff --git a/t/pt-query-digest/statistics.t b/t/pt-query-digest/statistics.t
deleted file mode 100644
index 6609f592..00000000
--- a/t/pt-query-digest/statistics.t
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env perl
-
-BEGIN {
- die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
- unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
- unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
-};
-
-use strict;
-use warnings FATAL => 'all';
-use English qw(-no_match_vars);
-use Test::More tests => 1;
-
-use PerconaTest;
-
-require "$trunk/bin/pt-query-digest";
-
-my @args = qw(--no-report --statistics);
-my $sample = "$trunk/t/lib/samples/slowlogs/";
-
-ok(
- no_diff(
- sub { pt_query_digest::main(@args, $sample.'slow002.txt') },
- "t/pt-query-digest/samples/stats-slow002.txt"
- ),
- '--statistics for slow002.txt',
-);
-
-# #############################################################################
-# Done.
-# #############################################################################
-exit;
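
Note on the score kept by these samples: after this change the only per-class score left in the query reports and in the profile is V/M, the variance-to-mean ratio of Query_time referenced by issue 1124 above; the Apdex column being dropped was a separate score tied to the --apdex-t threshold shown in brackets (e.g. [1.0]). The following is a minimal sketch of what the V/M number means, computed over a plain list of per-event response times; that input shape and the helper name are assumptions for illustration only, since the tool itself derives the ratio from its aggregated attribute statistics rather than from raw values.

    use strict;
    use warnings;

    # Variance-to-mean ratio (index of dispersion) of a list of values.
    sub variance_to_mean {
       my (@times) = @_;
       return 0 unless @times;
       my $n    = scalar @times;
       my $sum  = 0;
       $sum    += $_ for @times;
       my $mean = $sum / $n;
       return 0 if $mean == 0;          # avoid division by zero
       my $ss   = 0;
       $ss     += ( $_ - $mean )**2 for @times;
       my $variance = $ss / $n;         # population variance
       return $variance / $mean;
    }

    # Made-up Query_time values: a tight cluster of response times gives a
    # V/M near 0, as in most of the sample profiles above, while widely
    # spread times push the ratio up.
    printf "V/M = %.2f\n", variance_to_mean(0.70, 0.72, 0.74, 0.76);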